Columns:
  repository_name      string (length 7 to 107)
  function_path        string (length 4 to 190)
  function_identifier  string (length 1 to 236)
  language             string (1 class)
  function             string (length 9 to 647k)
  docstring            string (length 5 to 488k)
  function_url         string (length 71 to 285)
  context              string (length 0 to 2.51M)
  license              string (5 classes)
appsembler/figures
devsite/devsite/management/commands/check_devsite.py
Command.run_devsite_celery_task
python
def run_devsite_celery_task(self):
    print('Checking Celery...')
    msg = 'run_devsite_check management command'
    result = celery_check.delay(msg)
    print(('Task called. task_id={}'.format(result.task_id)))
    try:
        print(('result={}'.format(result.get())))
    except NotImplementedError as e:
        print(('Error: {}'.format(e)))
    print('Done checking Celery')
Perform basic Celery checking. In production, we typically don't want to call `.get()`, but trying it here just to see if the results backend is configured and working. See the `get` method here: https://docs.celeryproject.org/en/stable/reference/celery.result.html
https://github.com/appsembler/figures/blob/c14d059b425d81710b0d4c7ebea317a65592998a/devsite/devsite/management/commands/check_devsite.py#L19-L38
from __future__ import absolute_import from __future__ import print_function from django.core.management.base import BaseCommand from devsite.celery import celery_check class Command(BaseCommand):
MIT License
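A minimal way to exercise the check above, as a hedged sketch: it assumes the file registers a standard Django management command named after its module (check_devsite), that handle() ends up calling run_devsite_celery_task, and that a Celery worker and result backend are running.

# Hypothetical invocation; requires a configured Django settings module.
from django.core.management import call_command

call_command('check_devsite')  # prints the task_id, then the result or an error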
iffix/machin
machin/frame/buffers/prioritized_buffer.py
WeightTree.get_weight_sum
python
def get_weight_sum(self) -> float:
    return self.weights[-1]
Returns: Total weight sum.
https://github.com/iffix/machin/blob/7fa986b1bafdefff117d6ff73d14644a5488de9d/machin/frame/buffers/prioritized_buffer.py#L54-L59
from typing import Union, Dict, List, Tuple, Any from ..transition import TransitionBase from .buffer import Buffer import torch as t import numpy as np class WeightTree: def __init__(self, size: int): self.size = size self.max_leaf = 0 self.depth = int(np.ceil(np.log2(self.size))) + 1 level_sizes_log = np.arange(self.depth - 1, -1, -1) self.sizes = np.power(2, level_sizes_log) self.offsets = np.concatenate(([0], np.cumsum(self.sizes))) self.weights = np.zeros([self.offsets[-1]], dtype=np.float64)
MIT License
docusign/docusign-python-client
docusign_esign/models/user_info.py
UserInfo.user_name
python
def user_name(self, user_name):
    self._user_name = user_name
Sets the user_name of this UserInfo. # noqa: E501 :param user_name: The user_name of this UserInfo. # noqa: E501 :type: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/user_info.py#L342-L351
import pprint import re import six from docusign_esign.client.configuration import Configuration class UserInfo(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_id': 'str', 'account_name': 'str', 'activation_access_code': 'str', 'email': 'str', 'error_details': 'ErrorDetails', 'login_status': 'str', 'membership_id': 'str', 'send_activation_email': 'str', 'uri': 'str', 'user_id': 'str', 'user_name': 'str', 'user_status': 'str', 'user_type': 'str' } attribute_map = { 'account_id': 'accountId', 'account_name': 'accountName', 'activation_access_code': 'activationAccessCode', 'email': 'email', 'error_details': 'errorDetails', 'login_status': 'loginStatus', 'membership_id': 'membershipId', 'send_activation_email': 'sendActivationEmail', 'uri': 'uri', 'user_id': 'userId', 'user_name': 'userName', 'user_status': 'userStatus', 'user_type': 'userType' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._account_id = None self._account_name = None self._activation_access_code = None self._email = None self._error_details = None self._login_status = None self._membership_id = None self._send_activation_email = None self._uri = None self._user_id = None self._user_name = None self._user_status = None self._user_type = None self.discriminator = None setattr(self, "_{}".format('account_id'), kwargs.get('account_id', None)) setattr(self, "_{}".format('account_name'), kwargs.get('account_name', None)) setattr(self, "_{}".format('activation_access_code'), kwargs.get('activation_access_code', None)) setattr(self, "_{}".format('email'), kwargs.get('email', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('login_status'), kwargs.get('login_status', None)) setattr(self, "_{}".format('membership_id'), kwargs.get('membership_id', None)) setattr(self, "_{}".format('send_activation_email'), kwargs.get('send_activation_email', None)) setattr(self, "_{}".format('uri'), kwargs.get('uri', None)) setattr(self, "_{}".format('user_id'), kwargs.get('user_id', None)) setattr(self, "_{}".format('user_name'), kwargs.get('user_name', None)) setattr(self, "_{}".format('user_status'), kwargs.get('user_status', None)) setattr(self, "_{}".format('user_type'), kwargs.get('user_type', None)) @property def account_id(self): return self._account_id @account_id.setter def account_id(self, account_id): self._account_id = account_id @property def account_name(self): return self._account_name @account_name.setter def account_name(self, account_name): self._account_name = account_name @property def activation_access_code(self): return self._activation_access_code @activation_access_code.setter def activation_access_code(self, activation_access_code): self._activation_access_code = activation_access_code @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property def error_details(self): return self._error_details @error_details.setter def error_details(self, error_details): self._error_details = error_details @property def login_status(self): return self._login_status @login_status.setter def login_status(self, login_status): self._login_status = login_status @property def membership_id(self): return self._membership_id @membership_id.setter def 
membership_id(self, membership_id): self._membership_id = membership_id @property def send_activation_email(self): return self._send_activation_email @send_activation_email.setter def send_activation_email(self, send_activation_email): self._send_activation_email = send_activation_email @property def uri(self): return self._uri @uri.setter def uri(self, uri): self._uri = uri @property def user_id(self): return self._user_id @user_id.setter def user_id(self, user_id): self._user_id = user_id @property def user_name(self): return self._user_name @user_name.setter
MIT License
aldebaran/qibuild
python/qisrc/manifest.py
Manifest.remove_repo
python
def remove_repo(self, project_name):
    matching_repo = self.get_repo(project_name)
    if not matching_repo:
        raise Exception("No such repo:", project_name)
    self.repos.remove(matching_repo)
Remove a repo from the manifest
https://github.com/aldebaran/qibuild/blob/efea6fa3744664348717fe5e8df708a3cf392072/python/qisrc/manifest.py#L201-L206
from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import io import copy import functools import collections import six import qisys.sh import qisys.qixml import qisrc.groups import qisrc.git_config class ManifestError(Exception): pass class Manifest(object): def __init__(self, manifest_xml, review=True): self.manifest_xml = manifest_xml self.review = review self.repos = list() self.remotes = list() self.default_branch = None self.groups = qisrc.groups.Groups() self.load() def change_config(func): @functools.wraps(func) def new_func(self, *args, **kwargs): res = func(self, *args, **kwargs) self.dump() self.load() return res return new_func def load(self): project_names = list() self.repos = list() self.remotes = list() self.import_manifest = list() self.groups = qisrc.groups.Groups() root = qisys.qixml.read(self.manifest_xml).getroot() parser = ManifestParser(self) parser.parse(root) for repo in self.repos: if repo.project in project_names: raise ManifestError("%s found twice" % repo.project) project_names.append(repo.project) for remote in self.remotes: if remote.review and not self.review: continue remote.parse_url() review_remotes = list() for remote in self.remotes: if remote.review: review_remotes.append(remote) if len(review_remotes) > 1: mess = """ \ Only one remote can be configured with review="true", found {0} """.format(len(review_remotes)) raise ManifestError(mess) for import_manifest in self.import_manifest: self.set_remote(import_manifest, import_manifest.default_remote_name) srcs = dict() for repo in self.repos: if repo.src in srcs: mess = """ \ Found two projects sharing the same sources: * {0} * {1} """.format(srcs[repo.src], repo) raise ManifestError(mess) for remote_name in repo.remote_names: self.set_remote(repo, remote_name) srcs[repo.src] = repo def set_remote(self, repo, remote_name): matching_remote = self.get_remote(remote_name) if not matching_remote: raise ManifestError("No matching remote: %s for repo %s" % (remote_name, repo.project)) if matching_remote.review and not self.review: return remote = copy.copy(matching_remote) remote.url = matching_remote.prefix + repo.project if repo.default_branch is None: if self.default_branch: repo.default_branch = self.default_branch else: repo.default_branch = remote.default_branch if remote.name == repo.default_remote_name: remote.default = True if repo.fixed_ref: repo.default_branch = None repo.remotes.append(remote) def dump(self): parser = ManifestParser(self) xml_elem = parser.xml_elem() qisys.qixml.write(xml_elem, self.manifest_xml) def get_repos(self, groups=None, get_all=False): default_group = self.groups.default_group if groups is None: if default_group and not get_all: groups = [default_group.name] else: return self.repos repos = collections.OrderedDict() for group in groups: try: project_names = self.groups.projects(group) except qisrc.groups.GroupError as e: raise ManifestError(str(e)) for project_name in project_names: matching_repo = self.get_repo(project_name) if matching_repo: repos[project_name] = matching_repo else: raise ManifestError("""When reading group {0}: No such project: {1} """.format(group, project_name)) values = list() if six.PY3: values = list(repos.values()) else: values = repos.values() return values def get_repo(self, project): for repo in self.repos: if repo.project == project: return repo return None def get_remote(self, name): for remote in self.remotes: if remote.name == name: return remote return None @change_config def 
add_remote(self, name, url, review=False): remote = qisrc.git_config.Remote() remote.name = name remote.url = url remote.review = review self.remotes.append(remote) @change_config def add_repo(self, project_name, src, remote_names, default_branch="master"): repo = RepoConfig() repo.project = project_name repo.src = src repo.remote_names = remote_names repo.default_branch = default_branch self.repos.append(repo) @change_config
BSD 3-Clause New or Revised License
google/containerregistry
client/v1/docker_session_.py
Push.__init__
python
def __init__(self, name, creds, transport):
    self._name = name
    self._basic_creds = creds
    self._transport = transport
    self._top = None
Constructor.

Args:
  name: the fully-qualified name of the tag to push.
  creds: provider for authorizing requests.
  transport: the http transport to use for sending requests.

Raises:
  TypeError: an incorrectly typed argument was supplied.
https://github.com/google/containerregistry/blob/8a11dc8c53003ecf5b72ffaf035ba280109356ac/client/v1/docker_session_.py#L40-L55
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging from containerregistry.client import docker_creds from containerregistry.client import docker_name from containerregistry.client.v1 import docker_creds as v1_creds from containerregistry.client.v1 import docker_http from containerregistry.client.v1 import docker_image import httplib2 import six.moves.http_client class Push(object):
Apache License 2.0
rapid7/vm-console-client-python
rapid7vmconsole/models/policy.py
Policy.store_scap
python
def store_scap(self, store_scap):
    self._store_scap = store_scap
Sets the store_scap of this Policy. Whether Asset Reporting Format (ARF) results are stored. If you are required to submit reports of your policy scan results to the U.S. government in ARF for SCAP certification, you will need to store SCAP data so that it can be exported in this format. Note that stored SCAP data can accumulate rapidly, which can have a significant impact on file storage. Defaults to `false`. # noqa: E501 :param store_scap: The store_scap of this Policy. # noqa: E501 :type: bool
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/policy.py#L144-L153
import pprint import re import six class Policy(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'enabled': 'list[int]', 'links': 'list[Link]', 'recursive_windows_fs_search': 'bool', 'store_scap': 'bool' } attribute_map = { 'enabled': 'enabled', 'links': 'links', 'recursive_windows_fs_search': 'recursiveWindowsFSSearch', 'store_scap': 'storeSCAP' } def __init__(self, enabled=None, links=None, recursive_windows_fs_search=None, store_scap=None): self._enabled = None self._links = None self._recursive_windows_fs_search = None self._store_scap = None self.discriminator = None if enabled is not None: self.enabled = enabled if links is not None: self.links = links if recursive_windows_fs_search is not None: self.recursive_windows_fs_search = recursive_windows_fs_search if store_scap is not None: self.store_scap = store_scap @property def enabled(self): return self._enabled @enabled.setter def enabled(self, enabled): self._enabled = enabled @property def links(self): return self._links @links.setter def links(self, links): self._links = links @property def recursive_windows_fs_search(self): return self._recursive_windows_fs_search @recursive_windows_fs_search.setter def recursive_windows_fs_search(self, recursive_windows_fs_search): self._recursive_windows_fs_search = recursive_windows_fs_search @property def store_scap(self): return self._store_scap @store_scap.setter
MIT License
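A hedged sketch of how this setter is reached, based on the constructor shown in the context above; the import path is inferred from the record's function_path and the values are illustrative only.

from rapid7vmconsole.models.policy import Policy  # assumed module path

policy = Policy(recursive_windows_fs_search=True)
policy.store_scap = True   # keep ARF results so they can be exported for SCAP
print(policy.store_scap)   # True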
bobankh/auto-generate-changelog
main.py
generate_changelog
python
def generate_changelog(releases, part_name):
    info_list = []
    CHANGELOG = '# CHANGELOG\n\n'
    for release_tag in releases:
        release_info = ''
        release_commits = releases[release_tag]['commits']
        if release_tag == 'Unreleased':
            title = 'Unreleased'
            description = 'Changes unreleased.'
            release_info = f'''## {title}\n\n{description}\n\n'''
        else:
            title = release_tag
            url = releases[release_tag]['html_url']
            origin_desc = re.split(r'<!-- HIDE IN CHANGELOG BEGIN -->(?:.|\n)*?<!-- HIDE IN CHANGELOG END -->', releases[release_tag]['body'])
            if len(origin_desc) == 1:
                description = origin_desc[0]
            else:
                description = ''
                for elem in origin_desc:
                    if elem == origin_desc[0]:
                        para = re.sub(r'\n*$', r'', elem)
                        description = description + para
                    elif elem == origin_desc[-1]:
                        para = re.sub(r'^\n*', r'', elem)
                        if para == '':
                            continue
                        elif description == '':
                            description = description + para
                        else:
                            description = description + '\n\n' + para
                    else:
                        para = re.sub(r'\n*$', r'', elem)
                        para = re.sub(r'^\n*', r'', para)
                        if para == '':
                            continue
                        elif description == '':
                            description = description + para
                        else:
                            description = description + '\n\n' + para
            date = releases[release_tag]['created_at']
            if description == '':
                description = '*No description*'
            release_info = f'''## [{title}]({url}) - {date}\n\n{description}\n\n'''
        release_body = generate_release_body(release_commits, part_name)
        if release_body == '' and release_tag == 'Unreleased':
            continue
        else:
            release_info = release_info + release_body
        info_list.append(release_info)
    for j in info_list:
        CHANGELOG = CHANGELOG + j
    CHANGELOG = CHANGELOG + r'\* *This CHANGELOG was automatically generated by [auto-generate-changelog](https://github.com/BobAnkh/auto-generate-changelog)*'
    CHANGELOG = CHANGELOG + '\n'
    return CHANGELOG
Generate CHANGELOG

Args:
    releases: dict of release data
    part_name (list): a list of part_name, e.g. feat:Feature

Returns:
    string: content of CHANGELOG
https://github.com/bobankh/auto-generate-changelog/blob/920f902dddd57169f5868154681f43659b79b818/main.py#L313-L376
import argparse import base64 import os import re import github import yaml from tqdm import tqdm def argument_parser(): parser = argparse.ArgumentParser() parser.add_argument( '-m', '--mode', help= 'choose to use local-dev mode or on github action mode. Valid values are \'local\' or \'github\'', default='github') parser.add_argument( '-f', '--file', help='configuration file to read from when running local-dev mode', default='.github/workflows/changelog.yml') parser.add_argument('-o', '--output', help='output file when running local-dev mode', default='local-dev.md') parser.add_argument('-t', '--token', help='Github Access Token') args = parser.parse_args() return args def set_local_env(env_name: str, env_value: str, prefix='INPUT'): os.environ[prefix + '_{}'.format(env_name).upper()] = env_value def get_inputs(input_name: str, prefix='INPUT') -> str: return os.getenv(prefix + '_{}'.format(input_name).upper()) def set_env_from_file(file, args, prefix='INPUT'): f = open(file, encoding='utf-8') y = yaml.safe_load(f) for job in y['jobs'].values(): for step in job['steps']: if re.match(r'BobAnkh/auto-generate-changelog', step['uses']): params = step['with'] break option_params = [ 'REPO_NAME', 'ACCESS_TOKEN', 'PATH', 'COMMIT_MESSAGE', 'TYPE', 'COMMITTER' ] for param in option_params: if param not in params.keys(): if param == 'ACCESS_TOKEN' and args.token: tmp = args.token else: tmp = input('Please input the value of ' + param + ':') elif param == 'ACCESS_TOKEN': if re.match(r'\$\{\{secrets\.', params[param]): if args.token: tmp = args.token else: tmp = input('Please input the value of ' + param + ':') else: tmp = params[param] elif param == 'REPO_NAME' and params[param] == '': tmp = input('Please input the value of ' + param + ':') else: tmp = params[param] set_local_env(param, tmp, prefix) class GithubChangelog: def __init__(self, ACCESS_TOKEN, REPO_NAME, PATH, BRANCH, PULL_REQUEST, COMMIT_MESSAGE, COMMITTER): self.__commit_message = COMMIT_MESSAGE self.__path = PATH self.__branch = BRANCH self.__pull_request = PULL_REQUEST self.__sha = '' self.__releases = {} self.__changelog = '' self.__file_exists = False g = github.Github(ACCESS_TOKEN) self.__repo = g.get_repo(REPO_NAME) self.__author = github.GithubObject.NotSet if COMMITTER == '' else github.InputGitAuthor(COMMITTER.split(' ')[0], COMMITTER.split(' ')[1]) def get_data(self): releases = self.__repo.get_releases() self.__releases['Unreleased'] = {'html_url': '', 'body': '', 'created_at': '', 'commit_sha': ''} for release in releases: self.__releases[release.tag_name] = {'html_url': release.html_url, 'body': re.sub(r'\r\n', r'\n', release.body), 'created_at': release.created_at} tags = self.__repo.get_tags() for tag in tags: if tag.name in self.__releases: self.__releases[tag.name]['commit_sha'] = tag.commit.sha release_commit_sha_list = {self.__releases[x]['commit_sha']:x for x in self.__releases} release_tags = list(self.__releases.keys())[::-1] seq = 0 commits = self.__repo.get_commits(sha=self.__branch).reversed selected_commits = [] pbar = tqdm(desc='Commits progress', total=commits.totalCount) for commit in commits: message = commit.commit.message.split('\n\n') message_head = message[0] if message_head[-3:] == '...' 
and len(message) > 1: if message[1][0:3] == '...': message_head = re.sub(r' ', r' ', message_head[:-3] + ' ' + message[1].split('\n')[0][3:]) url = commit.html_url pulls = commit.get_pulls() pr_links = [] if pulls.totalCount == 0: pass else: for pull in pulls: pr = f''' ([#{pull.number}]({pull.html_url}))''' pr_links.append(pr) selected_commits.append({'head': message_head, 'sha': commit.sha, 'url': url, 'pr_links': pr_links}) if commit.sha == self.__releases[release_tags[seq]]['commit_sha']: self.__releases[release_tags[seq]]['commits'] = selected_commits[::-1] selected_commits = [] seq = seq + 1 else: if commit.sha in release_commit_sha_list: while (seq < release_tags.index(release_commit_sha_list[commit.sha])): print(f'\n[DEBUG]Skip {release_tags[seq]} because the release commit is not in the log history') self.__releases[release_tags[seq]]['commits'] = [] seq = seq + 1 if commit.sha == self.__releases[release_tags[seq]]['commit_sha']: self.__releases[release_tags[seq]]['commits'] = selected_commits[::-1] selected_commits = [] seq = seq + 1 pbar.update(1) pbar.close() while (seq < len(release_tags) - 1): print(f'\n[DEBUG]Skip {release_tags[seq]} because the release commit is not in the log history') self.__releases[release_tags[seq]]['commits'] = [] seq = seq + 1 self.__releases[release_tags[seq]]['commits'] = selected_commits[::-1] try: contents = self.__repo.get_contents(self.__path, self.__branch) except github.GithubException as e: if e.status == 404: self.__changelog = '' else: raise github.GithubException(e.status, e.data) else: self.__file_exists = True self.__path = contents.path self.__sha = contents.sha base = contents.content base = base.replace('\n', '') self.__changelog = base64.b64decode(base).decode('utf-8') def read_releases(self): return self.__releases def write_data(self, changelog): if changelog == self.__changelog: pass else: if self.__file_exists: self.__repo.update_file(self.__path, self.__commit_message, changelog, self.__sha, self.__branch, self.__author) else: self.__repo.create_file(self.__path, self.__commit_message, changelog, self.__branch, self.__author) print(f'[DEBUG] BRANCH: {self.__branch}, PULL_REQUEST: {self.__pull_request}') if self.__pull_request != '' and self.__pull_request != self.__branch: self.repo.create_pull(title=self.__commit_message, body=self.__commit_message, base=self.__pull_request, head=self.__branch, draft=False, maintainer_can_modify=True) def strip_commits(commits, type_regex): regex = r'^'+ type_regex + r'[(](.+?)[)]' scopes = {} for commit in commits: if re.match(regex, commit['head']): scope = re.findall(regex, commit['head'])[0] if scope.lower() == 'changelog' and regex == r'^docs[(](.+?)[)]': continue subject = re.sub(regex + r'\s?:\s?', '', commit['head']) if scope in scopes: scopes[scope].append({'subject': subject, 'commit': commit}) else: scopes[scope] = [] scopes[scope].append({'subject': subject, 'commit': commit}) return scopes def generate_section(release_commits, regex): section = '' scopes = strip_commits(release_commits, regex) for scope in scopes: scope_content = f'''- {scope}:\n''' for sel_commit in scopes[scope]: commit = sel_commit['commit'] sha = commit['sha'] url = commit['url'] subject = sel_commit['subject'] pr_links = commit['pr_links'] scope_content = scope_content + f''' - {subject} ([{sha[0:7]}]({url}))''' for pr_link in pr_links: scope_content = scope_content + pr_link scope_content = scope_content + '\n' section = section + scope_content + '\n' return section def generate_release_body(release_commits, 
part_name): release_body = '' for part in part_name: regex, name = part.split(':') sec = generate_section(release_commits, regex) if sec != '': release_body = release_body + '### ' + name + '\n\n' + sec return release_body
Apache License 2.0
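A hedged usage sketch for generate_changelog: the releases dict below is invented for illustration and only mimics the shape that GithubChangelog.get_data() appears to build (html_url, body, created_at, commits with head/sha/url/pr_links), and part_name entries follow the regex:Section format the code splits on.

releases = {
    'Unreleased': {'html_url': '', 'body': '', 'created_at': '', 'commits': []},
    'v1.0.0': {
        'html_url': 'https://example.com/releases/v1.0.0',   # made-up URL
        'body': 'First stable release.',
        'created_at': '2021-01-01',
        'commits': [
            {'head': 'feat(api): add export endpoint',
             'sha': 'deadbeef' * 5,
             'url': 'https://example.com/commit/deadbeef',
             'pr_links': []},
        ],
    },
}
print(generate_changelog(releases, ['feat:Feature', 'fix:Bug Fixes']))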
noxdafox/clipspy
clips/classes.py
Class.superclass
python
def superclass(self, defclass: 'Class') -> bool:
    return lib.SuperclassP(self._ptr(), defclass._ptr())
True if the Class is a superclass of the given one.
https://github.com/noxdafox/clipspy/blob/a317964dc86755619d84b9adf4008d62663889ce/clips/classes.py#L289-L291
import os import clips from clips.modules import Module from clips.common import PutSlotError, PUT_SLOT_ERROR from clips.common import CLIPSError, SaveMode, ClassDefaultMode from clips.common import environment_builder, environment_modifier from clips._clips import lib, ffi class Instance: __slots__ = '_env', '_ist' def __init__(self, env: ffi.CData, ist: ffi.CData): self._env = env self._ist = ist lib.RetainInstance(self._ist) def __del__(self): try: lib.ReleaseInstance(self._ist) except (AttributeError, TypeError): pass def __hash__(self): return hash(self._ist) def __eq__(self, ist): return self._ist == ist._ist def __str__(self): return ' '.join(instance_pp_string(self._env, self._ist).split()) def __repr__(self): string = ' '.join(instance_pp_string(self._env, self._ist).split()) return "%s: %s" % (self.__class__.__name__, string) def __iter__(self): slot_names = (s.name for s in self.instance_class.slots()) return ((n, slot_value(self._env, self._ist, n)) for n in slot_names) def __getitem__(self, slot): return slot_value(self._env, self._ist, slot) @property def name(self) -> str: return ffi.string(lib.InstanceName(self._ist)).decode() @property def instance_class(self) -> 'Class': defclass = lib.InstanceClass(self._ist) name = ffi.string(lib.DefclassName(defclass)).decode() return Class(self._env, name) def modify_slots(self, **slots): modifier = environment_modifier(self._env, 'instance') ret = lib.IMSetInstance(modifier, self._ist) if ret != lib.IME_NO_ERROR: raise CLIPSError(self._env, code=ret) for slot, slot_val in slots.items(): value = clips.values.clips_value(self._env, value=slot_val) ret = lib.IMPutSlot(modifier, str(slot).encode(), value) if ret != PutSlotError.PSE_NO_ERROR: raise PUT_SLOT_ERROR[ret](slot) if lib.IMModify(modifier) is ffi.NULL: raise CLIPSError(self._env, code=lib.IMError(self._env)) def send(self, message: str, arguments: str = None) -> type: output = clips.values.clips_value(self._env) instance = clips.values.clips_value(self._env, value=self) args = arguments.encode() if arguments is not None else ffi.NULL lib.Send(self._env, instance, message.encode(), args, output) return clips.values.python_value(self._env, output) def delete(self): ret = lib.DeleteInstance(self._ist) if ret != lib.UIE_NO_ERROR: raise CLIPSError(self._env, code=ret) def unmake(self): ret = lib.UnmakeInstance(self._ist) if ret != lib.UIE_NO_ERROR: raise CLIPSError(self._env, code=ret) class Class: __slots__ = '_env', '_name' def __init__(self, env: ffi.CData, name: str): self._env = env self._name = name.encode() def __hash__(self): return hash(self._ptr()) def __eq__(self, cls): return self._ptr() == cls._ptr() def __str__(self): string = lib.DefclassPPForm(self._ptr()) string = ffi.string(string).decode() if string != ffi.NULL else '' return ' '.join(string.split()) def __repr__(self): string = lib.DefclassPPForm(self._ptr()) string = ffi.string(string).decode() if string != ffi.NULL else '' return "%s: %s" % (self.__class__.__name__, ' '.join(string.split())) def _ptr(self) -> ffi.CData: cls = lib.FindDefclass(self._env, self._name) if cls == ffi.NULL: raise CLIPSError(self._env, 'Class <%s> not defined' % self.name) return cls @property def abstract(self) -> bool: return lib.ClassAbstractP(self._ptr()) @property def reactive(self) -> bool: return lib.ClassReactiveP(self._ptr()) @property def name(self) -> str: return ffi.string(lib.DefclassName(self._ptr())).decode() @property def module(self) -> Module: name = ffi.string(lib.DefclassModule(self._ptr())).decode() return 
Module(self._env, name) @property def deletable(self) -> bool: return lib.DefclassIsDeletable(self._ptr()) @property def watch_instances(self) -> bool: return lib.DefclassGetWatchInstances(self._ptr()) @watch_instances.setter def watch_instances(self, flag: bool): lib.DefclassSetWatchInstances(self._ptr(), flag) @property def watch_slots(self) -> bool: return lib.DefclassGetWatchSlots(self._ptr()) @watch_slots.setter def watch_slots(self, flag: bool): lib.DefclassSetWatchSlots(self._ptr(), flag) def make_instance(self, instance_name: str = None, **slots) -> Instance: builder = environment_builder(self._env, 'instance') ret = lib.IBSetDefclass(builder, lib.DefclassName(self._ptr())) if ret != lib.IBE_NO_ERROR: raise CLIPSError(self._env, code=ret) for slot, slot_val in slots.items(): value = clips.values.clips_value(self._env, value=slot_val) ret = lib.IBPutSlot(builder, str(slot).encode(), value) if ret != PutSlotError.PSE_NO_ERROR: raise PUT_SLOT_ERROR[ret](slot) instance = lib.IBMake( builder, instance_name.encode() if instance_name is not None else ffi.NULL) if instance != ffi.NULL: return Instance(self._env, instance) else: raise CLIPSError(self._env, code=lib.FBError(self._env)) def subclass(self, defclass: 'Class') -> bool: return lib.SubclassP(self._ptr(), defclass._ptr())
BSD 3-Clause New or Revised License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1beta1_eviction.py
V1beta1Eviction.kind
python
def kind(self, kind):
    self._kind = kind
Sets the kind of this V1beta1Eviction. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1beta1Eviction. # noqa: E501 :type: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1beta1_eviction.py#L126-L135
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1beta1Eviction(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'delete_options': 'V1DeleteOptions', 'kind': 'str', 'metadata': 'V1ObjectMeta' } attribute_map = { 'api_version': 'apiVersion', 'delete_options': 'deleteOptions', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, delete_options=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._delete_options = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version if delete_options is not None: self.delete_options = delete_options if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def delete_options(self): return self._delete_options @delete_options.setter def delete_options(self, delete_options): self._delete_options = delete_options @property def kind(self): return self._kind @kind.setter
Apache License 2.0
meolu/walle-web
walle/api/project.py
ProjectAPI.put
python
def put(self, project_id, action=None):
    super(ProjectAPI, self).put()
    if action and action == 'members':
        return self.members(project_id, members=json.loads(request.data.decode('utf-8')))
    form = ProjectForm(request.form, csrf=False)
    form.set_id(project_id)
    if form.validate_on_submit():
        server = ProjectModel().get_by_id(project_id)
        repo_url_origin = server.repo_url
        data = form.form2dict()
        ret = server.update(data)
        if repo_url_origin != data['repo_url']:
            dir_codebase_project = current_app.config.get('CODE_BASE') + str(project_id)
            if os.path.exists(dir_codebase_project):
                shutil.rmtree(dir_codebase_project)
        return self.render_json(data=server.item())
    else:
        return self.render_error(code=Code.form_error, message=form.errors)
update project /project/<int:id>

:return:
https://github.com/meolu/walle-web/blob/f96dc41ed882782d52ec62fddbe0213b8b9158ec/walle/api/project.py#L109-L137
import json import os, shutil from flask import request, abort from walle.api.api import SecurityResource from walle.form.project import ProjectForm from walle.model.member import MemberModel from walle.model.project import ProjectModel from walle.service.extensions import permission from walle.service.rbac.role import * from walle.service.deployer import Deployer class ProjectAPI(SecurityResource): actions = ['members', 'copy', 'detection'] @permission.upper_reporter def get(self, action=None, project_id=None): super(ProjectAPI, self).get() return self.item(project_id) if project_id else self.list() def list(self): page = int(request.args.get('page', 0)) page = page - 1 if page else 0 size = int(request.args.get('size', 10)) kw = request.values.get('kw', '') environment_id = request.values.get('environment_id', '') project_model = ProjectModel() project_list, count = project_model.list(page=page, size=size, kw=kw, environment_id=environment_id, space_id=self.space_id) return self.list_json(list=project_list, count=count, enable_create=permission.role_upper_master() and current_user.role != SUPER) def item(self, project_id): project_model = ProjectModel(id=project_id) current_app.logger.info(project_id) project_info = project_model.item(id=project_id) current_app.logger.info(project_info) if not project_info: return self.render_json(code=-1) project_info['members'], count, project_info['user_uids'] = MemberModel().members(project_id=project_id) return self.render_json(data=project_info) @permission.upper_developer def post(self, action=None, project_id=None): super(ProjectAPI, self).post() if action is None: return self.create() if action in self.actions: self_action = getattr(self, action.lower(), None) return self_action(project_id) else: abort(404) def create(self): form = ProjectForm(request.form, csrf=False) if form.validate_on_submit(): project = ProjectModel() data = form.form2dict() project_new = project.add(data) if not project_new: return self.render_json(code=-1) return self.render_json(data=project_new) else: return self.render_error(code=Code.form_error, message=form.errors) @permission.upper_developer
Apache License 2.0
hema-ted/pycdft
pycdft/dft_driver/base.py
DFTDriver.run_opt
python
def run_opt(self):
    pass
Order the DFT code to run one structure relaxation step.
https://github.com/hema-ted/pycdft/blob/d6f54dcfbee309ba310791b39119257b66c5560d/pycdft/dft_driver/base.py#L49-L51
from abc import ABCMeta, abstractmethod from pycdft.common.sample import Sample class DFTDriver(object): __metaclass__ = ABCMeta def __init__(self, sample: Sample): self.sample = sample self.istep = None self.icscf = None self.output_path = None @abstractmethod def reset(self, output_path): pass @abstractmethod def set_Vc(self, Vc): pass @abstractmethod def run_scf(self): pass @abstractmethod
MIT License
celery/django-celery
djcelery/humanize.py
naturaldate
python
def naturaldate(date, include_seconds=False):
    if not date:
        return ''
    right_now = now()
    today = datetime(right_now.year, right_now.month, right_now.day, tzinfo=right_now.tzinfo)
    delta = right_now - date
    delta_midnight = today - date
    days = delta.days
    hours = delta.seconds // 3600
    minutes = delta.seconds // 60
    seconds = delta.seconds
    if days < 0:
        return _('just now')
    if days == 0:
        if hours == 0:
            if minutes > 0:
                return ungettext(
                    _('{minutes} minute ago'),
                    _('{minutes} minutes ago'), minutes
                ).format(minutes=minutes)
            else:
                if include_seconds and seconds:
                    return ungettext(
                        _('{seconds} second ago'),
                        _('{seconds} seconds ago'), seconds
                    ).format(seconds=seconds)
                return _('just now')
        else:
            return ungettext(
                _('{hours} hour ago'),
                _('{hours} hours ago'), hours
            ).format(hours=hours)
    if delta_midnight.days == 0:
        return _('yesterday at {time}').format(time=date.strftime('%H:%M'))
    count = 0
    for chunk, pluralizefun in OLDER_CHUNKS:
        if days >= chunk:
            count = int(round((delta_midnight.days + 1) / chunk, 0))
            fmt = pluralizefun(count)
            return fmt.format(num=count)
Convert datetime into a human natural date string.
https://github.com/celery/django-celery/blob/a78b3a5e0d3b28ee550c70cc509d608744c88337/djcelery/humanize.py#L38-L85
from __future__ import absolute_import, unicode_literals from datetime import datetime from django.utils.translation import ungettext, ugettext as _ from .utils import now def pluralize_year(n): return ungettext(_('{num} year ago'), _('{num} years ago'), n) def pluralize_month(n): return ungettext(_('{num} month ago'), _('{num} months ago'), n) def pluralize_week(n): return ungettext(_('{num} week ago'), _('{num} weeks ago'), n) def pluralize_day(n): return ungettext(_('{num} day ago'), _('{num} days ago'), n) OLDER_CHUNKS = ( (365.0, pluralize_year), (30.0, pluralize_month), (7.0, pluralize_week), (1.0, pluralize_day), ) def _un(singular__plural, n=None): singular, plural = singular__plural return ungettext(singular, plural, n)
BSD 3-Clause New or Revised License
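A brief hedged example of how naturaldate behaves, assuming Django's translation machinery is configured (the exact strings come from the ungettext templates above); the timestamps are invented, and now() is the helper imported at the top of humanize.py.

from datetime import timedelta

print(naturaldate(now() - timedelta(minutes=5)))   # "5 minutes ago"
print(naturaldate(now() - timedelta(days=1)))      # "yesterday at HH:MM"
print(naturaldate(now() - timedelta(days=40)))     # "1 month ago"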
walkerning/aw_nas
aw_nas/utils/box_utils.py
encode
python
def encode(matched, priors, variances):
    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2
    p_cxcy = (priors[:, :2] + priors[:, 2:]) / 2
    p_wh = priors[:, 2:] - priors[:, :2]
    g_cxcy -= p_cxcy
    g_cxcy /= (variances[0] * p_wh)
    g_wh = (matched[:, 2:] - matched[:, :2]) / p_wh
    g_wh = torch.log(g_wh) / variances[1]
    return torch.cat([g_cxcy, g_wh], 1)
Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes.

Args:
    matched: (tensor) Coords of ground truth for each prior in point-form
        Shape: [num_priors, 4].
    priors: (tensor) Prior boxes in center-offset form
        Shape: [num_priors, 4].
    variances: (list[float]) Variances of priorboxes

Return:
    encoded boxes (tensor), Shape: [num_priors, 4]
https://github.com/walkerning/aw_nas/blob/8a32196ce342b8ad9e3885895735d1286e25beba/aw_nas/utils/box_utils.py#L34-L58
import collections import itertools import math import numpy as np import torch import torch.nn as nn def decode(loc, priors, variances): p_cxcy = (priors[:, :2] + priors[:, 2:]) / 2 p_wh = priors[:, 2:] - priors[:, :2] boxes = torch.cat( (p_cxcy + loc[:, :2] * variances[0] * p_wh, p_wh * torch.exp(loc[:, 2:] * variances[1])), 1) boxes[:, :2] -= boxes[:, 2:] / 2 boxes[:, 2:] += boxes[:, :2] return boxes
MIT License
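Since decode() from the same file appears in the context above, a quick hedged round-trip check with made-up boxes shows what the encoding represents: offsets of the matched box's center and log-scaled width/height relative to the prior, divided by the variances.

import torch

# Toy boxes, values invented for illustration.
priors = torch.tensor([[0.10, 0.10, 0.50, 0.50]])
matched = torch.tensor([[0.20, 0.20, 0.60, 0.60]])
variances = [0.1, 0.2]

loc = encode(matched, priors, variances)
print(decode(loc, priors, variances))   # should recover ~[[0.20, 0.20, 0.60, 0.60]]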
raxod502/mercury
mercury/_api.py
Service.login
python
def login(self, fields):
    pass
Given values for the fields returned by get_login_fields (a map where the keys are the "field" values from get_login_fields and the values are the strings provided by the user), try to do a login. Throw LoginRequiredError if the auth fails, ServiceError on unexpected error.
https://github.com/raxod502/mercury/blob/a102cbd87040863225802157981e8f532c523c9b/mercury/_api.py#L68-L76
import abc class LoginRequiredError(Exception): pass class ServiceError(Exception): def __init__(self, fmt, *args, **kwargs): super().__init__(fmt.format(*args, **kwargs)) class Service(abc.ABC): @abc.abstractmethod def __init__(self): pass @abc.abstractmethod def get_session(self): pass @abc.abstractmethod def restore_session(self, session): pass @abc.abstractmethod def get_login_fields(self): pass @abc.abstractmethod
MIT License
cslotboom/hysteresis
doc/readthedocs/hysteresis/baseClass.py
CurveBase.getNetArea
python
def getNetArea(self, startIndex = 0, endIndex = 0):
    Area = self.area
    if endIndex == 0:
        endIndex = self.Npoints
    return np.sum(Area[startIndex:endIndex])
Returns the net area between two indexes in the xy curve. The default setting is to return the area for the whole curve.
https://github.com/cslotboom/hysteresis/blob/2c41dc9b551a8e9332318a90f0f2477bec163b4d/doc/readthedocs/hysteresis/baseClass.py#L116-L127
import numpy as np from numpy import trapz from scipy.interpolate import interp1d from scipy.signal import find_peaks from hysteresis import data import hysteresis.env as env import matplotlib.pyplot as plt class CurveBase: def __init__(self, XYData, xunit = '', yunit = ''): self.xy = XYData self.Npoints = len(XYData[:,0]) self.areaFunction = env.environment.fArea self.slopeFunction = env.environment.fslope self.lengthFunction = env.environment.flength self.plotFunction = env.environment.fplot self.initializeFig = env.environment.finit self.showCycles = env.environment.fcycles self.colorDict = {0:'C0', 1:'C1', 2:'C3'} self.xunit = xunit self.yunit = yunit def __len__(self): return len(self.xy[:,0]) def setArea(self): self.area = self.areaFunction(self.xy) return self.area def getCumDisp(self): dx = np.diff(self.xy[:,0]) return np.append(0, np.cumsum(np.abs(dx))) def getNetCumDisp(self, startIndex = 0, endIndex = 0): x = self.xy[:,0] dx = np.append(0, np.diff(self.xy[:,0])) if endIndex == 0: endIndex = self.Npoints return np.sum(np.abs(dx[startIndex:endIndex])) def getCumArea(self): Area = self.area return np.cumsum(Area)
Apache License 2.0
kcl-bmeis/exetera
exetera/core/dataset.py
HDF5Dataset.__len__
python
def __len__(self):
    return len(self._dataframes)
Return the number of dataframes stored in this dataset.
https://github.com/kcl-bmeis/exetera/blob/2149a386b79ae2e6538edee54361614d025ee3b3/exetera/core/dataset.py#L266-L268
from typing import Optional import h5py from exetera.core.abstract_types import DataFrame, Dataset from exetera.core import dataframe as edf class HDF5Dataset(Dataset): def __init__(self, session, dataset_path, mode, name): self.name = name self._session = session self._file = h5py.File(dataset_path, mode) self._dataframes = dict() for group in self._file.keys(): if group not in ('trash',): h5group = self._file[group] dataframe = edf.HDF5DataFrame(self, group, h5group=h5group) self._dataframes[group] = dataframe @property def session(self): return self._session def create_group(self, name: str): return self.create_dataframe(name) def create_dataframe(self, name: str, dataframe: Optional[DataFrame] = None): if dataframe is not None: if not isinstance(dataframe, DataFrame): raise ValueError("If set, 'dataframe' must be of type DataFrame " "but is of type {}".format(type(dataframe))) self._file.create_group(name) h5group = self._file[name] _dataframe = edf.HDF5DataFrame(self, name, h5group) if dataframe is not None: for k, v in dataframe.items(): f = v.create_like(_dataframe, k) if f.indexed: f.indices.write(v.indices[:]) f.values.write(v.values[:]) else: f.data.write(v.data[:]) self._dataframes[name] = _dataframe return _dataframe def require_dataframe(self, name): if self.__contains__(name): return self._dataframes[name] else: return self.create_dataframe(name) def close(self): self._file.close() def copy(self, dataframe, name): copy(dataframe, self, name) def __contains__(self, name: str): return name in self._dataframes def contains_dataframe(self, dataframe: DataFrame): if not isinstance(dataframe, DataFrame): raise TypeError("The field must be a DataFrame object") else: for v in self._dataframes.values(): if id(dataframe) == id(v): return True return False def __getitem__(self, name: str): if not isinstance(name, str): raise TypeError("The name must be a str object.") elif not self.__contains__(name): raise ValueError("Can not find the name from this dataset.") else: return self._dataframes[name] def get_dataframe(self, name: str): return self.__getitem__(name) def __setitem__(self, name: str, dataframe: DataFrame): if not isinstance(name, str): raise TypeError("The name must be a str object.") if not isinstance(dataframe, edf.DataFrame): raise TypeError("The field must be a DataFrame object.") if dataframe.dataset == self: del self._dataframes[dataframe.name] dataframe.name = name self._file.move(dataframe.h5group.name, name) else: copy(dataframe, self, name) def __delitem__(self, name: str): if not self.__contains__(name): raise ValueError("This dataframe does not contain the name to delete.") else: del self._dataframes[name] del self._file[name] return True def delete_dataframe(self, dataframe: DataFrame): name = dataframe.name if name is None: raise ValueError("This dataframe does not contain the field to delete.") else: self.__delitem__(name) def drop(self, name: str): del self._dataframes[name] del self._file[name] def keys(self): return self._dataframes.keys() def values(self): return self._dataframes.values() def items(self): return self._dataframes.items() def __iter__(self): return iter(self._dataframes) def __next__(self): return next(self._dataframes)
Apache License 2.0
mila-iqia/platoon
platoon/training/global_dynamics.py
Downpour.make_rule
python
def make_rule(self, local_particle, local_acc_updates, global_particle):
    import theano
    from theano.tensor import basic
    if isinstance(local_particle, theano.compile.SharedVariable):
        local_particle = [local_particle]
    if isinstance(local_acc_updates, theano.compile.SharedVariable):
        local_acc_updates = [local_acc_updates]
    if isinstance(global_particle, theano.compile.SharedVariable):
        global_particle = [global_particle]
    new_global = []
    new_local = []
    new_acc_updates = []
    for lp, lau, gp in zip(local_particle, local_acc_updates, global_particle):
        global_acc_updates = AllReduceSum(lau, inplace=True)
        if self.average:
            global_acc_updates /= self.worker.global_size
        new_global.append(gp + global_acc_updates)
        new_local.append(new_global[-1])
        new_acc_updates.append(basic.zeros_like(lau))
    updates = (list(zip(local_particle, new_local)) +
               list(zip(local_acc_updates, new_acc_updates)) +
               list(zip(global_particle, new_global)))
    self._fn = theano.function([], [], updates=updates, accept_inplace=True)
Make Downpour rule.

All particles along with the global particle start from the same position. According to this rule, each local particle executes descent normally but their parameter updates are accumulated (e.g. by moving average) to a variable. Every N iterations, the local accumulated updates are added together and applied to the global particle. Each local particle restarts from global particle's position.

Parameters
----------
local_particle : {:ref:`theano.compile.SharedVariable`, list of :ref:`theano.compile.SharedVariable`}
    A particle's position in parameter space doing local SGD.
local_acc_updates : {:ref:`theano.compile.SharedVariable`, list of :ref:`theano.compile.SharedVariable`}
    Shared variable accumulating local parameter updates.
global_particle : {:ref:`theano.compile.SharedVariable`, list of :ref:`theano.compile.SharedVariable`}
    A particle whose position is updated only by the Downpour process and resets position of local particles.

.. seealso:: Notes on :meth:`GlobalDynamics.make_rule`
https://github.com/mila-iqia/platoon/blob/4cd26f4235da06967e20679e4cf769b423269165/platoon/training/global_dynamics.py#L274-L324
from __future__ import absolute_import, division from ..channel.worker import Worker from ..ops import AllReduceSum class GlobalDynamics(object): def __init__(self, worker=None): self._worker = None if worker is not None: self.worker = worker self._fn = None def __call__(self): if self._fn is None: raise NotImplementedError("Functionality has not been specified.\n" "Please use {} method to setup GlobalDynamics" "for a set of Variables\nor supply your own" "using {} method.".format( repr(self.make_rule), repr(self.fn))) self._fn() @property def worker(self): if self._worker is None: try: self._worker = Worker() except TypeError: raise AttributeError("Worker instance has not been created yet.") return self._worker @worker.setter def worker(self, inst): if not isinstance(inst, Worker): raise TypeError("Argument `inst` is not of platoon.Worker type.") self._worker = inst def register_fn(self, fun): if not hasattr(fun, '__call__'): raise TypeError("Supplied object is not a callable.") self._fn = fun def make_rule(self, *args): raise NotImplementedError(self.make_rule.__doc__) class _GlobalDynamicsNoSet(GlobalDynamics): def register_fn(self, fun): raise AttributeError("Cannot set internal function. Use {} method.".format( repr(self.make_rule))) class SGD(_GlobalDynamicsNoSet): def __init__(self, average=False, worker=None): self.average = average super(SGD, self).__init__(worker) def make_rule(self, local_updates): import theano if isinstance(local_updates, theano.compile.SharedVariable): local_updates = [local_updates] global_updates = [] for update in local_updates: gup = AllReduceSum(update, inplace=True) if self.average: gup /= self.worker.global_size global_updates.append(gup) self._fn = theano.function([], [], updates=list(zip(local_updates, global_updates)), accept_inplace=True) def SumSGD(worker=None): return SGD(average=False, worker=worker) def AverageSGD(worker=None): return SGD(average=True, worker=worker) class EASGD(_GlobalDynamicsNoSet): def make_rule(self, local_particle, central_particle, alpha): import theano if isinstance(local_particle, theano.compile.SharedVariable): local_particle = [local_particle] if isinstance(central_particle, theano.compile.SharedVariable): central_particle = [central_particle] self.alpha = alpha new_local = [] new_central = [] for local_position, central_position in zip(local_particle, central_particle): distance = local_position - central_position elastic_force = alpha * distance local_new_position = local_position - elastic_force total_elastic_force = AllReduceSum(elastic_force, inplace=True) central_new_position = central_position + total_elastic_force new_local.append(local_new_position) new_central.append(central_new_position) updates = list(zip(local_particle, new_local)) + list(zip(central_particle, new_central)) self._fn = theano.function([], [], updates=updates, accept_inplace=True) class Downpour(_GlobalDynamicsNoSet): def __init__(self, average=False, worker=None): self.average = average super(Downpour, self).__init__(worker)
MIT License
googlecloudplatform/professional-services-data-validator
data_validation/data_validation.py
DataValidation.execute
python
def execute(self):
    if (
        self.config_manager.is_grouped_row_validation
        or self.config_manager.validation_type == "Row"
    ):
        grouped_fields = self.validation_builder.pop_grouped_fields()
        result_df = self.execute_recursive_validation(
            self.validation_builder, grouped_fields
        )
    elif self.config_manager.validation_type == consts.SCHEMA_VALIDATION:
        result_df = self.schema_validator.execute()
    else:
        result_df = self._execute_validation(
            self.validation_builder, process_in_memory=True
        )
    return self.result_handler.execute(self.config, result_df)
Execute Queries and Store Results
https://github.com/googlecloudplatform/professional-services-data-validator/blob/781d8bf259ba2864e05c40dfe2a69d6c954cfec6/data_validation/data_validation.py#L80-L99
import datetime import json import logging import warnings import ibis.backends.pandas import pandas import numpy from data_validation import consts, combiner, metadata from data_validation.config_manager import ConfigManager from data_validation.validation_builder import ValidationBuilder from data_validation.schema_validation import SchemaValidation class DataValidation(object): def __init__( self, config, validation_builder=None, schema_validator=None, result_handler=None, verbose=False, ): self.verbose = verbose self.config = config self.config_manager = ConfigManager(config, verbose=self.verbose) self.run_metadata = metadata.RunMetadata() self.run_metadata.labels = self.config_manager.labels self.validation_builder = validation_builder or ValidationBuilder( self.config_manager ) self.schema_validator = schema_validator or SchemaValidation( self.config_manager, run_metadata=self.run_metadata, verbose=self.verbose ) self.result_handler = result_handler or self.config_manager.get_result_handler()
Apache License 2.0
pybrain2/pybrain2
pybrain/rl/learners/modelbased/policyiteration.py
policyIteration
python
def policyIteration(Ts, R, discountFactor, VEvaluator=None, initpolicy=None, maxIters=20):
    if initpolicy is None:
        policy, T = randomPolicy(Ts)
    else:
        policy = initpolicy
        T = collapsedTransitions(Ts, policy)
    if VEvaluator is None:
        VEvaluator = lambda T: trueValues(T, R, discountFactor)
    while maxIters > 0:
        V = VEvaluator(T)
        newpolicy, T = greedyPolicy(Ts, R, discountFactor, V)
        if sum(ravel(abs(newpolicy - policy))) < 1e-3:
            return policy, T
        policy = newpolicy
        maxIters -= 1
    return policy, T
Given transition matrices (one per action), produce the optimal policy, using the policy iteration algorithm. A custom function that maps policies to value functions can be provided.
https://github.com/pybrain2/pybrain2/blob/33ead60704d126e58c10d458ddd1e5e5fd17b65d/pybrain/rl/learners/modelbased/policyiteration.py#L121-L143
__author__ = 'Tom Schaul, tom@idsia.ch' from scipy import dot, zeros, zeros_like, ones, mean, array, ravel, rand from numpy.matlib import repmat from pybrain.utilities import all_argmax def trueValues(T, R, discountFactor): assert discountFactor < 1 distr = T.copy() res = dot(T, R) for i in range(1, int(10 / (1. - discountFactor))): distr = dot(distr, T) res += (discountFactor ** i) * dot(distr, R) return res def trueQValues(Ts, R, discountFactor, policy): T = collapsedTransitions(Ts, policy) V = trueValues(T, R, discountFactor) Vnext = V*discountFactor+R numA = len(Ts) dim = len(R) Qs = zeros((dim, numA)) for si in range(dim): for a in range(numA): Qs[si, a] = dot(Ts[a][si], Vnext) return Qs def collapsedTransitions(Ts, policy): res = zeros_like(Ts[0]) dim = len(Ts[0]) for ai, ap in enumerate(policy.T): res += Ts[ai] * repmat(ap, dim, 1).T return res def greedyPolicy(Ts, R, discountFactor, V): dim = len(V) numA = len(Ts) Vnext = V*discountFactor+R policy = zeros((dim, numA)) for si in range(dim): actions = all_argmax([dot(T[si, :], Vnext) for T in Ts]) for a in actions: policy[si, a] = 1. / len(actions) return policy, collapsedTransitions(Ts, policy) def greedyQPolicy(Qs): dim = len(Qs) numA = len(Qs[0]) policy = zeros((dim, numA)) for si in range(dim): actions = all_argmax(Qs[si]) for a in actions: policy[si, a] = 1. / len(actions) return policy def randomPolicy(Ts): numA = len(Ts) dim = len(Ts[0]) return ones((dim, numA)) / float(numA), mean(array(Ts), axis=0) def randomDeterministic(Ts): numA = len(Ts) dim = len(Ts[0]) choices = (rand(dim) * numA).astype(int) policy = zeros((dim, numA)) for si, a in choices: policy[si, a] = 1 return policy, collapsedTransitions(Ts, policy)
BSD 3-Clause New or Revised License
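A small hedged sketch on a toy two-state MDP (transition matrices and rewards invented for illustration), assuming policyIteration is imported from the module in this record: action 0 keeps the agent in place, action 1 swaps states, and only state 1 pays a reward, so the greedy policy should swap in state 0 and stay in state 1.

import numpy as np

Ts = [np.array([[1., 0.], [0., 1.]]),   # action 0: stay
      np.array([[0., 1.], [1., 0.]])]   # action 1: swap states
R = np.array([0., 1.])                  # only state 1 is rewarding

policy, T = policyIteration(Ts, R, discountFactor=0.9)
print(policy)   # expected roughly [[0, 1], [1, 0]]: swap in state 0, stay in state 1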
renmengye/imageqa-qgen
question_generator.py
QuestionGenerator.splitCCStructure
python
def splitCCStructure(self, root):
    roots = []
    node = root.children[0]
    if node.className == 'S':
        if len(node.children) >= 3:
            childrenClasses = []
            for child in node.children:
                childrenClasses.append(child.className)
            renew = True
            index = 0
            for c in childrenClasses:
                if c == 'S' and renew:
                    root_ = TreeNode('ROOT', '', [node.children[index]], 0)
                    root_.relevel(0)
                    roots.append(root_)
                elif c == 'CC':
                    renew = True
                index += 1
    if len(roots) == 0:
        roots.append(root)
    return roots
Split composite sentences.
https://github.com/renmengye/imageqa-qgen/blob/0fe64f42f52b1e587bad9d23d810d34187b9b75e/question_generator.py#L583-L606
from nltk.corpus import wordnet from nltk.stem.wordnet import WordNetLemmatizer import argparse import copy import cPickle as pkl import logger import os import re import subprocess import sys import time whiteListColorAdj = set(['red', 'yellow', 'orange', 'brown', 'green', 'blue', 'purple', 'black', 'white', 'gray', 'grey', 'violet']) whiteListLexname = set(['noun.animal', 'noun.artifact', 'noun.food', 'noun.object', 'noun.plant', 'noun.possession', 'noun.shape']) blackListColorNoun = set(['ride', 'riding', 'past', 'stand', 'standing', 'eating', 'holding', 'frosting', 'glow', 'glowing', 'resting', 'parked']) blackListNoun = set(['female', 'females', 'male', 'males', 'commuter', 'commuters', 'player', 'players', 'match', 'matches', 'rider', 'riders', 'doll', 'dolls', 'ride', 'rides', 'riding', 'past', 'pasts', 'teddy', 'fan', 'fans', 'street', 'streets', 'arm', 'arms', 'head', 'heads', 'slope', 'slopes', 'shoot', 'shoots', 'photo', 'photos', 'space', 'spaces', 'stand', 'stands', 'standing', 'cross', 'crosses', 'crossing', 'eating', 'walking', 'driving', 'upright', 'structure', 'turn', 'system', 'arrangement', 'set', 'top', 'while', 'well', 'area', 'produce', 'thing', 'things', 'cut', 'cuts', 'holding', 'frosting', 'glow', 'glowing', 'ground', 'parked']) blackListCompoundNoun = set(['tennis', 'delivery', 'soccer', 'baseball', 'fighter', 'mother', 'window']) blackListVerb = set(['sink', 'sinks', 'counter', 'counters', 'cupboard', 'cupboards', 'has', 'have', 'contain', 'contains', 'containing', 'contained', 'spaniel', 'spaniels', 'mirror', 'mirrors', 'shower', 'showers', 'stove', 'stoves', 'bowl', 'bowls', 'tile', 'tiles', 'mouthwash', 'mouthwashes', 'smoke', 'smokes']) blackListPrep = set(['with', 'of', 'in', 'down', 'as']) blackListLocation = set(['t-shirt', 't-shirts', 'jeans', 'shirt', 'shirts', 'uniform', 'uniforms', 'jacket', 'jackets', 'dress', 'dresses', 'hat', 'hats', 'tie', 'ties', 'costume', 'costumes', 'attire', 'attires', 'match', 'matches', 'coat', 'coats', 'cap', 'caps', 'gear', 'gears', 'sweatshirt', 'sweatshirts', 'helmet', 'helmets', 'clothing', 'clothings', 'cloth', 'clothes', 'blanket', 'blankets', 'enclosure', 'enclosures', 'suit', 'suits', 'photo', 'photos', 'picture', 'pictures', 'round', 'rounds', 'area', 'well', 'skirt', 'snowsuit', 'sunglasses', 'sweater', 'mask', 'frisbee', 'frisbees', 'shoe', 'umbrella', 'towel', 'scarf', 'phone', 'cellphone', 'motorcycle', 'device', 'computer', 'cake', 'hydrant', 'desk', 'stove', 'sculpture', 'lamp', 'fireplace', 'bags', 'laptop', 'trolley', 'toy', 'bus', 'counter', 'buffet', 'engine', 'graffiti', 'clock', 'jet', 'ramp', 'brick', 'taxi', 'knife', 'flag', 'screen', 'parked']) blackListVerbLocation = set(['sink', 'sinks', 'counter', 'counters', 'cupboard', 'cupboards', 'has', 'have', 'contain', 'contains', 'containing', 'contained', 'can', 'cans']) blackListNumberNoun = set(['pole', 'vase', 'kite', 'hay', 'shower', 'paddle', 'buffet', 'bicycle', 'bike', 'elephants']) synonymConvert = {'busses': 'buses', 'plane': 'airplane', 'planes': 'airplanes', 'aircraft': 'airplane', 'aircrafts': 'airplane', 'jetliner': 'airliner', 'jetliners': 'airliners', 'bike': 'bicycle', 'bikes': 'bicycles', 'cycle': 'bicycle', 'cycles': 'bicycles', 'motorbike': 'motorcycle', 'motorbikes': 'motorcycles', 'grey': 'gray', 'railroad': 'rail', 'cell': 'cellphone', 'doughnut': 'donut', 'doughnuts': 'donuts'} compoundNoun = set(['fighter jet', 'soccer ball', 'tennis ball']) charText = set(['.', ',', '-', '\'', '`', '/', '>', ':', ';', '\\', '!', '?', '&', '-', 
'=', '#', '$', '@', '_', '*', '+', '%', chr(194), chr(160)]) charClassName = set(['.', ',', '$', '\'', '`', ':', '-', '#']) lemmatizer = WordNetLemmatizer() log = logger.get() class TreeNode: def __init__(self, className, text, children, level): self.className = className self.text = text self.children = children self.level = level pass def __str__(self): strlist = [] for i in range(self.level): strlist.append(' ') strlist.extend(['(', self.className]) if len(self.children) > 0: strlist.append('\n') for child in self.children: strlist.append(child.__str__()) if len(self.text) > 0: for i in range(self.level + 1): strlist.append(' ') else: for i in range(self.level): strlist.append(' ') else: strlist.append(' ') strlist.append(self.text) strlist.append(')\n') return ''.join(strlist) def toSentence(self): strlist = [] for child in self.children: childSent = child.toSentence() if len(childSent) > 0: strlist.append(childSent) if len(self.text) > 0: strlist.append(self.text) return ' '.join(strlist) def relevel(self, level): self.level = level for child in self.children: child.relevel(level + 1) def copy(self): children = [] for child in self.children: children.append(child.copy()) return TreeNode(self.className, self.text, children, self.level) class TreeParser: def __init__(self): self.state = 0 self.currentClassStart = 0 self.currentTextStart = 0 self.classNameStack = [] self.childrenStack = [[]] self.root = None self.rootsList = [] self.level = 0 self.stateTable = [self.state0, self.state1, self.state2, self.state3, self.state4, self.state5, self.state6] self.raw = None self.state = 0 def parse(self, raw): if not self.isAlpha(raw[0]): self.raw = raw for i in range(len(raw)): self.state = self.stateTable[self.state](i) @staticmethod def isAlpha(c): return 65 <= ord(c) <= 90 or 97 <= ord(c) <= 122 @staticmethod def isNumber(c): return 48 <= ord(c) <= 57 @staticmethod def exception(raw, i): print raw raise Exception( 'Unexpected character "%c" (%d) at position %d' % (raw[i], ord(raw[i]), i)) @staticmethod def isClassName(s): if TreeParser.isAlpha(s) or s in charClassName: return True else: return False @staticmethod def isText(s): if TreeParser.isAlpha(s) or TreeParser.isNumber(s) or s in charText: return True else: return False def state0(self, i): if self.raw[i] == '(': return 1 else: return 0 def state1(self, i): if self.isClassName(self.raw[i]): self.currentClassStart = i self.level += 1 self.childrenStack.append([]) return 2 else: self.exception(self.raw, i) def state2(self, i): if self.isClassName(self.raw[i]): return 2 else: self.classNameStack.append(self.raw[self.currentClassStart:i]) if self.raw[i] == ' ' and self.raw[i + 1] == '(': return 0 elif self.raw[i] == ' ' and self.isText(self.raw[i + 1]): return 4 elif self.raw[i] == '\n': return 3 else: self.exception(self.raw, i) def state3(self, i): if self.raw[i] == ' ' and self.raw[i + 1] == '(': return 0 elif self.raw[i] == ' ' and self.raw[i + 1] == ' ': return 3 elif self.raw[i] == ' ' and self.isText(self.raw[i + 1]): return 4 else: return 3 def state4(self, i): if self.isText(self.raw[i]): self.currentTextStart = i return 5 else: self.exception(self.raw, i) def state5(self, i): if self.isText(self.raw[i]): return 5 elif i == len(self.raw) - 1: return 5 elif self.raw[i] == ')': self.wrapup(self.raw[self.currentTextStart:i]) if self.level == 0: return 0 elif self.raw[i + 1] == ')': return 6 else: return 3 else: self.exception(self.raw, i) def state6(self, i): if self.level == 0: return 0 elif self.raw[i] == ')': self.wrapup('') 
return 6 else: return 3 def wrapup(self, text): self.level -= 1 root = TreeNode(self.classNameStack[-1], text, self.childrenStack[-1][:], self.level) del self.childrenStack[-1] del self.classNameStack[-1] self.childrenStack[-1].append(root) if self.level == 0: self.rootsList.append(root) class QuestionGenerator: def __init__(self): self.lexnameDict = {} pass @staticmethod def escapeNumber(line): line = re.sub('^11$', 'eleven', line) line = re.sub('^12$', 'twelve', line) line = re.sub('^13$', 'thirteen', line) line = re.sub('^14$', 'fourteen', line) line = re.sub('^15$', 'fifteen', line) line = re.sub('^16$', 'sixteen', line) line = re.sub('^17$', 'seventeen', line) line = re.sub('^18$', 'eighteen', line) line = re.sub('^19$', 'nineteen', line) line = re.sub('^20$', 'twenty', line) line = re.sub('^10$', 'ten', line) line = re.sub('^0$', 'zero', line) line = re.sub('^1$', 'one', line) line = re.sub('^2$', 'two', line) line = re.sub('^3$', 'three', line) line = re.sub('^4$', 'four', line) line = re.sub('^5$', 'five', line) line = re.sub('^6$', 'six', line) line = re.sub('^7$', 'seven', line) line = re.sub('^8$', 'eight', line) line = re.sub('^9$', 'nine', line) return line def whMovement(self, root): stack = [[]] found = [False] def traverseFindTopClass(node, className): if not found[0]: stack[0].append(node) if node.className == className: found[0] = True else: for child in node.children: traverseFindTopClass(child, className) if not found[0]: del stack[0][-1] traverseFindTopClass(root, 'NP') topNoun = None if found[0]: np = stack[0][-1] while np.className != 'DT' and len(np.children) > 0: np = np.children[0] if np.className == 'DT' and np.text.lower() == 'a': np.text = 'the' np = stack[0][-1] def lookForNoun(np): if len(np.children) > 0: for child in np.children: answer = lookForNoun(child) if (answer != None): return answer return None else: if np.className == 'NN' or np.className == 'NNS': return np else: return None topNoun = lookForNoun(np) found[0] = False stack[0] = [] traverseFindTopClass(root, 'VP') topVP = None if found[0]: topVP = stack[0][-1] found[0] = False stack[0] = [] traverseFindTopClass(root, 'WHNP') if not found[0]: return False insideSBar = False insideNP = False insideVP = False whStack = stack[0][:] whPosition = len(whStack) - 1 for item in whStack: if item.className == 'SBAR': insideSBar = True elif item.className == 'NP' and item.level > 1: insideNP = True elif insideNP and item.className == 'VP': insideVP = True found[0] = False stack[0] = [] traverseFindTopClass(root, 'VP') node = root parent = root while len(node.children) > 0: parent = node node = node.children[0] if parent.className == 'WHNP': if found[0]: vpnode = stack[0][-1] vpchild = vpnode.children[0] frontWord = None if vpchild.className == 'VBG': verb = 'are' if root.answer.className == 'NNS' else 'is' verbnode = TreeNode('VB', verb, [], vpchild.level) vpnode.children.insert(0, verbnode) return True if insideSBar: return False if insideVP: return False if not found[0]: return False vpnode = stack[0][-1] vpchild = vpnode.children[0] frontWord = None if vpchild.className == 'VBZ': if vpchild.text == 'is': frontWord = vpchild vpnode.children.remove(vpchild) elif vpchild.text == 'has': done = False for child in vpnode.children: if child.className == 'VP': done = True break if done: frontWord = vpchild vpnode.children.remove(vpchild) else: frontWord = TreeNode('VBZ', 'does', [], 0) vpchild.text = 'have' vpchild.className = 'VB' else: frontWord = TreeNode('VBZ', 'does', [], 0) vpchild.className = 'VB' 
vpchild.text = lemmatizer.lemmatize(vpchild.text, 'v') pass elif vpchild.className == 'VBP': if vpchild.text == 'are': frontWord = vpchild vpnode.children.remove(vpchild) else: frontWord = TreeNode('VBP', 'do', [], 0) vpchild.className = 'VB' pass elif vpchild.className == 'VBD': if vpchild.text == 'was' or vpchild.text == 'were': frontWord = vpchild vpnode.children.remove(vpchild) elif vpchild.text == 'had': done = False for child in vpnode.children: if child.className == 'VP': done = True break if done: frontWord = vpchild vpnode.children.remove(vpchild) else: frontWord = TreeNode('VBD', 'did', [], 0) vpchild.text = 'have' vpchild.className = 'VB' else: frontWord = TreeNode('VBD', 'did', [], 0) vpchild.className = 'VB' vpchild.text = lemmatizer.lemmatize(vpchild.text, 'v') pass elif vpchild.className == 'MD': frontWord = vpchild vpnode.children.remove(vpchild) pass elif vpchild.className == 'VBG': verb = 'are' if topNoun != None and topNoun.className == 'NNS' else 'is' frontWord = TreeNode('VBZ', verb, [], 0) if frontWord is None: return False whStack[whPosition - 1].children.remove(whStack[whPosition]) bigS = TreeNode('S', '', [whStack[whPosition], stack[0][1]], 0) stack[0][0].children = [bigS] bigS.children[1].children.insert(0, frontWord) root.relevel(0) return True
MIT License
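The escapeNumber helper in the context above is a chain of anchored regex substitutions; the sketch below (a Python 3 re-implementation, not the repository's Python 2 code) shows why the two-digit patterns have to be applied before the single-digit ones.

# Minimal sketch of the escapeNumber idea: anchored patterns, two-digit numbers first,
# so that "11" becomes "eleven" rather than being caught by the "1" rule.
import re

NUMBER_WORDS = [
    ('11', 'eleven'), ('12', 'twelve'), ('13', 'thirteen'), ('14', 'fourteen'),
    ('15', 'fifteen'), ('16', 'sixteen'), ('17', 'seventeen'), ('18', 'eighteen'),
    ('19', 'nineteen'), ('20', 'twenty'), ('10', 'ten'), ('0', 'zero'),
    ('1', 'one'), ('2', 'two'), ('3', 'three'), ('4', 'four'), ('5', 'five'),
    ('6', 'six'), ('7', 'seven'), ('8', 'eight'), ('9', 'nine'),
]

def escape_number(token):
    # The '^...$' anchors mean only a bare numeric token is rewritten.
    for digits, word in NUMBER_WORDS:
        token = re.sub('^' + digits + '$', word, token)
    return token

print(escape_number('3'))    # three
print(escape_number('12'))   # twelve
print(escape_number('cat'))  # cat (unchanged)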
googleapis/python-aiplatform
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py
SpecialistPoolServiceGrpcAsyncIOTransport.__init__
python
def __init__( self, *, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._prep_wrapped_messages(client_info)
Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self-signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed.
https://github.com/googleapis/python-aiplatform/blob/c1c2326b2342ab1b6f4c4ce3852e63376eae740d/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py#L100-L228
import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.api_core import operations_v1 from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import packaging.version import grpc from grpc.experimental import aio from google.cloud.aiplatform_v1beta1.types import specialist_pool from google.cloud.aiplatform_v1beta1.types import specialist_pool_service from google.longrunning import operations_pb2 from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO from .grpc import SpecialistPoolServiceGrpcTransport class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, )
Apache License 2.0
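A brief construction sketch for the transport above, assuming the google-cloud-aiplatform package is installed and Application Default Credentials are configured; the regional host string is an illustrative value, not taken from the record.

import google.auth
from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.grpc_asyncio import (
    SpecialistPoolServiceGrpcAsyncIOTransport,
)

# Resolve Application Default Credentials explicitly and hand them to the transport.
credentials, _ = google.auth.default()
transport = SpecialistPoolServiceGrpcAsyncIOTransport(
    host="us-central1-aiplatform.googleapis.com",  # illustrative regional endpoint
    credentials=credentials,
)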
liuanji/wu-uct
Policy/PPO/PPOPolicy.py
AtariCNN.__init__
python
def __init__(self, num_actions, device): super().__init__() self.conv = nn.Sequential(nn.Conv2d(4, 32, 8, stride=4), nn.ReLU(inplace=True), nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 3, stride=1), nn.ReLU(inplace=True)) self.fc = nn.Sequential(nn.Linear(64 * 7 * 7, 512), nn.ReLU(inplace=True)) self.pi = nn.Linear(512, num_actions) self.v = nn.Linear(512, 1) self.num_actions = num_actions self.device = device
Basic convolutional actor-critic network for Atari 2600 games. Equivalent to the network in the original DQN paper. Args: num_actions (int): the number of available discrete actions
https://github.com/liuanji/wu-uct/blob/a185cf11feef548ca3f7f59bec4f9d8a92c8a2a1/Policy/PPO/PPOPolicy.py#L127-L152
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import torch.optim as optim import time import os from Env.AtariEnv.atari_wrappers import LazyFrames def ortho_weights(shape, scale=1.): shape = tuple(shape) if len(shape) == 2: flat_shape = shape[1], shape[0] elif len(shape) == 4: flat_shape = (np.prod(shape[1:]), shape[0]) else: raise NotImplementedError a = np.random.normal(0., 1., flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v q = q.transpose().copy().reshape(shape) if len(shape) == 2: return torch.from_numpy((scale * q).astype(np.float32)) if len(shape) == 4: return torch.from_numpy((scale * q[:, :shape[1], :shape[2]]).astype(np.float32)) def atari_initializer(module): classname = module.__class__.__name__ if classname == 'Linear': module.weight.data = ortho_weights(module.weight.data.size(), scale=np.sqrt(2.)) module.bias.data.zero_() elif classname == 'Conv2d': module.weight.data = ortho_weights(module.weight.data.size(), scale=np.sqrt(2.)) module.bias.data.zero_() elif classname == 'LSTM': for name, param in module.named_parameters(): if 'weight_ih' in name: param.data = ortho_weights(param.data.size(), scale=1.) if 'weight_hh' in name: param.data = ortho_weights(param.data.size(), scale=1.) if 'bias' in name: param.data.zero_() class PPOAtariCNN(): def __init__(self, num_actions, device = "cpu", checkpoint_dir = ""): self.num_actions = num_actions self.device = torch.device(device) self.checkpoint_dir = checkpoint_dir self.model = AtariCNN(num_actions, self.device) if checkpoint_dir != "" and os.path.exists(checkpoint_dir): checkpoint = torch.load(checkpoint_dir, map_location = "cpu") self.model.load_state_dict(checkpoint["policy"]) self.model.to(device) def get_action(self, state, logit = False): return self.model.get_action(state, logit = logit) def get_value(self, state): return self.model.get_value(state) class PPOSmallAtariCNN(): def __init__(self, num_actions, device = "cpu", checkpoint_dir = ""): self.num_actions = num_actions self.device = torch.device(device) self.checkpoint_dir = checkpoint_dir self.model = SmallPolicyAtariCNN(num_actions, self.device) if checkpoint_dir != "" and os.path.exists(checkpoint_dir): checkpoint = torch.load(checkpoint_dir, map_location = "cpu") self.model.to(device) self.optimizer = optim.Adam(self.model.parameters(), lr = 1e-3) self.mseLoss = nn.MSELoss() def get_action(self, state): return self.model.get_action(state) def get_value(self, state): return self.model.get_value(state) def train_step(self, state_batch, policy_batch, value_batch, temperature = 2.5): self.optimizer.zero_grad() out_policy, out_value = self.model(state_batch) loss = self.mseLoss(policy_batch, out_policy) + self.mseLoss(value_batch, out_value) loss.backward() self.optimizer.step() return loss.detach().cpu().numpy() def save(self, path): torch.save({"policy": self.model.state_dict()}, path) class AtariCNN(nn.Module):
MIT License
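The record only shows the constructor; the forward pass and get_action/get_value are elided. A standalone sketch of the same conv/fc/pi/v stack with an explicit forward method illustrates the tensor shapes involved (stacked 84x84 frames in, policy logits and a scalar value out).

# Sketch only: replicates the DQN-style trunk from AtariCNN.__init__ above
# in a self-contained module so the shapes can be checked directly.
import torch
import torch.nn as nn

class TinyAtariActorCritic(nn.Module):
    def __init__(self, num_actions):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(4, 32, 8, stride=4), nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, stride=1), nn.ReLU(inplace=True),
        )
        self.fc = nn.Sequential(nn.Linear(64 * 7 * 7, 512), nn.ReLU(inplace=True))
        self.pi = nn.Linear(512, num_actions)   # policy logits head
        self.v = nn.Linear(512, 1)              # state-value head

    def forward(self, frames):                  # frames: [batch, 4, 84, 84]
        h = self.conv(frames).flatten(start_dim=1)
        h = self.fc(h)
        return self.pi(h), self.v(h)

logits, value = TinyAtariActorCritic(6)(torch.zeros(2, 4, 84, 84))
print(logits.shape, value.shape)  # torch.Size([2, 6]) torch.Size([2, 1])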
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/ads/__init__.py
AdsHub.__init__
python
def __init__(self, ads_client): self._client = ads_client self._client.open() self._devices = [] self._notification_items = {} self._lock = threading.Lock()
Initialize the ADS hub.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/ads/__init__.py#L116-L124
import threading import struct import logging import ctypes from collections import namedtuple import voluptuous as vol from homeassistant.const import CONF_DEVICE, CONF_PORT, CONF_IP_ADDRESS, EVENT_HOMEASSISTANT_STOP import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pyads==2.2.6'] _LOGGER = logging.getLogger(__name__) DATA_ADS = 'data_ads' ADSTYPE_INT = 'int' ADSTYPE_UINT = 'uint' ADSTYPE_BYTE = 'byte' ADSTYPE_BOOL = 'bool' DOMAIN = 'ads' CONF_ADS_VAR = 'adsvar' CONF_ADS_VAR_BRIGHTNESS = 'adsvar_brightness' CONF_ADS_TYPE = 'adstype' CONF_ADS_FACTOR = 'factor' CONF_ADS_VALUE = 'value' SERVICE_WRITE_DATA_BY_NAME = 'write_data_by_name' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_DEVICE): cv.string, vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_IP_ADDRESS): cv.string, }) }, extra=vol.ALLOW_EXTRA) SCHEMA_SERVICE_WRITE_DATA_BY_NAME = vol.Schema({ vol.Required(CONF_ADS_TYPE): vol.In([ADSTYPE_INT, ADSTYPE_UINT, ADSTYPE_BYTE]), vol.Required(CONF_ADS_VALUE): cv.match_all, vol.Required(CONF_ADS_VAR): cv.string, }) def setup(hass, config): import pyads conf = config[DOMAIN] net_id = conf.get(CONF_DEVICE) ip_address = conf.get(CONF_IP_ADDRESS) port = conf.get(CONF_PORT) client = pyads.Connection(net_id, port, ip_address) AdsHub.ADS_TYPEMAP = { ADSTYPE_BOOL: pyads.PLCTYPE_BOOL, ADSTYPE_BYTE: pyads.PLCTYPE_BYTE, ADSTYPE_INT: pyads.PLCTYPE_INT, ADSTYPE_UINT: pyads.PLCTYPE_UINT, } AdsHub.PLCTYPE_BOOL = pyads.PLCTYPE_BOOL AdsHub.PLCTYPE_BYTE = pyads.PLCTYPE_BYTE AdsHub.PLCTYPE_INT = pyads.PLCTYPE_INT AdsHub.PLCTYPE_UINT = pyads.PLCTYPE_UINT AdsHub.ADSError = pyads.ADSError try: ads = AdsHub(client) except pyads.pyads.ADSError: _LOGGER.error( "Could not connect to ADS host (netid=%s, port=%s)", net_id, port) return False hass.data[DATA_ADS] = ads hass.bus.listen(EVENT_HOMEASSISTANT_STOP, ads.shutdown) def handle_write_data_by_name(call): ads_var = call.data.get(CONF_ADS_VAR) ads_type = call.data.get(CONF_ADS_TYPE) value = call.data.get(CONF_ADS_VALUE) try: ads.write_by_name(ads_var, value, ads.ADS_TYPEMAP[ads_type]) except pyads.ADSError as err: _LOGGER.error(err) hass.services.register( DOMAIN, SERVICE_WRITE_DATA_BY_NAME, handle_write_data_by_name, schema=SCHEMA_SERVICE_WRITE_DATA_BY_NAME) return True NotificationItem = namedtuple( 'NotificationItem', 'hnotify huser name plc_datatype callback' ) class AdsHub(object):
MIT License
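A short wiring sketch outside Home Assistant, using AdsHub as defined in the context above; the AMS net id, PLC port and IP address are placeholders, and a reachable TwinCAT ADS device is assumed.

import pyads

client = pyads.Connection("127.0.0.1.1.1", 851, "192.168.0.10")  # placeholder AMS net id / port / IP
hub = AdsHub(client)  # opens the connection and initializes the notification bookkeeping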
buildinspace/peru
peru/resources/plugins/curl/curl_plugin.py
get_request_filename
python
def get_request_filename(request): if 'Content-Disposition' in request.info(): disposition = request.info()['Content-Disposition'] pieces = re.split(r'\s*;\s*', disposition) for piece in pieces: if piece.startswith('filename='): filename = piece[len('filename='):] if filename.startswith('"'): filename = filename[1:] if filename.endswith('"'): filename = filename[:-1] filename = filename.replace('\\"', '"') return filename return os.path.basename(urlsplit(request.url).path) or 'index.html'
Figure out the filename for an HTTP download.
https://github.com/buildinspace/peru/blob/e9ba6e0024ea08105a8d027f958899cca39aeb9a/peru/resources/plugins/curl/curl_plugin.py#L16-L34
import hashlib import os import pathlib import re import stat import sys import tarfile from urllib.error import HTTPError, URLError from urllib.parse import urlsplit import urllib.request import zipfile
MIT License
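A standalone sketch of the same filename logic applied to plain strings (it mirrors, but is not, the plugin's function, which works on a urllib request object): prefer an explicit Content-Disposition filename, otherwise fall back to the last path component of the URL.

import os
import re
from urllib.parse import urlsplit

def filename_from_headers(url, content_disposition=None):
    # Prefer a filename declared in the Content-Disposition header, if any.
    if content_disposition:
        for piece in re.split(r'\s*;\s*', content_disposition):
            if piece.startswith('filename='):
                return piece[len('filename='):].strip('"').replace('\\"', '"')
    # Otherwise derive it from the URL path, defaulting to index.html.
    return os.path.basename(urlsplit(url).path) or 'index.html'

print(filename_from_headers('https://example.com/a/b.tar.gz'))                           # b.tar.gz
print(filename_from_headers('https://example.com/dl', 'attachment; filename="x.zip"'))   # x.zip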
weso/cwr-dataapi
cwr/other.py
VISAN.isan
python
def isan(self): return self._isan
Returns the ISAN code :return: the ISAN code
https://github.com/weso/cwr-dataapi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/other.py#L221-L226
__author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' class _ThreePartsCode(object): _code_size = 9 def __init__(self, header, id_code, check_digit ): self._header = header self._id_code = id_code self._check_digit = check_digit def __str__(self): return '%s-%s-%s' % (self._header, self._printable_id_code(), self._check_digit) def __repr__(self): return '<class %s>(header=%r, id_code=%r, check_digit=%r)' % ('ThreePartsCode', self._header, self._id_code, self._check_digit) def _printable_id_code(self): code = str(self.id_code) while len(code) < self._code_size: code = '0' + code return code @property def header(self): return self._header @header.setter def header(self, value): self._header = value @property def id_code(self): return self._id_code @id_code.setter def id_code(self, value): self._id_code = value @property def check_digit(self): return self._check_digit @check_digit.setter def check_digit(self, value): self._check_digit = value class ISWCCode(_ThreePartsCode): def __init__(self, id_code, check_digit ): super(ISWCCode, self).__init__( 'T', id_code, check_digit ) def __str__(self): return 'ISWC T-%s-%s' % (self._printable_id_code(), self._check_digit) def __repr__(self): return '<class %s>(id_code=%r, check_digit=%r)' % ('ISWCCode', self._id_code, self._check_digit) def _printable_id_code(self): code = super(ISWCCode, self)._printable_id_code() code1 = code[:3] code2 = code[3:6] code3 = code[-3:] return '%s.%s.%s' % (code1, code2, code3) class IPIBaseNumber(_ThreePartsCode): def __init__(self, header, id_code, check_digit ): super(IPIBaseNumber, self).__init__( header, id_code, check_digit ) def __str__(self): return '%s-%s-%s' % ( self.header, self._printable_id_code(), self.check_digit) def __repr__(self): return '<class %s>(header=%r, id_code=%r, check_digit=%r)' % ( 'IPIBaseNumber', self._header, self._id_code, self._check_digit) class VISAN(object): def __init__(self, version, isan, episode, check_digit ): self._version = version self._isan = isan self._episode = episode self._check_digit = check_digit @property def check_digit(self): return self._check_digit @check_digit.setter def check_digit(self, value): self._check_digit = value @property def episode(self): return self._episode @episode.setter def episode(self, value): self._episode = value @property
MIT License
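A usage sketch for the property above; the four constructor values are placeholder numbers, not a real V-ISAN.

from cwr.other import VISAN

visan = VISAN(version=1, isan=123456789, episode=12, check_digit=7)
print(visan.isan)         # 123456789
print(visan.check_digit)  # 7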
rroller/dahua
custom_components/dahua/client.py
DahuaClient.get_bytes
python
async def get_bytes(self, url: str) -> bytes: async with async_timeout.timeout(TIMEOUT_SECONDS): response = None try: auth = DigestAuth(self._username, self._password, self._session) response = await auth.request("GET", self._base + url) response.raise_for_status() return await response.read() finally: if response is not None: response.close()
Get information from the API. This will return the raw response and not process it
https://github.com/rroller/dahua/blob/83c09300098b6e446d218a60280a7d5babfd2c6f/custom_components/dahua/client.py#L600-L612
import logging import socket import asyncio import aiohttp import async_timeout from custom_components.dahua.const import STREAM_MAIN, STREAM_SUB from .digest import DigestAuth _LOGGER: logging.Logger = logging.getLogger(__package__) TIMEOUT_SECONDS = 10 SECURITY_LIGHT_TYPE = 1 SIREN_TYPE = 2 class DahuaClient: def __init__( self, username: str, password: str, address: str, port: int, rtsp_port: int, session: aiohttp.ClientSession ) -> None: self._username = username self._password = password self._address = address self._session = session self._port = port self._rtsp_port = rtsp_port protocol = "https" if int(port) == 443 else "http" self._base = "{0}://{1}:{2}".format(protocol, address, port) def get_rtsp_stream_url(self, channel: int, subtype: int) -> str: url = "rtsp://{0}:{1}@{2}:{3}/cam/realmonitor?channel={4}&subtype={5}".format( self._username, self._password, self._address, self._rtsp_port, channel, subtype, ) return url async def async_get_snapshot(self, channel_number: int) -> bytes: url = "/cgi-bin/snapshot.cgi?channel={0}".format(channel_number) return await self.get_bytes(url) async def async_get_system_info(self) -> dict: url = "/cgi-bin/magicBox.cgi?action=getSystemInfo" return await self.get(url) async def get_device_type(self) -> dict: url = "/cgi-bin/magicBox.cgi?action=getDeviceType" return await self.get(url) async def get_software_version(self) -> dict: url = "/cgi-bin/magicBox.cgi?action=getSoftwareVersion" return await self.get(url) async def get_machine_name(self) -> dict: url = "/cgi-bin/magicBox.cgi?action=getMachineName" return await self.get(url) async def get_vendor(self) -> dict: url = "/cgi-bin/magicBox.cgi?action=getVendor" return await self.get(url) async def async_get_coaxial_control_io_status(self) -> dict: url = "/cgi-bin/coaxialControlIO.cgi?action=getStatus&channel=1" return await self.get(url) async def async_get_lighting_v2(self) -> dict: url = "/cgi-bin/configManager.cgi?action=getConfig&name=Lighting_V2" return await self.get(url) async def async_get_machine_name(self) -> dict: url = "/cgi-bin/configManager.cgi?action=getConfig&name=General" return await self.get(url) async def async_get_config(self, name) -> dict: url = "/cgi-bin/configManager.cgi?action=getConfig&name={0}".format(name) return await self.get(url) async def async_get_config_lighting(self, channel: int, profile_mode) -> dict: try: return await self.async_get_config("Lighting[{0}][{1}]".format(channel, profile_mode)) except aiohttp.ClientResponseError as e: if e.status == 400: return {} raise e async def async_get_config_motion_detection(self) -> dict: return await self.async_get_config("MotionDetect") async def async_get_ivs_rules(self): return await self.async_get_config("VideoAnalyseRule") async def async_set_all_ivs_rules(self, channel: int, enabled: bool): rules = await self.async_get_ivs_rules() rules_set = [] for index in range(10): rule = "table.VideoAnalyseRule[{0}][{1}].Enable".format(channel, index) if rule in rules: rules_set.append("VideoAnalyseRule[{0}][{1}].Enable={2}".format(channel, index, str(enabled).lower())) if len(rules_set) > 0: url = "/cgi-bin/configManager.cgi?action=setConfig&" + "&".join(rules_set) return await self.get(url, True) async def async_set_ivs_rule(self, channel: int, index: int, enabled: bool): url = "/cgi-bin/configManager.cgi?action=setConfig&VideoAnalyseRule[{0}][{1}].Enable={2}".format( channel, index, str(enabled).lower() ) return await self.get(url, True) async def async_set_lighting_v1(self, channel: int, enabled: bool, brightness: int) 
-> dict: mode = "Manual" if not enabled: mode = "Off" return await self.async_set_lighting_v1_mode(channel, mode, brightness) async def async_set_lighting_v1_mode(self, channel: int, mode: str, brightness: int) -> dict: if mode.lower() == "on": mode = "Manual" mode = mode.capitalize() url = "/cgi-bin/configManager.cgi?action=setConfig&Lighting[{channel}][0].Mode={mode}&Lighting[{channel}][0].MiddleLight[0].Light={brightness}".format( channel=channel, mode=mode, brightness=brightness ) return await self.get(url) async def async_set_video_profile_mode(self, channel: int, mode: str): if mode.lower() == "night": mode = "1" else: mode = "0" url = "/cgi-bin/configManager.cgi?action=setConfig&VideoInMode[{0}].Config[0]={1}".format(channel, mode) return await self.get(url, True) async def async_set_night_switch_mode(self, channel: int, mode: str): if mode.lower() == "night": mode = "3" else: mode = "0" url = f"/cgi-bin/configManager.cgi?action=setConfig&VideoInOptions[{channel}].NightOptions.SwitchMode={mode}" _LOGGER.debug("Switching night mode: %s", url) return await self.get(url, True) async def async_enable_channel_title(self, channel: int, enabled: bool, ): url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].ChannelTitle.EncodeBlend={1}".format( channel, str(enabled).lower() ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could enable/disable channel title") async def async_enable_time_overlay(self, channel: int, enabled: bool): url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].TimeTitle.EncodeBlend={1}".format( channel, str(enabled).lower() ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not enable/disable time overlay") async def async_enable_text_overlay(self, channel: int, group: int, enabled: bool): url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].CustomTitle[{1}].EncodeBlend={2}".format( channel, group, str(enabled).lower() ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not enable/disable text overlay") async def async_enable_custom_overlay(self, channel: int, group: int, enabled: bool): url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].UserDefinedTitle[{1}].EncodeBlend={2}".format( channel, group, str(enabled).lower() ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not enable/disable customer overlay") async def async_set_service_set_channel_title(self, channel: int, text1: str, text2: str): text = '|'.join(filter(None, [text1, text2])) url = "/cgi-bin/configManager.cgi?action=setConfig&ChannelTitle[{0}].Name={1}".format( channel, text ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not set text") async def async_set_service_set_text_overlay(self, channel: int, group: int, text1: str, text2: str, text3: str, text4: str): text = '|'.join(filter(None, [text1, text2, text3, text4])) url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].CustomTitle[{1}].Text={2}".format( channel, group, text ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not set text") async def async_set_service_set_custom_overlay(self, channel: int, group: int, text1: str, text2: str): text = '|'.join(filter(None, [text1, text2])) url = "/cgi-bin/configManager.cgi?action=setConfig&VideoWidget[{0}].UserDefinedTitle[{1}].Text={2}".format( channel, group, 
text ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not set text") async def async_set_lighting_v2(self, channel: int, enabled: bool, brightness: int, profile_mode: str) -> dict: mode = "Manual" if not enabled: mode = "Off" url = "/cgi-bin/configManager.cgi?action=setConfig&Lighting_V2[{channel}][{profile_mode}][0].Mode={mode}&Lighting_V2[{channel}][{profile_mode}][0].MiddleLight[0].Light={brightness}".format( channel=channel, profile_mode=profile_mode, mode=mode, brightness=brightness ) _LOGGER.debug("Turning light on: %s", url) return await self.get(url) async def async_set_video_in_day_night_mode(self, channel: int, config_type: str, mode: str): if config_type == "day": config_no = 0 elif config_type == "night": config_no = 1 else: config_no = 2 if mode is None or mode.lower() == "auto" or mode.lower() == "brightness": mode = "Brightness" elif mode.lower() == "color": mode = "Color" elif mode.lower() == "blackwhite": mode = "BlackWhite" url = "/cgi-bin/configManager.cgi?action=setConfig&VideoInDayNight[{0}][{1}].Mode={2}".format( channel, str(config_no), mode ) value = await self.get(url) if "OK" not in value and "ok" not in value: raise Exception("Could not set Day/Night mode") async def async_get_video_in_mode(self) -> dict: url = "/cgi-bin/configManager.cgi?action=getConfig&name=VideoInMode" return await self.get(url) async def async_set_coaxial_control_state(self, channel: int, dahua_type: int, enabled: bool) -> dict: io = "1" if not enabled: io = "2" url = "/cgi-bin/coaxialControlIO.cgi?action=control&channel={channel}&info[0].Type={dahua_type}&info[0].IO={io}".format( channel=channel, dahua_type=dahua_type, io=io) _LOGGER.debug("Setting coaxial control state to %s: %s", io, url) return await self.get(url) async def async_set_disarming_linkage(self, channel: int, enabled: bool) -> dict: value = "false" if enabled: value = "true" url = "/cgi-bin/configManager.cgi?action=setConfig&DisableLinkage[{0}].Enable={1}".format(channel, value) return await self.get(url) async def async_set_record_mode(self, channel: int, mode: str) -> dict: if mode.lower() == "auto": mode = "0" elif mode.lower() == "manual" or mode.lower() == "on": mode = "1" elif mode.lower() == "off": mode = "2" url = "/cgi-bin/configManager.cgi?action=setConfig&RecordMode[{0}].Mode={1}".format(channel, mode) _LOGGER.debug("Setting record mode: %s", url) return await self.get(url) async def async_get_disarming_linkage(self) -> dict: url = "/cgi-bin/configManager.cgi?action=getConfig&name=DisableLinkage" return await self.get(url) async def async_access_control_open_door(self, door_id: int = 1) -> dict: url = "/cgi-bin/accessControl.cgi?action=openDoor&UserID=101&Type=Remote&channel={0}".format(door_id) return await self.get(url) async def enable_motion_detection(self, channel: int, enabled: bool) -> dict: url = "/cgi-bin/configManager.cgi?action=setConfig&MotionDetect[{channel}].Enable={enabled}&MotionDetect[{channel}].DetectVersion=V3.0".format( channel=channel, enabled=str(enabled).lower()) response = await self.get(url) if "OK" in response: return response url = "/cgi-bin/configManager.cgi?action=setConfig&MotionDetect[{0}].Enable={1}".format(channel, str(enabled).lower()) response = await self.get(url) return response async def stream_events(self, on_receive, events: list, channel: int): codes = ",".join(events) url = "{0}/cgi-bin/eventManager.cgi?action=attach&codes=[{1}]&heartbeat=5".format(self._base, codes) if self._username is not None and self._password is not 
None: response = None try: auth = DigestAuth(self._username, self._password, self._session) response = await auth.request("GET", url) response.raise_for_status() async for data, _ in response.content.iter_chunks(): on_receive(data, channel) except Exception as exception: raise ConnectionError() from exception finally: if response is not None: response.close() @staticmethod async def parse_dahua_api_response(data: str) -> dict: lines = data.splitlines() data_dict = {} for line in lines: parts = line.split("=", 1) if len(parts) == 2: data_dict[parts[0]] = parts[1] else: data_dict[parts[0]] = line return data_dict
MIT License
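A sketch of pulling one snapshot through the client above; the host, credentials and ports are placeholders, and the coroutine only succeeds if a camera is actually reachable.

import asyncio
import aiohttp

async def grab_snapshot():
    async with aiohttp.ClientSession() as session:
        client = DahuaClient("admin", "password", "192.168.1.108", 80, 554, session)
        jpeg_bytes = await client.async_get_snapshot(channel_number=1)  # raw JPEG bytes
        with open("snapshot.jpg", "wb") as f:
            f.write(jpeg_bytes)

asyncio.run(grab_snapshot())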
dkoslicki/cmash
CMash/GroundTruth.py
TrueContainment.__parseNumList
python
def __parseNumList(k_sizes_str: str) -> list: m = re.match(r'(\d+)(?:-(\d+))?(?:-(\d+))?$', k_sizes_str) if not m: raise ArgumentTypeError( "'" + k_sizes_str + "' is not a range of number. Expected forms like '1-5' or '2' or '10-15-2'.") start = int(m.group(1)) end = int(m.group(2)) if m.group(3): increment = int(m.group(3)) else: increment = 1 return list(range(start, end + 1, increment))
Parses a string like 10-21-1 and turns it into a list like [10, 11, 12,...,21] :param k_sizes_str: the <start>-<end>-<increment> string :type k_sizes_str: str :return: list of k-mer sizes :rtype: list
https://github.com/dkoslicki/cmash/blob/1485a0cde4685c52ae8cfb8bdaa5d1bad77eaac3/CMash/GroundTruth.py#L74-L93
import khmer import numpy as np import os import sys import pandas as pd import re import screed from argparse import ArgumentTypeError import multiprocessing import argparse import subprocess import json from itertools import starmap import tempfile try: from CMash import MinHash as MH from CMash import Query except ImportError: try: import MinHash as MH import Query except ImportError: sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from CMash import MinHash as MH from CMash import Query notACTG = re.compile('[^ACTG]') class TrueContainment: def __init__(self, training_database_file: str, k_sizes: str, temp_dir: str): self.training_database_file = training_database_file self.k_sizes = self.__parseNumList(k_sizes) self.CEs = self.__import_database() self.training_file_names = self.__return_file_names() self.temp_dir = temp_dir if not os.path.exists(temp_dir): os.mkdir(temp_dir) self._compute_all_training_kmers() def __import_database(self) -> list: CEs = MH.import_multiple_from_single_hdf5(self.training_database_file) return CEs def __return_file_names(self): training_file_names = list(map(lambda x: x.input_file_name.decode('utf-8'), self.CEs)) return training_file_names @staticmethod
BSD 3-Clause New or Revised License
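A standalone sketch of the <start>-<end>-<increment> parsing described in the docstring. Note that the sketch also guards the bare-number form such as '2', which the docstring advertises but which would make int(m.group(2)) fail in the method above.

import re

def parse_num_list(k_sizes_str):
    m = re.match(r'(\d+)(?:-(\d+))?(?:-(\d+))?$', k_sizes_str)
    if not m:
        raise ValueError("expected forms like '1-5', '2' or '10-15-2'")
    start = int(m.group(1))
    end = int(m.group(2)) if m.group(2) else start   # bare number: single k size
    step = int(m.group(3)) if m.group(3) else 1
    return list(range(start, end + 1, step))

print(parse_num_list('10-21-1'))  # [10, 11, 12, ..., 21]
print(parse_num_list('10-15-2'))  # [10, 12, 14]
print(parse_num_list('7'))        # [7]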
spikeinterface/spikeextractors
spikeextractors/recordingextractor.py
RecordingExtractor.get_num_channels
python
def get_num_channels(self): return len(self.get_channel_ids())
This function returns the number of channels in the recording. Returns ------- num_channels: int Number of channels in the recording
https://github.com/spikeinterface/spikeextractors/blob/4097f6f22f5aa4811cd85d5d8a9d25b94312339d/spikeextractors/recordingextractor.py#L95-L103
from abc import ABC, abstractmethod import numpy as np from copy import deepcopy from .extraction_tools import load_probe_file, save_to_probe_file, write_to_binary_dat_format, write_to_h5_dataset_format, get_sub_extractors_by_property, cast_start_end_frame from .baseextractor import BaseExtractor class RecordingExtractor(ABC, BaseExtractor): _default_filename = "spikeinterface_recording" def __init__(self): BaseExtractor.__init__(self) self._key_properties = {'group': None, 'location': None, 'gain': None, 'offset': None} self.is_filtered = False @abstractmethod def get_traces(self, channel_ids=None, start_frame=None, end_frame=None, return_scaled=True): pass @abstractmethod def get_num_frames(self): pass @abstractmethod def get_sampling_frequency(self): pass @abstractmethod def get_channel_ids(self): pass
MIT License
rethinkrobotics/intera_sdk
intera_interface/src/intera_motion_interface/motion_waypoint.py
MotionWaypoint.get_default_active_endpoint
python
def get_default_active_endpoint(): return 'right_hand'
@return: the active endpoint string corresponding to the tip of Sawyer's arm when nothing else is attached.
https://github.com/rethinkrobotics/intera_sdk/blob/6614dec1c5c2e7a74db1af6d01811d1332801785/intera_interface/src/intera_motion_interface/motion_waypoint.py#L48-L53
import rospy from geometry_msgs.msg import ( PoseStamped, Pose ) from intera_motion_msgs.msg import ( Waypoint, WaypointOptions ) from sensor_msgs.msg import JointState from copy import deepcopy from .utility_functions import ensure_path_to_file_exists from rospy_message_converter import message_converter import yaml from .motion_waypoint_options import MotionWaypointOptions from intera_interface import Limb class MotionWaypoint(object): @staticmethod def get_default_joint_angles(): return [0.0, -0.9, 0.0, 1.8, 0.0, -0.9, 0.0] @staticmethod
Apache License 2.0
berkeleyphotonicsgenerator/bpg
tests/test_flatten.py
SubLevel2.draw_layout
python
def draw_layout(self): self.add_rect( layer='SI', coord1=(0, 0), coord2=(2, 4), unit_mode=False ) self.add_photonic_port(name='Sublevel2', center=(0, 0), orient='R0', layer='SI', width=1)
Specifies the creation of the Lumerical shapes
https://github.com/berkeleyphotonicsgenerator/bpg/blob/27221c9bbfd5e25547ad048fcbfacf940db9ac8c/tests/test_flatten.py#L24-L37
import BPG class SubLevel2(BPG.PhotonicTemplateBase): def __init__(self, temp_db, lib_name, params, used_names, **kwargs, ): BPG.PhotonicTemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs) @classmethod def get_params_info(cls): return dict( ) @classmethod def get_default_param_values(cls): return dict( )
BSD 3-Clause New or Revised License
cryptosignal/crypto-signal
app/notifiers/gmail_client.py
GmailNotifier.notify
python
def notify(self, message): header = 'From: %s\n' % self.username header += 'To: %s\n' % self.destination_addresses header += 'Content-Type: text/plain\n' header += 'Subject: Crypto-signal alert!\n\n' message = header + message smtp_handler = smtplib.SMTP(self.smtp_server) smtp_handler.starttls() smtp_handler.login(self.username, self.password) result = smtp_handler.sendmail(self.username, self.destination_addresses, message) smtp_handler.quit() return result
Sends the message. Args: message (str): The message to send. Returns: dict: A dictionary containing the result of the attempt to send the email.
https://github.com/cryptosignal/crypto-signal/blob/8769d0df2c50e5071b282300788a3860200b22c6/app/notifiers/gmail_client.py#L32-L53
import smtplib import structlog from tenacity import retry, retry_if_exception_type, stop_after_attempt from notifiers.utils import NotifierUtils class GmailNotifier(NotifierUtils): def __init__(self, username, password, destination_addresses): self.logger = structlog.get_logger() self.smtp_server = 'smtp.gmail.com:587' self.username = username self.password = password self.destination_addresses = ','.join(destination_addresses) @retry(stop=stop_after_attempt(3))
MIT License
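A send sketch for the notifier above; the addresses, password and alert text are placeholders, running it opens a real SMTP session against smtp.gmail.com:587, and the import path assumes the app/ directory layout shown in the record.

from notifiers.gmail_client import GmailNotifier

notifier = GmailNotifier(
    username="me@gmail.com",              # placeholder sender account
    password="app-password",              # Gmail normally requires an app password here
    destination_addresses=["alerts@example.com"],
)
notifier.notify("BTC/USDT: RSI crossed below 30")  # placeholder alert text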
b-ryan/powerline-shell
powerline_shell/segments/hg.py
_get_hg_status
python
def _get_hg_status(output): return output[0].decode("utf-8").splitlines()
This function exists to enable mocking the `hg status` output in tests.
https://github.com/b-ryan/powerline-shell/blob/a9b8c9bb39dbfb7ec3c639e497b5a76fa6dcb8cc/powerline_shell/segments/hg.py#L26-L29
import subprocess from ..utils import RepoStats, ThreadedSegment, get_subprocess_env def _get_hg_branch(): p = subprocess.Popen(["hg", "branch"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=get_subprocess_env()) branch = p.communicate()[0].decode("utf-8").rstrip('\n') return branch def parse_hg_stats(status): stats = RepoStats() for statusline in status: if statusline[0] == "A": stats.staged += 1 elif statusline[0] == "?": stats.new += 1 else: stats.changed += 1 return stats
MIT License
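A sketch of how a test might fake the `hg status` output and feed it through the two helpers above; the byte-string tuple stands in for the result of subprocess.Popen.communicate().

from powerline_shell.segments.hg import _get_hg_status, parse_hg_stats

fake_communicate = (b"A added_file.py\n? new_file.py\nM modified_file.py\n", b"")
status_lines = _get_hg_status(fake_communicate)  # ['A added_file.py', '? new_file.py', 'M modified_file.py']
stats = parse_hg_stats(status_lines)
print(stats.staged, stats.new, stats.changed)    # 1 1 1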
cozysynthesizer/cozy
cozy/jobs.py
Job.successful
python
def successful(self): return self._flags[Job._DONE_NORMALLY_FLAG]
True if the job has stopped without throwing an uncaught exception.
https://github.com/cozysynthesizer/cozy/blob/d7b2c0ee575057dea4ebec201d579f0ecd785b1b/cozy/jobs.py#L192-L194
import os import multiprocessing from queue import Queue as PlainQueue, Empty, Full import threading import signal from cozy.common import partition import cozy.opts as opts do_profiling = opts.Option("profile", bool, False, description="Profile Cozy itself") _interrupted = False def _set_interrupt_flag(signal_number, stack_frame): global _interrupted _interrupted = True def install_graceful_sigint_handler(): signal.signal(signal.SIGINT, _set_interrupt_flag) def was_interrupted(): return _interrupted multiprocessing_context = multiprocessing.get_context("spawn") class Job(object): _STOP_REQUESTED_FLAG = 0 _DONE_FLAG = 1 _DONE_NORMALLY_FLAG = 2 _SIGINT_HANDLER_INSTALLED_FLAG = 3 _FLAG_COUNT = 4 def __init__(self): self._thread = multiprocessing_context.Process(target=self._run, daemon=True) self._flags = multiprocessing_context.Array("b", [False] * Job._FLAG_COUNT) def start(self): self._options = opts.snapshot() self._thread.start() def run(self): raise NotImplementedError() def _run(self): opts.restore(self._options) install_graceful_sigint_handler() self._flags[Job._SIGINT_HANDLER_INSTALLED_FLAG] = True try: if do_profiling.value: import cProfile import tempfile (fd, filename) = tempfile.mkstemp(suffix=".prof") print("Profile info: {}".format(filename)) cProfile.runctx("self.run()", globals(), locals(), filename=filename) else: self.run() self._flags[Job._DONE_NORMALLY_FLAG] = True except Exception as e: import traceback traceback.print_exc() finally: self._flags[Job._DONE_FLAG] = True @property def stop_requested(self): return was_interrupted() or self._flags[Job._STOP_REQUESTED_FLAG] @property def done(self): return self._flags[Job._DONE_FLAG] or (self._thread.exitcode is not None) @property
Apache License 2.0
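A minimal subclass sketch that only exercises the flags shown above: start the job in its spawned process, poll done, then read successful. Real callers in Cozy coordinate jobs through their own machinery.

import time
from cozy.jobs import Job

class HelloJob(Job):
    def run(self):
        print("working in the child process...")

if __name__ == "__main__":
    job = HelloJob()
    job.start()
    while not job.done:      # the child flips the done flag when _run() returns
        time.sleep(0.1)
    print("successful:", job.successful)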
google-research/language
language/labs/consistent_zero_shot_nmt/utils/model_utils.py
gnmt_residual_fn
python
def gnmt_residual_fn(inputs, outputs): def split_input(inp, out): out_dim = out.get_shape().as_list()[-1] inp_dim = inp.get_shape().as_list()[-1] return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1) actual_inputs, _ = contrib_framework.nest.map_structure( split_input, inputs, outputs) def assert_shape_match(inp, out): inp.get_shape().assert_is_compatible_with(out.get_shape()) contrib_framework.nest.assert_same_structure(actual_inputs, outputs) contrib_framework.nest.map_structure(assert_shape_match, actual_inputs, outputs) return contrib_framework.nest.map_structure(lambda inp, out: inp + out, actual_inputs, outputs)
Residual function that handles mismatched inner dimensions between inputs and outputs. Args: inputs: A potentially nested structure of <tensor> [..., input_dim] that represents cell inputs. outputs: A potentially nested structure of <tensor> [..., output_dim] that represents cell outputs. Must have the same structure and number of dimensions as inputs, and input_dim >= output_dim must hold. Returns: outputs + actual_inputs where actual_inputs are a nested structure of slices of the inputs along the last dimension up to the output_dim.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/labs/consistent_zero_shot_nmt/utils/model_utils.py#L163-L191
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensor2tensor.layers import common_layers import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import rnn as contrib_rnn from tensorflow.contrib import seq2seq as contrib_seq2seq __all__ = [ "GNMTAttentionMultiCell", "gnmt_residual_fn", "create_rnn_cell", "create_gnmt_rnn_cell", "build_unidirectional_rnn", "build_bidirectional_rnn", "make_sequences_compatible", "get_embeddings", "build_logits", "get_global_step", ] def _single_cell(unit_type, num_units, forget_bias, dropout, mode, residual_connection=False, residual_fn=None, trainable=True): dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0 if unit_type == "lstm": single_cell = contrib_rnn.LSTMCell( num_units, forget_bias=forget_bias, trainable=trainable) elif unit_type == "gru": single_cell = contrib_rnn.GRUCell(num_units, trainable=trainable) elif unit_type == "layer_norm_lstm": single_cell = contrib_rnn.LayerNormBasicLSTMCell( num_units, forget_bias=forget_bias, layer_norm=True, trainable=trainable) elif unit_type == "nas": single_cell = contrib_rnn.NASCell(num_units, trainable=trainable) else: raise ValueError("Unknown unit type %s!" % unit_type) if dropout > 0.0: single_cell = contrib_rnn.DropoutWrapper( cell=single_cell, input_keep_prob=(1.0 - dropout)) if residual_connection: single_cell = contrib_rnn.ResidualWrapper( single_cell, residual_fn=residual_fn) return single_cell def _cell_list(unit_type, num_units, num_layers, num_residual_layers, forget_bias, dropout, mode, single_cell_fn=None, residual_fn=None, trainable=True): if single_cell_fn is None: single_cell_fn = _single_cell cell_list = [] for i in range(num_layers): single_cell = single_cell_fn( unit_type=unit_type, num_units=num_units, forget_bias=forget_bias, dropout=dropout, mode=mode, residual_connection=(i >= num_layers - num_residual_layers), residual_fn=residual_fn, trainable=trainable) cell_list.append(single_cell) return cell_list class GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell): def __init__(self, attention_cell, cells, use_new_attention=False): cells = [attention_cell] + cells self.use_new_attention = use_new_attention super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True) def __call__(self, inputs, state, scope=None): if not contrib_framework.nest.is_sequence(state): raise ValueError( "Expected state to be a tuple of length %d, but received: %s" % (len(self.state_size), state)) scope = "multi_rnn_cell" if scope is None else scope with tf.variable_scope(scope): new_states = [] with tf.variable_scope("cell_0_attention"): attention_cell = self._cells[0] attention_state = state[0] cur_inp, new_attention_state = attention_cell(inputs, attention_state) new_states.append(new_attention_state) for i in range(1, len(self._cells)): with tf.variable_scope("cell_%d" % i): cell = self._cells[i] cur_state = state[i] if self.use_new_attention: cur_inp = tf.concat([cur_inp, new_attention_state.attention], -1) else: cur_inp = tf.concat([cur_inp, attention_state.attention], -1) cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) return cur_inp, tuple(new_states)
Apache License 2.0
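A small numpy illustration of the slicing trick (not the TF code itself): when the cell input is wider than the cell output, for example because an attention vector was concatenated onto it, only the leading slice of the input takes part in the residual sum.

import numpy as np

inputs = np.arange(6, dtype=np.float32).reshape(1, 6)    # [..., input_dim=6]
outputs = np.ones((1, 4), dtype=np.float32)              # [..., output_dim=4]

actual_inputs = inputs[..., :outputs.shape[-1]]          # keep only the leading output_dim entries
residual = outputs + actual_inputs
print(residual)   # [[1. 2. 3. 4.]]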
arviz-devs/arviz
arviz/plots/backends/matplotlib/ppcplot.py
_empirical_cdf
python
def _empirical_cdf(data): return np.sort(data), np.linspace(0, 1, len(data))
Compute empirical cdf of a numpy array. Parameters ---------- data : np.array 1d array Returns ------- np.array, np.array x and y coordinates for the empirical cdf of the data
https://github.com/arviz-devs/arviz/blob/a934308e8d8f63b2b6b06b3badf7c93a88112c97/arviz/plots/backends/matplotlib/ppcplot.py#L453-L466
import logging import platform import matplotlib.pyplot as plt import numpy as np from matplotlib import animation, get_backend from ....stats.density_utils import get_bins, histogram, kde from ...kdeplot import plot_kde from ...plot_utils import _scale_fig_size from . import backend_kwarg_defaults, backend_show, create_axes_grid _log = logging.getLogger(__name__) def plot_ppc( ax, length_plotters, rows, cols, figsize, animated, obs_plotters, pp_plotters, predictive_dataset, pp_sample_ix, kind, alpha, colors, textsize, mean, observed, jitter, total_pp_samples, legend, labeller, group, animation_kwargs, num_pp_samples, backend_kwargs, show, ): if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults(), **backend_kwargs, } if animation_kwargs is None: animation_kwargs = {} if platform.system() == "Linux": animation_kwargs.setdefault("blit", True) else: animation_kwargs.setdefault("blit", False) if alpha is None: if animated: alpha = 1 else: if kind.lower() == "scatter": alpha = 0.7 else: alpha = 0.2 if jitter is None: jitter = 0.0 if jitter < 0.0: raise ValueError("jitter must be >=0") if animated: try: shell = get_ipython().__class__.__name__ if shell == "ZMQInteractiveShell" and get_backend() != "nbAgg": raise Warning( "To run animations inside a notebook you have to use the nbAgg backend. " "Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch " "back to the default backend with `%matplotlib inline` or " "`%matplotlib auto`." ) except NameError: pass if animation_kwargs["blit"] and platform.system() != "Linux": _log.warning( "If you experience problems rendering the animation try setting " "`animation_kwargs({'blit':False}) or changing the plotting backend " "(e.g. to TkAgg)" ) (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size( figsize, textsize, rows, cols ) backend_kwargs.setdefault("figsize", figsize) backend_kwargs.setdefault("squeeze", True) if ax is None: fig, axes = create_axes_grid(length_plotters, rows, cols, backend_kwargs=backend_kwargs) else: axes = np.ravel(ax) if len(axes) != length_plotters: raise ValueError( "Found {} variables to plot but {} axes instances. 
They must be equal.".format( length_plotters, len(axes) ) ) if animated: fig = axes[0].get_figure() if not all((ax.get_figure() is fig for ax in axes)): raise ValueError("All axes must be on the same figure for animation to work") for i, ax_i in enumerate(np.ravel(axes)[:length_plotters]): var_name, selection, isel, obs_vals = obs_plotters[i] pp_var_name, _, _, pp_vals = pp_plotters[i] dtype = predictive_dataset[pp_var_name].dtype.kind if dtype not in ["i", "f"]: raise ValueError( f"The data type of the predictive data must be one of 'i' or 'f', but is '{dtype}'" ) obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == "kde": plot_kwargs = {"color": colors[0], "alpha": alpha, "linewidth": 0.5 * linewidth} if dtype == "i": plot_kwargs["drawstyle"] = "steps-pre" ax_i.plot([], color=colors[0], label=f"{group.capitalize()} predictive") if observed: if dtype == "f": plot_kde( obs_vals, label="Observed", plot_kwargs={"color": colors[1], "linewidth": linewidth, "zorder": 3}, fill_kwargs={"alpha": 0}, ax=ax_i, legend=legend, ) else: bins = get_bins(obs_vals) _, hist, bin_edges = histogram(obs_vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, label="Observed", color=colors[1], linewidth=linewidth, zorder=3, drawstyle=plot_kwargs["drawstyle"], ) pp_densities = [] pp_xs = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == "f": pp_x, pp_density = kde(vals) pp_densities.append(pp_density) pp_xs.append(pp_x) else: bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) pp_densities.append(hist) pp_xs.append(bin_edges) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs ) else: if dtype == "f": ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities), **plot_kwargs) else: for x_s, y_s in zip(pp_xs, pp_densities): ax_i.plot(x_s, y_s, **plot_kwargs) if mean: label = f"{group.capitalize()} predictive mean" if dtype == "f": rep = len(pp_densities) len_density = len(pp_densities[0]) new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density) new_d = np.zeros((rep, len_density)) bins = np.digitize(pp_xs, new_x, right=True) new_x -= (new_x[1] - new_x[0]) / 2 for irep in range(rep): new_d[irep][bins[irep]] = pp_densities[irep] ax_i.plot( new_x, new_d.mean(0), color=colors[2], linestyle="--", linewidth=linewidth * 1.5, zorder=2, label=label, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, color=colors[2], linewidth=linewidth * 1.5, label=label, zorder=2, linestyle="--", drawstyle=plot_kwargs["drawstyle"], ) ax_i.tick_params(labelsize=xt_labelsize) ax_i.set_yticks([]) elif kind == "cumulative": drawstyle = "default" if dtype == "f" else "steps-pre" if observed: ax_i.plot( *_empirical_cdf(obs_vals), color=colors[1], linewidth=linewidth, label="Observed", drawstyle=drawstyle, zorder=3, ) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, kind=kind, alpha=alpha, drawstyle=drawstyle, linewidth=linewidth, ) else: pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size)) for idx, vals in enumerate(pp_sampled_vals): vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities[2 * idx] = pp_x pp_densities[2 * idx + 1] = pp_density ax_i.plot( *pp_densities, alpha=alpha, color=colors[0], 
drawstyle=drawstyle, linewidth=linewidth, ) ax_i.plot([], color=colors[0], label="Posterior predictive") if mean: ax_i.plot( *_empirical_cdf(pp_vals.flatten()), color=colors[2], linestyle="--", linewidth=linewidth * 1.5, drawstyle=drawstyle, label="Posterior predictive mean", ) ax_i.set_yticks([0, 0.5, 1]) elif kind == "scatter": if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": colors[2], "linestyle": "--", "linewidth": linewidth * 1.5, "zorder": 3, }, label="Posterior predictive mean", ax=ax_i, legend=legend, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, color=colors[2], linewidth=linewidth * 1.5, label="Posterior predictive mean", zorder=3, linestyle="--", drawstyle="steps-pre", ) _, limit = ax_i.get_ylim() limit *= 1.05 y_rows = np.linspace(0, limit, num_pp_samples + 1) jitter_scale = y_rows[1] - y_rows[0] scale_low = 0 scale_high = jitter_scale * jitter if observed: obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform( low=scale_low, high=scale_high, size=len(obs_vals) ) ax_i.plot( obs_vals, obs_yvals, "o", color=colors[1], markersize=markersize, alpha=alpha, label="Observed", zorder=4, ) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, kind=kind, color=colors[0], height=y_rows.mean() * 0.5, markersize=markersize, ) else: for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax_i.plot( vals, yvals, "o", zorder=2, color=colors[0], markersize=markersize, alpha=alpha, ) ax_i.plot([], color=colors[0], marker="o", label="Posterior predictive") ax_i.set_yticks([]) ax_i.set_xlabel( labeller.make_pp_label(var_name, pp_var_name, selection, isel), fontsize=ax_labelsize ) if legend: if i == 0: ax_i.legend(fontsize=xt_labelsize * 0.75) else: ax_i.legend([]) if backend_show(show): plt.show() if animated: ani = animation.FuncAnimation( fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs ) return axes, ani else: return axes def _set_animation( pp_sampled_vals, ax, dtype=None, kind="density", alpha=None, color=None, drawstyle=None, linewidth=None, height=None, markersize=None, plot_kwargs=None, ): if kind == "kde": length = len(pp_sampled_vals) if dtype == "f": x_vals, y_vals = kde(pp_sampled_vals[0]) max_max = max([max(kde(pp_sampled_vals[i])[1]) for i in range(length)]) ax.set_ylim(0, max_max) (line,) = ax.plot(x_vals, y_vals, **plot_kwargs) def animate(i): x_vals, y_vals = kde(pp_sampled_vals[i]) line.set_data(x_vals, y_vals) return (line,) else: vals = pp_sampled_vals[0] _, y_vals, x_vals = histogram(vals, bins="auto") (line,) = ax.plot(x_vals[:-1], y_vals, **plot_kwargs) max_max = max( [max(histogram(pp_sampled_vals[i], bins="auto")[1]) for i in range(length)] ) ax.set_ylim(0, max_max) def animate(i): _, y_vals, x_vals = histogram(pp_sampled_vals[i], bins="auto") line.set_data(x_vals[:-1], y_vals) return (line,) elif kind == "cumulative": x_vals, y_vals = _empirical_cdf(pp_sampled_vals[0]) (line,) = ax.plot( x_vals, y_vals, alpha=alpha, color=color, drawstyle=drawstyle, linewidth=linewidth ) def animate(i): x_vals, y_vals = _empirical_cdf(pp_sampled_vals[i]) line.set_data(x_vals, y_vals) return (line,) elif kind == "scatter": x_vals = pp_sampled_vals[0] y_vals = np.full_like(x_vals, height, dtype=np.float64) 
(line,) = ax.plot( x_vals, y_vals, "o", zorder=2, color=color, markersize=markersize, alpha=alpha ) def animate(i): line.set_xdata(np.ravel(pp_sampled_vals[i])) return (line,) def init(): if kind != "scatter": line.set_data([], []) else: line.set_xdata([]) return (line,) return animate, init
Apache License 2.0
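The context code above appears to be the matplotlib backend of ArviZ's posterior/prior predictive check plot; assuming that is the case, it is normally reached through the public plot_ppc entry point rather than called directly. A minimal illustrative call (the example dataset name and argument values are assumptions, not taken from this record):

import arviz as az

# Load a bundled InferenceData example and draw a cumulative PPC plot;
# kind can be "kde", "cumulative", or "scatter", matching the branches above.
idata = az.load_arviz_data("centered_eight")
az.plot_ppc(idata, kind="cumulative", num_pp_samples=50)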
ericssonresearch/calvin-base
calvin/runtime/north/calvincontrol.py
CalvinControlTunnelServer.tunnel_down
python
def tunnel_down(self, tunnel): self.controltunnels[tunnel.peer_node_id].close() del self.tunnels[tunnel.peer_node_id] del self.controltunnels[tunnel.peer_node_id] return True
Callback invoked when the tunnel is not accepted or is going down.
https://github.com/ericssonresearch/calvin-base/blob/bc4645c2061c30ca305a660e48dc86e3317f5b6f/calvin/runtime/north/calvincontrol.py#L261-L267
import json from random import randint from urlparse import urlparse from calvin.utilities.calvinlogger import get_logger from calvin.utilities.calvin_callback import CalvinCB from calvin.runtime.south.async import server_connection from calvin.requests import calvinresponse from calvin.utilities.security import Security from calvin.utilities import calvinuuid from calvin.utilities.issuetracker import IssueTracker from control_apis import routes from control_apis import security_api from control_apis import runtime_api from control_apis import application_api from control_apis import documentation_api from control_apis import logging_api from control_apis import registry_api from control_apis import uicalvinsys_api from control_apis import proxyhandler_api _log = get_logger(__name__) _calvincontrol = None def get_calvincontrol(): global _calvincontrol if _calvincontrol is None: _calvincontrol = CalvinControl() return _calvincontrol class CalvinControl(object): def __init__(self): self.node = None self.loggers = {} self.server = None self.connections = {} self.tunnel = None self.host = None self.tunnel_server = None self.tunnel_client = None self.security = None self.token_dict = None self.routes = routes.install_handlers(self) def start(self, node, uri, tunnel=False, external_uri=None): self.node = node self.security = Security(self.node) schema, _ = uri.split(':', 1) if tunnel: self.tunnel_client = CalvinControlTunnelClient(uri, self) else: url = urlparse(uri) self.port = int(url.port) self.host = url.hostname if external_uri is not None: self.external_host = urlparse(external_uri).hostname else: self.external_host = self.host _log.info("Control API listening on: %s:%s" % (self.host, self.port)) self.server = server_connection.ServerProtocolFactory(self.handle_request, "http", node_name=node.node_name) self.server.start(self.host, self.port) self.tunnel_server = CalvinControlTunnelServer(self.node) def stop(self): self.server.stop() if self.tunnel_server is not None: self.tunnel_server.stop() if self.tunnel_client is not None: self.tunnel_client.stop() def close_log_tunnel(self, handle): for user_id, logger in self.loggers: if logger.handle == handle: del self.loggers[user_id] def handle_request(self, actor_ids=None): if self.server.pending_connections: addr, conn = self.server.accept() self.connections[addr] = conn for handle, connection in self.connections.items(): if connection.data_available: command, headers, data = connection.data_get() self.route_request(handle, connection, command, headers, data) def _handler_for_route(self, command): for re_route, handler in self.routes: match_object = re_route.match(command) if match_object: return handler, match_object return None, None def route_request(self, handle, connection, command, headers, data): if self.node.quitting: self.send_response(handle, connection, None, status=calvinresponse.INTERNAL_ERROR) return try: issuetracker = IssueTracker() handler, match = self._handler_for_route(command) if handler: credentials = None if data: data = json.loads(data) _log.debug("Calvin control handles:%s\n%s\n---------------" % (command, data)) handler(handle, connection, match, data, headers) else: _log.error("No route found for: %s\n%s" % (command, data)) self.send_response(handle, connection, None, status=404) except Exception as e: _log.info("Failed to parse request", exc_info=e) self.send_response(handle, connection, None, status=calvinresponse.BAD_REQUEST) def send_response(self, handle, connection, data, status=200, content_type=None): 
content_type = content_type or "Content-Type: application/json" content_type += "\n" if data is None and status in range(200, 207): status = 204 header = "HTTP/1.0 " + str(status) + " " + calvinresponse.RESPONSE_CODES[status] + "\n" + ("" if data is None else content_type ) + "Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n" + "Access-Control-Allow-Origin: *\r\n" + "\n" if connection is None: msg = {"cmd": "httpresp", "msgid": handle, "header": header, "data": data} self.tunnel_client.send(msg) else: if not connection.connection_lost: connection.send(header) if data: connection.send(data) connection.close() del self.connections[handle] def send_streamheader(self, handle, connection): response = "HTTP/1.0 200 OK\n" + "Content-Type: text/event-stream\n" + "Access-Control-Allow-Origin: *\r\n" + "\n" if connection is not None: if not connection.connection_lost: connection.send(response) elif self.tunnel_client is not None: msg = {"cmd": "logresp", "msgid": handle, "header": response, "data": None} self.tunnel_client.send(msg) def log_actor_firing(self, actor_id, action_method, tokens_produced, tokens_consumed, production): pass def log_actor_new(self, actor_id, actor_name, actor_type, is_shadow): pass def log_actor_destroy(self, actor_id): pass def log_actor_migrate(self, actor_id, dest_node_id): pass def log_actor_replicate(self, actor_id, replica_actor_id, replication_id, dest_node_id): pass def log_actor_dereplicate(self, actor_id, replica_actor_id, replication_id): pass def log_application_new(self, application_id, application_name): pass def log_application_destroy(self, application_id): pass def log_link_connected(self, peer_id, uri): pass def log_link_disconnected(self, peer_id): pass def log_log_message(self, message): pass class CalvinControlTunnelServer(object): def __init__(self, node): self.node = node self.tunnels = {} self.controltunnels = {} self.node.proto.register_tunnel_handler("control", CalvinCB(self.tunnel_request_handles)) def stop(self): for _, control in self.controltunnels.items(): control.close() def tunnel_request_handles(self, tunnel): self.tunnels[tunnel.peer_node_id] = tunnel self.controltunnels[tunnel.peer_node_id] = CalvinControlTunnel(tunnel) tunnel.register_tunnel_down(CalvinCB(self.tunnel_down, tunnel)) tunnel.register_tunnel_up(CalvinCB(self.tunnel_up, tunnel)) tunnel.register_recv(CalvinCB(self.tunnel_recv_handler, tunnel)) return True
Apache License 2.0
city-of-helsinki/wagtail-svgmap
wagtail_svgmap/svg.py
serialize_svg
python
def serialize_svg(tree, encoding='UTF-8', xml_declaration=True): bio = BytesIO() fixup_unqualified_attributes(tree, namespace=SVG_NAMESPACE) tree.write(bio, encoding=encoding, xml_declaration=xml_declaration, default_namespace=SVG_NAMESPACE) return bio.getvalue().decode(encoding)
Serialize an ElementTree as SVG. :param tree: The tree to process. :type tree: xml.etree.ElementTree.ElementTree :param encoding: The character encoding to serialize with (defaults to UTF-8). :type encoding: str :param xml_declaration: Whether to emit the XML declaration tag or not :type xml_declaration: bool :return: the serialized XML (as a Unicode string) :rtype: str
https://github.com/city-of-helsinki/wagtail-svgmap/blob/a9883570029a23b555fd0ce2b8bdfc423c3032ed/wagtail_svgmap/svg.py#L168-L182
import re from six import BytesIO try: from xml.etree import cElementTree as ET except ImportError: from xml.etree import ElementTree as ET SVG_NAMESPACE = 'http://www.w3.org/2000/svg' XLINK_NAMESPACE = 'http://www.w3.org/1999/xlink' ET.register_namespace('svg', SVG_NAMESPACE) ET.register_namespace('xlink', XLINK_NAMESPACE) VISIBLE_SVG_TAGS = frozenset({ 'a', 'circle', 'ellipse', 'g', 'image', 'line', 'path', 'polygon', 'polyline', 'rect', 'switch', 'text', 'textPath', 'tref', 'tspan', 'use', }) def find_ids(svg_stream, in_elements=VISIBLE_SVG_TAGS): for event, elem in ET.iterparse(svg_stream, events=('end',)): tag_without_ns = elem.tag.split('}')[-1] if in_elements and tag_without_ns not in in_elements: continue id = elem.get('id') if id: yield id class Link(object): def __init__(self, url, target=None): self.url = str(url) self.target = target def get_element(self): return ET.Element( '{%s}a' % SVG_NAMESPACE, dict(kv for kv in self.get_element_attribs().items() if kv[0] and kv[1]) ) def get_element_attribs(self): return { ('{%s}href' % XLINK_NAMESPACE): self.url, ('{%s}target' % SVG_NAMESPACE): self.target, } def wrap_elements_in_links(tree, id_to_url_map, in_elements=VISIBLE_SVG_TAGS): if isinstance(tree, str) or hasattr(tree, 'read'): tree = ET.parse(tree) parent_map = {child: parent for parent in tree.iter() for child in parent} element_to_url = {} for elem in parent_map.keys(): tag_without_ns = elem.tag.split('}')[-1] if in_elements and tag_without_ns not in in_elements: continue url = id_to_url_map.get(elem.get('id')) if not url: continue if isinstance(url, str): url = Link(url) element_to_url[elem] = url for elem, url in element_to_url.items(): a_element = url.get_element() parent = parent_map[elem] for index, test_element in enumerate(parent): if elem is test_element: break else: raise ValueError("tree broken") parent.remove(elem) a_element.append(elem) elem.tail = (elem.tail or '').strip() parent.insert(index, a_element) parent_map[elem] = a_element return tree def fixup_unqualified_attributes(tree, namespace): for elem in tree.iter(): for key in [key for key in elem.keys() if key[0] != '{']: elem.attrib['{%s}%s' % (namespace, key)] = elem.attrib.pop(key) return tree
BSD 3-Clause New or Revised License
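A short usage sketch for the wagtail_svgmap.svg helpers shown in this record; the inline SVG markup and the target URL are placeholders, not taken from the project:

from io import BytesIO

from wagtail_svgmap.svg import serialize_svg, wrap_elements_in_links

SVG_SOURCE = b'<svg xmlns="http://www.w3.org/2000/svg"><rect id="region-1"/></svg>'

# Wrap the element with id="region-1" in an <a xlink:href="..."> link element,
# then serialize the tree back into a Unicode SVG string (XML declaration included).
tree = wrap_elements_in_links(BytesIO(SVG_SOURCE), {"region-1": "https://example.com/"})
print(serialize_svg(tree))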
cuthbertlab/music21-tools
trecento/quodJactatur.py
findRetrogradeVoices
python
def findRetrogradeVoices(show=True): for transpose in [1, 2, -2, 3, -3, 4, -4]: for invert in [False, True]: qj1 = getQJ() qj2 = getQJ() if transpose != 1: transposeStreamDiatonic(qj2, transpose) if invert is True: qj2.invertDiatonic(qj2.flat.notesAndRests[0], inPlace=True) qj2 = reverse(qj2, makeNotation = False) qj = stream.Score() qj.insert(0, qj2.flat) qj.insert(0, qj1.flat) qjChords = qj.chordify() consScore = 0 totIntervals = 0 for n in qjChords.flat.notesAndRests: strength = getStrengthForNote(n) if n.isRest is True or len(n.pitches) < 2: thisScore = strength else: int1 = interval.Interval(n.pitches[0], n.pitches[1]) if int1.generic.simpleUndirected in [1, 3, 4, 5]: thisScore = strength elif int1.generic.simpleUndirected == 6: thisScore = strength / 2.0 else: thisScore = -2 * strength if n.duration.quarterLength < 2: thisScore = thisScore * n.duration.quarterLength else: thisScore = thisScore * 8 consScore += thisScore totIntervals += 1 n.lyric = str(thisScore) finalScore = int(100*(consScore + 0.0)/totIntervals) qj.insert(0, qjChords.flat) qj2.flat.notesAndRests[0].addLyric('Trans: ' + str(transpose)) qj2.flat.notesAndRests[0].addLyric('Invert: ' + str(invert)) qj1.flat.notesAndRests[0].addLyric('Score: ' + str(finalScore)) if show: qj.show() else: if invert: invStr = "Invert" else: invStr = " " print(str(transpose) + " " + invStr + " " + str(finalScore))
The structure of the piece strongly suggests a retrograde solution (e.g., there is a cadence in m5, another five measures from the end, and one at the exact center). This method tries all transpositions of one voice against the other and gives positive points to intervals of 3, 4, 5, 6, and 8 (including tritones, since they might be fixed with other voices; the 4th is included since there could be a 3rd or 5th below it).
https://github.com/cuthbertlab/music21-tools/blob/78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee/trecento/quodJactatur.py#L205-L264
import copy import unittest from music21 import clef from music21 import corpus from music21 import exceptions21 from music21 import instrument from music21 import interval from music21 import key from music21 import layout from music21 import metadata from music21 import meter from music21 import note from music21 import stream def reverse(self, *, inPlace=False, classesToMove=(key.KeySignature, meter.TimeSignature, clef.Clef, metadata.Metadata, instrument.Instrument, layout.SystemLayout), makeNotation=False): highestTime = self.highestTime if inPlace is True: returnObj = self raise Exception("Whoops haven't written inPlace=True yet for reverse") else: returnObj = stream.Part() sf = self.flat for myEl in sf: if isinstance(myEl, classesToMove): continue if myEl.duration is not None: releaseTime = myEl.getOffsetBySite(sf) + myEl.duration.quarterLength else: releaseTime = myEl.getOffsetBySite(sf) newOffset = highestTime - releaseTime returnObj.insert(newOffset, myEl) returnObj.insert(0, sf.getElementsByClass(layout.SystemLayout)[0]) returnObj.insert(0, sf.getElementsByClass(clef.Clef)[0]) returnObj.insert(0, sf.getElementsByClass(key.KeySignature)[0]) returnObj.insert(0, sf.getElementsByClass(meter.TimeSignature)[0]) returnObj.insert(0, sf.getElementsByClass(instrument.Instrument)[0]) for thisP in returnObj.flat.pitches: if thisP.accidental is not None: thisP.accidental.displayStatus = None if makeNotation is True: return returnObj.makeNotation() else: return returnObj def prependBlankMeasures(myStream, measuresToAppend=1, *, inPlace=False): measureDuration = myStream.flat.getElementsByClass(meter.TimeSignature )[0].barDuration.quarterLength if inPlace: ms = myStream else: ms = copy.deepcopy(myStream) for dummy in range(measuresToAppend): qjBlankM = stream.Measure() hr = note.Rest() hr.duration.quarterLength = measureDuration qjBlankM.append(hr) ms.insertAndShift(0, qjBlankM) return ms def transposeStreamDiatonic(myStream, diatonicInterval=1): if diatonicInterval == 1: return myStream for n in myStream.flat.notesAndRests: if n.isRest is False: if diatonicInterval >= 1: n.pitch.diatonicNoteNum += diatonicInterval - 1 else: n.pitch.diatonicNoteNum += diatonicInterval + 1 if n.pitch.step == 'B': n.pitch.name = 'B-' else: n.pitch.name = n.pitch.step n.pitch.accidental = None return myStream PERFCONS = ['P1', 'P5', 'P8'] IMPERFCONS = ['m3', 'M3', 'm6', 'M6'] cachedParts = {} def getQJ(): qj = corpus.parse("ciconia/quod_jactatur") qjPart = qj.getElementsByClass(stream.Part)[0] qjPart.transpose("P-8", inPlace=True) qjPart.replace(qjPart.flat.getElementsByClass(clef.Clef)[0], clef.BassClef()) cachedParts['1-0-False-False'] = copy.deepcopy(qjPart) return qjPart
BSD 3-Clause New or Revised License
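The interval-scoring rule that the docstring describes (and that findRetrogradeVoices applies note by note) can be isolated as a small standalone function; the following is a paraphrase for illustration, not code from the module:

def interval_score(simple_undirected, strength, quarter_length):
    # Simple interval classes 1, 3, 4, and 5 get full credit, 6 gets half credit,
    # and everything else is penalized; short notes are weighted by their quarter
    # length, while long notes get a fixed weight of 8.
    if simple_undirected in (1, 3, 4, 5):
        score = strength
    elif simple_undirected == 6:
        score = strength / 2.0
    else:
        score = -2 * strength
    return score * (quarter_length if quarter_length < 2 else 8)

print(interval_score(5, 1.0, 1.0))   # consonant fifth on a short note ->   1.0
print(interval_score(2, 1.0, 4.0))   # dissonant second on a long note -> -16.0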
online-ml/river
river/neighbors/sam_knn.py
STMSizer.get_new_stm_size
python
def get_new_stm_size( aprox_adaption_strategy, labels, n_neighbours, get_labels_fct, prediction_histories, distances_stm, min_stm_size, ): if aprox_adaption_strategy: return STMSizer._get_max_acc_approx_window_size( labels, n_neighbours, get_labels_fct, prediction_histories, distances_stm, min_size=min_stm_size, ) elif aprox_adaption_strategy is not None and not aprox_adaption_strategy: return STMSizer._get_max_acc_window_size( labels, n_neighbours, get_labels_fct, prediction_histories, distances_stm, min_size=min_stm_size, ) elif aprox_adaption_strategy is None: return len(labels), prediction_histories else: raise Exception(f"Invalid adaption_strategy: {aprox_adaption_strategy}")
Returns the new STM size and the updated prediction histories.
https://github.com/online-ml/river/blob/842f7c5be5574e62a3aab0b46d996eb5f1d73beb/river/neighbors/sam_knn.py#L576-L610
import copy as cp import logging from collections import deque import numpy as np from river.base import Classifier from river.utils import dict2numpy from . import libNearestNeighbor class SAMKNNClassifier(Classifier): def __init__( self, n_neighbors: int = 5, distance_weighting=True, window_size: int = 5000, ltm_size: float = 0.4, min_stm_size: int = 50, stm_aprox_adaption=True, use_ltm=True, ): super().__init__() self.n_neighbors = n_neighbors self.distance_weighting = distance_weighting self.window_size = window_size self.ltm_size = ltm_size self.min_stm_size = min_stm_size self.use_ltm = use_ltm self._stm_samples = None self._stm_labels = np.empty(shape=0, dtype=np.int32) self._ltm_samples = None self._ltm_labels = np.empty(shape=0, dtype=np.int32) self.max_ltm_size = self.ltm_size * self.window_size self.max_stm_size = self.window_size - self.max_ltm_size self.min_stm_size = self.min_stm_size self.stm_aprox_adaption = stm_aprox_adaption self.stm_distances = np.zeros(shape=(window_size + 1, window_size + 1)) if self.distance_weighting: self.get_labels_fct = SAMKNNClassifier._get_distance_weighted_label else: self.get_labels_fct = SAMKNNClassifier._get_maj_label if self.use_ltm: self.predict_fct = self._predict_by_all_memories self.size_check_fct = self._size_check_stmltm else: self.predict_fct = self._predict_by_stm self.size_check_fct = self._size_check_fade_out self.interleaved_pred_histories = {} self.ltm_pred_history = deque([]) self.stm_pred_history = deque([]) self.cmp_pred_history = deque([]) self.train_step_count = 0 self.stm_sizes = [] self.ltm_sizes = [] self.n_stm_correct = 0 self.n_ltm_correct = 0 self.n_cm_correct = 0 self.n_possible_correct_predictions = 0 self.n_correct_predictions = 0 self.classifier_choice = [] self.pred_history = [] def _unit_test_skips(self): return {"check_emerging_features", "check_disappearing_features"} @staticmethod def _get_distances(sample, samples): return libNearestNeighbor.get1ToNDistances(sample, samples) def _cluster_down(self, samples, labels): from sklearn.cluster import KMeans logging.debug("cluster Down %d" % self.train_step_count) uniqueLabels = np.unique(labels) newSamples = np.empty(shape=(0, samples.shape[1])) newLabels = np.empty(shape=0, dtype=np.int32) for label in uniqueLabels: tmpSamples = samples[labels == label] newLength = int(max(tmpSamples.shape[0] / 2, 1)) clustering = KMeans(n_clusters=newLength, n_init=1, random_state=0) clustering.fit(tmpSamples) newSamples = np.vstack([newSamples, clustering.cluster_centers_]) newLabels = np.append( newLabels, label * np.ones(shape=newLength, dtype=np.int32) ) return newSamples, newLabels def _size_check_fade_out(self): STMShortened = False if len(self._stm_labels) > self.max_stm_size + self.max_ltm_size: STMShortened = True self._stm_samples = np.delete(self._stm_samples, 0, 0) self._stm_labels = np.delete(self._stm_labels, 0, 0) self.stm_distances[ : len(self._stm_labels), : len(self._stm_labels) ] = self.stm_distances[ 1 : len(self._stm_labels) + 1, 1 : len(self._stm_labels) + 1 ] if self.stm_aprox_adaption: key_set = list(self.interleaved_pred_histories.keys()) if 0 in key_set: self.interleaved_pred_histories[0].pop(0) updated_histories = cp.deepcopy(self.interleaved_pred_histories) for key in self.interleaved_pred_histories.keys(): if key > 0: if key == 1: updated_histories.pop(0, None) tmp = updated_histories[key] updated_histories.pop(key, None) updated_histories[key - 1] = tmp self.interleaved_pred_histories = updated_histories else: self.interleaved_pred_histories = {} 
return STMShortened def _size_check_stmltm(self): stm_shortened = False if ( len(self._stm_labels) + len(self._ltm_labels) > self.max_stm_size + self.max_ltm_size ): if len(self._ltm_labels) > self.max_ltm_size: self._ltm_samples, self._ltm_labels = self._cluster_down( self._ltm_samples, self._ltm_labels ) else: if ( len(self._stm_labels) + len(self._ltm_labels) > self.max_stm_size + self.max_ltm_size ): stm_shortened = True n_shifts = int(self.max_ltm_size - len(self._ltm_labels) + 1) shift_range = range(n_shifts) self._ltm_samples = np.vstack( [self._ltm_samples, self._stm_samples[:n_shifts, :]] ) self._ltm_labels = np.append( self._ltm_labels, self._stm_labels[:n_shifts] ) self._ltm_samples, self._ltm_labels = self._cluster_down( self._ltm_samples, self._ltm_labels ) self._stm_samples = np.delete(self._stm_samples, shift_range, 0) self._stm_labels = np.delete(self._stm_labels, shift_range, 0) self.stm_distances[ : len(self._stm_labels), : len(self._stm_labels) ] = self.stm_distances[ n_shifts : len(self._stm_labels) + n_shifts, n_shifts : len(self._stm_labels) + n_shifts, ] for _ in shift_range: self.ltm_pred_history.popleft() self.stm_pred_history.popleft() self.cmp_pred_history.popleft() self.interleaved_pred_histories = {} return stm_shortened def _clean_samples(self, samples_cl, labels_cl, only_last=False): if len(self._stm_labels) > self.n_neighbors and samples_cl.shape[0] > 0: if only_last: loop_range = [len(self._stm_labels) - 1] else: loop_range = range(len(self._stm_labels)) for i in loop_range: if len(labels_cl) == 0: break samples_shortened = np.delete(self._stm_samples, i, 0) labels_shortened = np.delete(self._stm_labels, i, 0) distances_stm = SAMKNNClassifier._get_distances( self._stm_samples[i, :], samples_shortened ) nn_indices_stm = libNearestNeighbor.nArgMin( self.n_neighbors, distances_stm )[0] distances_ltm = SAMKNNClassifier._get_distances( self._stm_samples[i, :], samples_cl ) nn_indices_ltm = libNearestNeighbor.nArgMin( min(len(distances_ltm), self.n_neighbors), distances_ltm )[0] correct_indices_stm = nn_indices_stm[ labels_shortened[nn_indices_stm] == self._stm_labels[i] ] if len(correct_indices_stm) > 0: dist_threshold = np.max(distances_stm[correct_indices_stm]) wrong_indices_ltm = nn_indices_ltm[ labels_cl[nn_indices_ltm] != self._stm_labels[i] ] del_indices = np.where( distances_ltm[wrong_indices_ltm] <= dist_threshold )[0] samples_cl = np.delete( samples_cl, wrong_indices_ltm[del_indices], 0 ) labels_cl = np.delete(labels_cl, wrong_indices_ltm[del_indices], 0) return samples_cl, labels_cl def _learn_one(self, x, y): distances_stm = SAMKNNClassifier._get_distances(x, self._stm_samples) if not self.use_ltm: self._learn_one_by_stm(x, y, distances_stm) else: self._learn_one_by_all_memories(x, y, distances_stm) self.train_step_count += 1 self._stm_samples = np.vstack([self._stm_samples, x]) self._stm_labels = np.append(self._stm_labels, y) stm_shortened = self.size_check_fct() self._ltm_samples, self._ltm_labels = self._clean_samples( self._ltm_samples, self._ltm_labels, only_last=True ) if self.stm_aprox_adaption is not None: if stm_shortened: distances_stm = SAMKNNClassifier._get_distances( x, self._stm_samples[:-1, :] ) self.stm_distances[ len(self._stm_labels) - 1, : len(self._stm_labels) - 1 ] = distances_stm old_window_size = len(self._stm_labels) ( new_window_size, self.interleaved_pred_histories, ) = STMSizer.get_new_stm_size( self.stm_aprox_adaption, self._stm_labels, self.n_neighbors, self.get_labels_fct, self.interleaved_pred_histories, 
self.stm_distances, self.min_stm_size, ) if new_window_size < old_window_size: del_range = range(old_window_size - new_window_size) old_stm_samples = self._stm_samples[del_range, :] old_stm_labels = self._stm_labels[del_range] self._stm_samples = np.delete(self._stm_samples, del_range, 0) self._stm_labels = np.delete(self._stm_labels, del_range, 0) self.stm_distances[ : len(self._stm_labels), : len(self._stm_labels) ] = self.stm_distances[ (old_window_size - new_window_size) : ( (old_window_size - new_window_size) + len(self._stm_labels) ), (old_window_size - new_window_size) : ( (old_window_size - new_window_size) + len(self._stm_labels) ), ] if self.use_ltm: for _ in del_range: self.stm_pred_history.popleft() self.ltm_pred_history.popleft() self.cmp_pred_history.popleft() old_stm_samples, old_stm_labels = self._clean_samples( old_stm_samples, old_stm_labels ) self._ltm_samples = np.vstack([self._ltm_samples, old_stm_samples]) self._ltm_labels = np.append(self._ltm_labels, old_stm_labels) self.size_check_fct() self.stm_sizes.append(len(self._stm_labels)) self.ltm_sizes.append(len(self._ltm_labels)) def _learn_one_by_all_memories(self, sample, label, distances_stm): predicted_label_ltm = 0 predicted_label_stm = 0 predicted_label_both = 0 classifier_choice = 0 if len(self._stm_labels) == 0: predicted_label = predicted_label_stm else: if len(self._stm_labels) < self.n_neighbors: predicted_label_stm = self.get_labels_fct( distances_stm, self._stm_labels, len(self._stm_labels) )[0] predicted_label = predicted_label_stm else: distances_ltm = SAMKNNClassifier._get_distances( sample, self._ltm_samples ) predicted_label_stm = self.get_labels_fct( distances_stm, self._stm_labels, self.n_neighbors )[0] predicted_label_both = self.get_labels_fct( np.append(distances_stm, distances_ltm), np.append(self._stm_labels, self._ltm_labels), self.n_neighbors, )[0] if len(self._ltm_labels) >= self.n_neighbors: predicted_label_ltm = self.get_labels_fct( distances_ltm, self._ltm_labels, self.n_neighbors )[0] correct_ltm = np.sum(self.ltm_pred_history) correct_stm = np.sum(self.stm_pred_history) correct_both = np.sum(self.cmp_pred_history) labels = [ predicted_label_stm, predicted_label_ltm, predicted_label_both, ] classifier_choice = np.argmax( [correct_stm, correct_ltm, correct_both] ) predicted_label = labels[classifier_choice] else: predicted_label = predicted_label_stm self.classifier_choice.append(classifier_choice) self.cmp_pred_history.append(predicted_label_both == label) self.n_cm_correct += predicted_label_both == label self.stm_pred_history.append(predicted_label_stm == label) self.n_stm_correct += predicted_label_stm == label self.ltm_pred_history.append(predicted_label_ltm == label) self.n_ltm_correct += predicted_label_ltm == label self.n_possible_correct_predictions += label in [ predicted_label_stm, predicted_label_both, predicted_label_ltm, ] self.n_correct_predictions += predicted_label == label return predicted_label def _predict_by_all_memories(self, sample, label, distances_stm): predicted_label_stm = 0 if len(self._stm_labels) == 0: predicted_label = predicted_label_stm else: if len(self._stm_labels) < self.n_neighbors: predicted_label_stm = self.get_labels_fct( distances_stm, self._stm_labels, len(self._stm_labels) )[0] predicted_label = predicted_label_stm else: distances_ltm = SAMKNNClassifier._get_distances( sample, self._ltm_samples ) predicted_label_stm = self.get_labels_fct( distances_stm, self._stm_labels, self.n_neighbors )[0] distances_new = cp.deepcopy(distances_stm) 
stm_labels_new = cp.deepcopy(self._stm_labels) predicted_label_both = self.get_labels_fct( np.append(distances_new, distances_ltm), np.append(stm_labels_new, self._ltm_labels), self.n_neighbors, )[0] if len(self._ltm_labels) >= self.n_neighbors: predicted_label_ltm = self.get_labels_fct( distances_ltm, self._ltm_labels, self.n_neighbors )[0] correct_ltm = np.sum(self.ltm_pred_history) correct_stm = np.sum(self.stm_pred_history) correct_both = np.sum(self.cmp_pred_history) labels = [ predicted_label_stm, predicted_label_ltm, predicted_label_both, ] classifier_choice = np.argmax( [correct_stm, correct_ltm, correct_both] ) predicted_label = labels[classifier_choice] else: predicted_label = predicted_label_stm return predicted_label def _learn_one_by_stm(self, sample, label, distances_stm): pass def _predict_by_stm(self, sample, label, distances_stm): predicted_label = 0 curr_len = len(self._stm_labels) if curr_len > 0: predicted_label = self.get_labels_fct( distances_stm, self._stm_labels, min(self.n_neighbors, curr_len) )[0] return predicted_label def learn_one(self, x, y) -> "Classifier": x_array = dict2numpy(x) c = len(x_array) if self._stm_samples is None: self._stm_samples = np.empty(shape=(0, c)) self._ltm_samples = np.empty(shape=(0, c)) self._learn_one(x_array, y) return self def predict_one(self, x: dict): x_array = dict2numpy(x) c = len(x_array) if self._stm_samples is None: self._stm_samples = np.empty(shape=(0, c)) self._ltm_samples = np.empty(shape=(0, c)) distances_stm = SAMKNNClassifier._get_distances(x_array, self._stm_samples) return self.predict_fct(x_array, None, distances_stm) def predict_proba_one(self, x): raise NotImplementedError @staticmethod def _get_maj_label(distances, labels, n_neighbors): nn_indices = libNearestNeighbor.nArgMin(n_neighbors, distances) if not isinstance(labels, type(np.array([]))): labels = np.asarray(labels, dtype=np.int8) else: labels = np.int8(labels) pred_labels = libNearestNeighbor.mostCommon(labels[nn_indices]) return pred_labels @staticmethod def _get_distance_weighted_label(distances, labels, n_neighbors): nn_indices = libNearestNeighbor.nArgMin(n_neighbors, distances) sqrtDistances = np.sqrt(distances[nn_indices]) if not isinstance(labels, type(np.array([]))): labels = np.asarray(labels, dtype=np.int8) else: labels = np.int8(labels) predLabels = libNearestNeighbor.getLinearWeightedLabels( labels[nn_indices], sqrtDistances ) return predLabels @property def STMSamples(self): return self._stm_samples @property def STMLabels(self): return self._stm_labels @property def LTMSamples(self): return self._ltm_samples @property def LTMLabels(self): return self._ltm_labels class STMSizer: @staticmethod
BSD 3-Clause New or Revised License
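A hypothetical streaming-usage sketch for the SAMKNNClassifier defined in this record; the import path, the toy samples, and the parameter values are assumptions, and the snippet is untested:

from river.neighbors import SAMKNNClassifier

model = SAMKNNClassifier(n_neighbors=3, window_size=500, stm_aprox_adaption=True)

stream = [({"x1": 0.1, "x2": 1.0}, 0), ({"x1": 0.9, "x2": 0.2}, 1), ({"x1": 0.2, "x2": 0.8}, 0)]
for x, y in stream:
    model.learn_one(x, y)          # updates the STM/LTM and adapts the STM window size

print(model.predict_one({"x1": 0.8, "x2": 0.3}))   # predicted class label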
jnewland/ha-config
custom_components/senseme/fan.py
HASensemeFan.__init__
python
def __init__(self, device: SensemeFan) -> None: super().__init__(device, device.name)
Initialize the entity.
https://github.com/jnewland/ha-config/blob/aeb36a624acb1119b09c06d8ac70e4ec43b9f7fa/custom_components/senseme/fan.py#L53-L55
import math from typing import Any, List, Optional import voluptuous as vol from aiosenseme import SensemeFan from homeassistant.components.fan import ( DIRECTION_FORWARD, DIRECTION_REVERSE, SUPPORT_DIRECTION, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.const import CONF_DEVICE from homeassistant.util.percentage import ( percentage_to_ranged_value, ranged_value_to_percentage, ) from homeassistant.util.temperature import convert, TEMP_CELSIUS from homeassistant.helpers import config_validation as cv, entity_platform, service from . import SensemeEntity from .const import ( DOMAIN, PRESET_MODE_WHOOSH, SENSEME_DIRECTION_FORWARD, SENSEME_DIRECTION_REVERSE, ) async def async_setup_entry(hass, entry, async_add_entities): platform = entity_platform.current_platform.get() device = hass.data[DOMAIN][entry.entry_id][CONF_DEVICE] if device.is_fan: async_add_entities([HASensemeFan(device)]) platform.async_register_entity_service( "smart_mode", { vol.Required("auto_comfort"): cv.string, vol.Optional("cool_temp"): cv.positive_float, vol.Optional("cool_min_speed"): cv.positive_int, vol.Optional("cool_max_speed"): cv.positive_int, }, "async_set_smart_mode", ) class HASensemeFan(SensemeEntity, FanEntity):
MIT License
wildmeorg/wildbook-ia
wbia/dbio/export_subset.py
export_images
python
def export_images(ibs, gid_list, new_dbpath=None): logger.info('Exporting image gid_list=%r' % (gid_list,)) if new_dbpath is None: new_dbpath = make_new_dbpath(ibs, 'gid', gid_list) aid_list = ut.unique_unordered(ut.flatten(ibs.get_image_aids(gid_list))) nid_list = ut.unique_unordered(ibs.get_annot_nids(aid_list)) return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)
Exports a subset of images and the other info they require (annotations and names). TODO: PZ_Master1 needs to backproject information back on to NNP_Master3 and PZ_Master0 Args: ibs (IBEISController): wbia controller object gid_list (list): list of image rowids new_dbpath (str): path of the new database (default = None) Returns: str: new_dbpath
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/dbio/export_subset.py#L288-L310
import logging import utool as ut from wbia.other import ibsfuncs from wbia import constants as const (print, rrr, profile) = ut.inject2(__name__) logger = logging.getLogger('wbia') def check_merge(ibs_src, ibs_dst): aid_list1 = ibs_src.get_valid_aids() gid_list1 = ibs_src.get_annot_gids(aid_list1) gname_list1 = ibs_src.get_image_uris(gid_list1) image_uuid_list1 = ibs_src.get_image_uuids(gid_list1) gid_list2 = ibs_dst.get_image_gids_from_uuid(image_uuid_list1) gname_list2 = ibs_dst.get_image_uris(gid_list2) ut.assert_all_not_None(gid_list1, 'gid_list1') ut.assert_all_not_None(gid_list2, 'gid_list2') ut.assert_lists_eq(gname_list1, gname_list2, 'faild gname') image_uuid_list2 = ibs_dst.get_image_uuids(gid_list2) ut.assert_lists_eq(image_uuid_list1, image_uuid_list2, 'failed uuid') aids_list1 = ibs_src.get_image_aids(gid_list1) aids_list2 = ibs_dst.get_image_aids(gid_list2) avuuids_list1 = ibs_src.unflat_map(ibs_src.get_annot_visual_uuids, aids_list1) avuuids_list2 = ibs_dst.unflat_map(ibs_dst.get_annot_visual_uuids, aids_list2) issubset_list = [ set(avuuids1).issubset(set(avuuids2)) for avuuids1, avuuids2 in zip(avuuids_list1, avuuids_list2) ] assert all(issubset_list), 'ibs_src must be a subset of ibs_dst: issubset_list=%r' % ( issubset_list, ) logger.info('Merge seems ok...') def merge_databases(ibs_src, ibs_dst, rowid_subsets=None, localize_images=True): logger.info( 'BEGIN MERGE OF %r into %r' % (ibs_src.get_dbname(), ibs_dst.get_dbname()) ) ibs_dst.update_annot_visual_uuids(ibs_dst.get_valid_aids()) ibs_src.update_annot_visual_uuids(ibs_src.get_valid_aids()) ibs_src.ensure_contributor_rowids() ibs_dst.ensure_contributor_rowids() ibs_src.fix_invalid_annotmatches() ibs_dst.fix_invalid_annotmatches() if rowid_subsets is not None and const.IMAGE_TABLE in rowid_subsets: src_gid_list = rowid_subsets[const.IMAGE_TABLE] else: src_gid_list = ibs_src.get_valid_gids() imgpath_list = ibs_src.get_image_paths(src_gid_list) dst_imgdir = ibs_dst.get_imgdir() if localize_images: ut.copy_files_to(imgpath_list, dst_imgdir, overwrite=False, verbose=True) ignore_tables = [ 'lblannot', 'lblimage', 'image_lblimage_relationship', 'annotation_lblannot_relationship', 'keys', ] error_tables = [ 'imageset_image_relationship', 'annotgroup_annotation_relationship', 'annotmatch', ] ignore_tables += error_tables ibs_dst.db.merge_databases_new( ibs_src.db, ignore_tables=ignore_tables, rowid_subsets=rowid_subsets ) blacklist_set = set( [ 'Reviewed Images', 'Exemplars', '*Exemplars', 'All Images', '*All Images', '*Undetected Images', '*Ungrouped Images', ] ) imageset_dict = {} src_guuids = ibs_src.get_image_uuids(src_gid_list) src_texts_list = ibs_src.get_image_imagesettext(src_gid_list) for src_guuid, src_text_list in zip(src_guuids, src_texts_list): current_set = imageset_dict.get(src_guuid, set([])) src_text_set = set(src_text_list) - blacklist_set src_text_set_ = set([]) for src_text in src_text_set: src_text_ = '%s / %s' % ( ibs_src.dbname, src_text, ) src_text_set_.add(src_text_) src_text_set = src_text_set_ | current_set imageset_dict[src_guuid] = src_text_set dst_guuids = list(imageset_dict.keys()) dst_gid_list = ibs_dst.get_image_gids_from_uuid(dst_guuids) assert None not in dst_gid_list dst_text_set_list = [list(imageset_dict[dst_guuid]) for dst_guuid in dst_guuids] length_list = map(len, dst_text_set_list) zipped = zip(dst_gid_list, length_list) dst_gid_list = ut.flatten([[dst_gid] * length for dst_gid, length in zipped]) dst_text_list = ut.flatten(dst_text_set_list) assert len(dst_gid_list) == 
len(dst_text_list) ibs_dst.set_image_imagesettext(dst_gid_list, dst_text_list) src_image_uuids = ibs_src.get_image_uuids(src_gid_list) dst_gid_list = ibs_dst.get_image_gids_from_uuid(src_image_uuids) assert None not in dst_gid_list timestamp = ut.timestamp(format_='printable').split()[1] imageset_text = 'Import from %s on %s' % ( ibs_src.dbname, timestamp, ) ibs_dst.set_image_imagesettext(dst_gid_list, [imageset_text] * len(dst_gid_list)) logger.info( 'FINISHED MERGE %r into %r' % (ibs_src.get_dbname(), ibs_dst.get_dbname()) ) def make_new_dbpath(ibs, id_label, id_list): import wbia tag_hash = ut.hashstr_arr(id_list, hashlen=8, alphabet=ut.ALPHABET_27) base_fmtstr = ( ibs.get_dbname() + '_' + id_label + 's=' + tag_hash.replace('(', '_').replace(')', '_') + '_%d' ) dpath = wbia.get_workdir() new_dbpath = ut.non_existing_path(base_fmtstr, dpath) return new_dbpath def export_names(ibs, nid_list, new_dbpath=None): logger.info('Exporting name nid_list=%r' % (nid_list,)) if new_dbpath is None: new_dbpath = make_new_dbpath(ibs, 'nid', nid_list) aid_list = ut.flatten(ibs.get_name_aids(nid_list)) gid_list = ut.unique_unordered(ibs.get_annot_gids(aid_list)) return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath) def find_gid_list(ibs, min_count=500, ensure_annots=False): import random gid_list = ibs.get_valid_gids() reviewed_list = ibs.get_image_reviewed(gid_list) if ensure_annots: aids_list = ibs.get_image_aids(gid_list) reviewed_list = [ 0 if len(aids) == 0 else reviewed for aids, reviewed in zip(aids_list, reviewed_list) ] gid_list = [gid for gid, reviewed in zip(gid_list, reviewed_list) if reviewed == 1] if len(gid_list) < min_count: return None while len(gid_list) > min_count: index = random.randint(0, len(gid_list) - 1) del gid_list[index] return gid_list def __export_reviewed_subset(ibs, min_count=500, ensure_annots=False): from os.path import join gid_list = find_gid_list(ibs, min_count=min_count, ensure_annots=ensure_annots) if gid_list is None: return None new_dbpath = '/' + join('Datasets', 'BACKGROUND', ibs.dbname) logger.info('Exporting to %r with %r images' % (new_dbpath, len(gid_list))) return export_images(ibs, gid_list, new_dbpath=new_dbpath)
Apache License 2.0
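A hypothetical call pattern for export_images; opening the controller and the 'testdb1' database name are illustrative assumptions, not taken from the record:

import wbia
from wbia.dbio.export_subset import export_images

ibs = wbia.opendb('testdb1')               # open an existing controller/database
gid_list = ibs.get_valid_gids()[:5]        # a few image rowids to export
new_dbpath = export_images(ibs, gid_list)  # copies the images plus their annots and names
print(new_dbpath)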
lorinkoz/django-pgschemas
django_pgschemas/contrib/channels3/router.py
TenantProtocolRouter.get_protocol_type_router
python
def get_protocol_type_router(self, tenant_prefix, ws_urlconf): return TenantAwareProtocolTypeRouter( {"websocket": TenantAuthMiddlewareStack(URLRouter(ws_urlconf))}, tenant_prefix )
Subclasses can override this to include more protocols.
https://github.com/lorinkoz/django-pgschemas/blob/445b91cb313223aa908b72d466ce4cbd24418d09/django_pgschemas/contrib/channels3/router.py#L73-L79
from channels.db import database_sync_to_async from channels.routing import ProtocolTypeRouter, URLRouter from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.encoding import force_text from django.utils.module_loading import import_string from ...schema import SchemaDescriptor from ...utils import get_domain_model, remove_www from .auth import TenantAuthMiddlewareStack class TenantAwareProtocolTypeRouter(ProtocolTypeRouter): def __init__(self, application_mapping, tenant_prefix): self.tenant_prefix = tenant_prefix super().__init__(application_mapping) async def __call__(self, scope, receive, send): if scope["type"] != "http": scope["path"] = scope["path"][len(self.tenant_prefix) + 1 :] return await super().__call__(scope, receive, send) class TenantProtocolRouter: def __init__(self): self.root_ws_urlconf = settings.TENANTS["default"].get("WS_URLCONF") if self.root_ws_urlconf is None: raise ImproperlyConfigured( "TENANTS['default'] must contain a 'WS_URLCONF' key in order to use TenantProtocolRouter." ) @database_sync_to_async def get_tenant_scope(self, scope): hostname = force_text(dict(scope["headers"]).get(b"host", b"")) hostname = remove_www(hostname.split(":")[0]) tenant = None ws_urlconf = self.root_ws_urlconf for schema, data in settings.TENANTS.items(): if schema in ["public", "default"]: continue if hostname in data["DOMAINS"]: tenant = SchemaDescriptor.create(schema_name=schema, domain_url=hostname) if "WS_URLCONF" in data: ws_urlconf = data["WS_URLCONF"] return tenant, "", import_string(ws_urlconf + ".urlpatterns") else: DomainModel = get_domain_model() prefix = scope["path"].split("/")[1] try: domain = DomainModel.objects.select_related("tenant").get(domain=hostname, folder=prefix) except DomainModel.DoesNotExist: try: domain = DomainModel.objects.select_related("tenant").get(domain=hostname, folder="") except DomainModel.DoesNotExist: return None, "", [] tenant = domain.tenant tenant.domain_url = hostname ws_urlconf = settings.TENANTS["default"]["WS_URLCONF"] return tenant, prefix if prefix == domain.folder else "", import_string(ws_urlconf + ".urlpatterns")
MIT License
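A sketch of the kind of subclass override the docstring refers to; MyTenantProtocolRouter is a made-up example that rebuilds the websocket route and leaves room for additional protocol keys:

from channels.routing import URLRouter
from django_pgschemas.contrib.channels3.auth import TenantAuthMiddlewareStack
from django_pgschemas.contrib.channels3.router import (
    TenantAwareProtocolTypeRouter,
    TenantProtocolRouter,
)


class MyTenantProtocolRouter(TenantProtocolRouter):
    def get_protocol_type_router(self, tenant_prefix, ws_urlconf):
        # Additional protocol handlers can be added next to "websocket" here.
        return TenantAwareProtocolTypeRouter(
            {"websocket": TenantAuthMiddlewareStack(URLRouter(ws_urlconf))},
            tenant_prefix,
        )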
tensorflow/tensor2tensor
tensor2tensor/data_generators/generator_utils.py
SequenceDatasetPacker._concurrent_pack
python
def _concurrent_pack(self, dataset, window_size=None, cycle_length=None, keys=None): if window_size is None: window_size = int(self._packed_length / 8 * self._queue_size * 10) if cycle_length is None: cycle_length = max([int(multiprocessing.cpu_count() / 3), 1]) return self._pack(dataset, window_size=window_size, cycle_length=cycle_length, keys=keys)
Selects sensible default parallelism parameters based on the task.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/data_generators/generator_utils.py#L809-L830
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import gzip import math import multiprocessing import os import random import stat import tarfile import tempfile import numpy as np import requests import six from six.moves import range import six.moves.urllib_request as urllib from tensor2tensor.data_generators import text_encoder from tensor2tensor.utils import mlperf_log import tensorflow.compat.v1 as tf UNSHUFFLED_SUFFIX = "-unshuffled" def to_example(dictionary): features = {} for (k, v) in six.iteritems(dictionary): if not v: raise ValueError("Empty generated field: %s" % str((k, v))) if six.PY3 and isinstance(v, map): v = list(v) if (isinstance(v[0], six.integer_types) or np.issubdtype(type(v[0]), np.integer)): features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) elif isinstance(v[0], float): features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v)) elif isinstance(v[0], six.string_types): if not six.PY2: v = [bytes(x, "utf-8") for x in v] features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) elif isinstance(v[0], bytes): features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) else: raise ValueError("Value for %s is not a recognized type; v: %s type: %s" % (k, str(v[0]), str(type(v[0])))) return tf.train.Example(features=tf.train.Features(feature=features)) def generate_files_distributed(generator, output_name, output_dir, num_shards=1, max_cases=None, task_id=0): assert task_id < num_shards output_filename = sharded_name(output_name, task_id, num_shards) output_file = os.path.join(output_dir, output_filename) tf.logging.info("Writing to file %s", output_file) writer = tf.python_io.TFRecordWriter(output_file) counter = 0 for case in generator: if counter % 100000 == 0: tf.logging.info("Generating case %d for %s." 
% (counter, output_name)) counter += 1 if max_cases and counter > max_cases: break example = to_example(case) writer.write(example.SerializeToString()) writer.close() return output_file def _data_filenames(output_name, output_dir, num_shards): return [ os.path.join(output_dir, fname) for fname in shard_filepath(output_name, num_shards) ] def train_data_filenames(problem, output_dir, num_shards): return _data_filenames(problem + "-train", output_dir, num_shards) def dev_data_filenames(problem, output_dir, num_shards): return _data_filenames(problem + "-dev", output_dir, num_shards) def test_data_filenames(problem, output_dir, num_shards): return _data_filenames(problem + "-test", output_dir, num_shards) def combined_data_filenames(problem, output_dir, num_training_shards): return (train_data_filenames(problem, output_dir, num_training_shards) + dev_data_filenames(problem, output_dir, 1) + test_data_filenames( problem, output_dir, 1)) def sharded_name(base_name, shard, total_shards): return "%s-%.5d-of-%.5d" % (base_name, shard, total_shards) def shard_filepath(fname, num_shards): return [ sharded_name(fname, shard, num_shards) for shard in range(num_shards) ] def outputs_exist(filenames): for out_fname in filenames: out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, "") if tf.gfile.Exists(out_fname): return out_fname def generate_files(generator, output_filenames, max_cases=None, cycle_every_n=1): if outputs_exist(output_filenames): tf.logging.info("Skipping generator because outputs files exists at {}" .format(output_filenames)) return tmp_filenames = [fname + ".incomplete" for fname in output_filenames] num_shards = len(output_filenames) if num_shards > 0: if "-train" in output_filenames[0]: tag = "train" elif "-dev" in output_filenames[0]: tag = "eval" else: tag = "other" writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames] counter, shard = 0, 0 for case in generator: if case is None: continue if counter % 100000 == 0: tf.logging.info("Generating case %d." % counter) counter += 1 if max_cases and counter > max_cases: break example = to_example(case) writers[shard].write(example.SerializeToString()) if counter % cycle_every_n == 0: shard = (shard + 1) % num_shards for writer in writers: writer.close() for tmp_name, final_name in zip(tmp_filenames, output_filenames): tf.gfile.Rename(tmp_name, final_name) if num_shards > 0: if tag == "train": mlperf_log.transformer_print( key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter) elif tag == "eval": mlperf_log.transformer_print( key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter) tf.logging.info("Generated %s Examples", counter) def download_report_hook(count, block_size, total_size): percent = int(count * block_size * 100 / total_size) print("\r%d%%" % percent + " completed", end="\r") def maybe_download(directory, filename, uri): tf.gfile.MakeDirs(directory) filepath = os.path.join(directory, filename) if tf.gfile.Exists(filepath): tf.logging.info("Not downloading, file already found: %s" % filepath) return filepath tf.logging.info("Downloading %s to %s" % (uri, filepath)) try: tf.gfile.Copy(uri, filepath) except tf.errors.UnimplementedError: if uri.startswith("http"): inprogress_filepath = filepath + ".incomplete" inprogress_filepath, _ = urllib.urlretrieve( uri, inprogress_filepath, reporthook=download_report_hook) print() tf.gfile.Rename(inprogress_filepath, filepath) else: raise ValueError("Unrecognized URI: " + filepath) statinfo = os.stat(filepath) tf.logging.info("Successfully downloaded %s, %s bytes." 
% (filename, statinfo.st_size)) return filepath def maybe_download_from_drive(directory, filename, url): if not tf.gfile.Exists(directory): tf.logging.info("Creating directory %s" % directory) tf.gfile.MakeDirs(directory) filepath = os.path.join(directory, filename) confirm_token = None if tf.gfile.Exists(filepath): tf.logging.info("Not downloading, file already found: %s" % filepath) return filepath confirm_token = None session = requests.Session() response = session.get(url, stream=True) for k, v in response.cookies.items(): if k.startswith("download_warning"): confirm_token = v if confirm_token: url = url + "&confirm=" + confirm_token tf.logging.info("Downloading %s to %s" % (url, filepath)) response = session.get(url, stream=True) chunk_size = 16 * 1024 with open(filepath, "wb") as f: for chunk in response.iter_content(chunk_size): if chunk: f.write(chunk) print() statinfo = os.stat(filepath) tf.logging.info("Successfully downloaded %s, %s bytes." % (filename, statinfo.st_size)) return filepath def gunzip_file(gz_path, new_path): if tf.gfile.Exists(new_path): tf.logging.info("File %s already exists, skipping unpacking" % new_path) return tf.logging.info("Unpacking %s to %s" % (gz_path, new_path)) mode = stat.S_IRWXU or stat.S_IXGRP or stat.S_IRGRP or stat.S_IROTH os.chmod(os.path.dirname(new_path), mode) with gzip.open(gz_path, "rb") as gz_file: with tf.gfile.GFile(new_path, mode="wb") as new_file: for line in gz_file: new_file.write(line) def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generator, max_subtoken_length=None, reserved_tokens=None): if data_dir and vocab_filename: vocab_filepath = os.path.join(data_dir, vocab_filename) if tf.gfile.Exists(vocab_filepath): tf.logging.info("Found vocab file: %s", vocab_filepath) return text_encoder.SubwordTextEncoder(vocab_filepath) else: vocab_filepath = None tf.logging.info("Generating vocab file: %s", vocab_filepath) vocab = text_encoder.SubwordTextEncoder.build_from_generator( generator, vocab_size, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens) if vocab_filepath: tf.gfile.MakeDirs(data_dir) vocab.store_to_file(vocab_filepath) return vocab def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size, sources, file_byte_budget=1e6, max_subtoken_length=None): vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget) return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, vocab_generator, max_subtoken_length) def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6): tf.logging.info("Generating vocab from: %s", str(sources)) for source in sources: url = source[0] filename = os.path.basename(url) compressed_file = maybe_download(tmp_dir, filename, url) for lang_file in source[1]: tf.logging.info("Reading file: %s" % lang_file) filepath = os.path.join(tmp_dir, lang_file) if not tf.gfile.Exists(filepath): read_type = "r:gz" if filename.endswith("tgz") else "r" with tarfile.open(compressed_file, read_type) as corpus_tar: corpus_tar.extractall(tmp_dir) if lang_file.endswith(".gz"): new_filepath = os.path.join(tmp_dir, lang_file[:-3]) if tf.gfile.Exists(new_filepath): tf.logging.info( "Subdirectory %s already exists, skipping unpacking" % filepath) else: tf.logging.info("Unpacking subdirectory %s" % filepath) gunzip_file(filepath, new_filepath) filepath = new_filepath with tf.gfile.GFile(filepath, mode="r") as source_file: file_byte_budget_ = file_byte_budget counter = 0 countermax = int(source_file.size() / file_byte_budget_ / 2) for line 
in source_file: if counter < countermax: counter += 1 else: if file_byte_budget_ <= 0: break line = line.strip() file_byte_budget_ -= len(line) counter = 0 yield line def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename, index, vocab_filename, vocab_size): def generate(): filepath = os.path.join(tmp_dir, source_filename) tf.logging.info("Generating vocab from %s", filepath) with tf.gfile.GFile(filepath, mode="r") as source_file: for line in source_file: line = line.strip() if line and "\t" in line: parts = line.split("\t", 1) part = parts[index].strip() yield part return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generate()) def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size, filepatterns): if isinstance(filepatterns, str): filepatterns = [filepatterns] def generate(): tf.logging.info("Generating vocab from %s", filepatterns) for filepattern in filepatterns: for filename in tf.gfile.Glob(filepattern): with tf.gfile.GFile(filename, mode="r") as source_file: for line in source_file: yield line.strip() return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generate()) def read_records(filename): reader = tf.python_io.tf_record_iterator(filename) records = [] for record in reader: records.append(record) if len(records) % 100000 == 0: tf.logging.info("read: %d", len(records)) return records def write_records(records, out_filename): writer = tf.python_io.TFRecordWriter(out_filename) for count, record in enumerate(records): writer.write(record) if count > 0 and count % 100000 == 0: tf.logging.info("write: %d", count) writer.close() def generate_dataset_and_shuffle(train_gen, train_paths, dev_gen, dev_paths, shuffle=True): generate_files(train_gen, train_paths) generate_files(dev_gen, dev_paths) mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER) if shuffle: shuffle_dataset(train_paths + dev_paths) def _shuffle_single(fname, extra_fn=None): records = read_records(fname) random.shuffle(records) if extra_fn is not None: records = extra_fn(records) out_fname = fname.replace(UNSHUFFLED_SUFFIX, "") write_records(records, out_fname) tf.gfile.Remove(fname) def shuffle_dataset(filenames, extra_fn=None): if outputs_exist(filenames): tf.logging.info("Skipping shuffle because output files exist") return tf.logging.info("Shuffling data...") for filename in filenames: _shuffle_single(filename, extra_fn=extra_fn) tf.logging.info("Data shuffled.") class SequencePacker(object): def __init__(self, first_sequence, spacing=2): self._spacing = spacing self._ids = first_sequence[:] self._segmentation = [1] * len(first_sequence) self._position = list(range(len(first_sequence))) def add(self, ids): padding = [0] * self._spacing self._ids.extend(padding + ids) next_segment_num = self._segmentation[-1] + 1 if self._segmentation else 1 self._segmentation.extend(padding + [next_segment_num] * len(ids)) self._position.extend(padding + list(range(len(ids)))) def can_fit(self, ids, packed_length): return len(self._ids) + self._spacing + len(ids) <= packed_length def to_dict(self): return {"inputs": [0], "targets": self._ids, "targets_segmentation": self._segmentation, "targets_position": self._position} class SequencePairPacker(object): def __init__(self, first_sequence_pair, spacing=2): self._inputs = SequencePacker(first_sequence_pair[0], spacing) self._targets = SequencePacker(first_sequence_pair[1], spacing) def add(self, pair): self._inputs.add(pair[0]) self._targets.add(pair[1]) def can_fit(self, pair, packed_length): return 
(self._inputs.can_fit(pair[0], packed_length) and self._targets.can_fit(pair[1], packed_length)) def to_dict(self): ret = self._targets.to_dict() inputs_dict = self._inputs.to_dict() ret["inputs"] = inputs_dict["targets"] ret["inputs_segmentation"] = inputs_dict["targets_segmentation"] ret["inputs_position"] = inputs_dict["targets_position"] return ret def pack_examples(examples, has_inputs, packed_length=256, spacing=2, queue_size=10, chop_long_sequences=False): packer = SequencePairPacker if has_inputs else SequencePacker combined = [] for example in examples: x = ((example["inputs"], example["targets"]) if has_inputs else example["targets"]) if chop_long_sequences and len(x) > packed_length: assert not has_inputs num_fragments = len(x) // packed_length for i in range(num_fragments): yield packer( x[packed_length * i:packed_length * (i + 1)], spacing).to_dict() x = x[packed_length * num_fragments:] added = False for c in combined: if c.can_fit(x, packed_length): c.add(x) added = True break if not added: if len(combined) == queue_size: yield combined[0].to_dict() combined = combined[1:] combined.append(packer(x, spacing)) for c in combined: yield c.to_dict() def pack_dataset(dataset, length, keys=None, use_custom_ops=False): shapes = dataset.output_shapes if keys is None: keys = shapes.keys() for k in keys: if k not in shapes: raise ValueError("Key %s not found in dataset. Available keys are %s" % (k, shapes.keys())) if not shapes[k].is_compatible_with(tf.TensorShape([None])): raise ValueError("Tensors to be packed must be one-dimensional.") if use_custom_ops: return _pack_with_custom_ops(dataset, keys, length) else: packer = SequenceDatasetPacker(length, spacing=0, queue_size=10) return packer(dataset, cycle_length=10, keys=keys) def _pack_with_custom_ops(dataset, keys, length): from tensor2tensor.data_generators.ops import pack_sequences_ops dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys}) batch_size = length dataset = dataset.padded_batch( batch_size, padded_shapes={k: [-1] for k in keys}) k1, k2 = keys def map_fn_custom(x): (k1_packed, k1_segmengation, k1_position, k2_packed, k2_segmentation, k2_position) = ( pack_sequences_ops.pack_sequences2(x[k1], x[k2], length, length)) packed = { k1: k1_packed, k1 + "_segmentation": k1_segmengation, k1 + "_position": k1_position, k2: k2_packed, k2 + "_segmentation": k2_segmentation, k2 + "_position": k2_position, } return tf.data.Dataset.from_tensor_slices(packed) dataset = dataset.flat_map(map_fn_custom) return dataset INDEX_DTYPE = tf.int32 class SequenceDatasetPacker(object): def __init__(self, packed_length=256, spacing=0, queue_size=10, chop_long_sequences=False): self._packed_length = packed_length self._spacing = spacing self._queue_size = queue_size self._chop_long_sequences = chop_long_sequences self._num_sequences = None self._token_dtype = None def __call__(self, dataset, **kwargs): if {"window_size", "cycle_length"}.intersection(kwargs): return self._concurrent_pack(dataset, **kwargs) return self._pack(dataset, **kwargs)
Apache License 2.0
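Illustrative usage sketch (not part of the dataset entry above): the SequencePacker class shown in the context can be exercised on its own with made-up token id lists; the packed_length and spacing values here are arbitrary.

# Pack two short target-only sequences into one example dict using SequencePacker
# from the context above. The 2-token zero padding comes from spacing=2.
packed_length = 16
packer = SequencePacker([1, 2, 3], spacing=2)    # first sequence seeds the pack
candidate = [4, 5, 6, 7]
if packer.can_fit(candidate, packed_length):     # 3 + 2 + 4 = 9 <= 16, so it fits
    packer.add(candidate)
example = packer.to_dict()
# example["targets"]              -> [1, 2, 3, 0, 0, 4, 5, 6, 7]
# example["targets_segmentation"] -> [1, 1, 1, 0, 0, 2, 2, 2, 2]
# example["targets_position"]     -> [0, 1, 2, 0, 0, 0, 1, 2, 3]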
matsui528/rii
rii/rii.py
Rii.query
python
def query(self, q, topk=1, L=None, target_ids=None, sort_target_ids=True, method="auto"): assert 0 < self.N assert 0 < self.nlist assert method in ["auto", "linear", "ivf"] if topk is None: topk = self.N assert 1 <= topk <= self.N if L is None: L = self._multiple_of_L0_covering_topk(topk=topk) assert topk <= L <= self.N, "Parameters are weird. Make sure topk<=L<=N: topk={}, L={}, N={}".format(topk, L, self.N) if target_ids is None: tids = np.array([], dtype=np.int64) len_target_ids = self.N else: assert isinstance(target_ids, np.ndarray) assert target_ids.dtype == np.int64 assert target_ids.ndim == 1 if sort_target_ids: tids = np.sort(target_ids) else: tids = target_ids len_target_ids = len(tids) assert topk <= len_target_ids <= self.N, "Parameters are weird. Make sure topk<=len(target_ids)<=N: " "topk={}, len(target_ids)={}, N={}".format(topk, len_target_ids, self.N) if isinstance(self.fine_quantizer, nanopq.OPQ): q_ = self.fine_quantizer.rotate(q) elif isinstance(self.fine_quantizer, nanopq.PQ): q_ = q if method == "auto": if self._use_linear(len_target_ids, L): ids, dists = self.impl_cpp.query_linear(q_, topk, tids) else: ids, dists = self.impl_cpp.query_ivf(q_, topk, tids, L) elif method == "linear": ids, dists = self.impl_cpp.query_linear(q_, topk, tids) elif method == "ivf": ids, dists = self.impl_cpp.query_ivf(q_, topk, tids, L) return np.array(ids, np.int64), np.array(dists)
Given a query vector, run the approximate nearest neighbor search over the stored PQ-codes. This function returns the identifiers and the distances of the ``topk`` nearest PQ-codes to the query. The search can be conducted over a subset of the whole PQ-codes by specifying ``target_ids``. For example, if ``target_ids=np.array([23, 567, 998])``, the search result would be the items with these identifiers, sorted by the distance to the query. Inside this function, the search algorithm is selected from either PQ-linear-scan (see Alg. 1 in the paper) or inverted-index (see Alg. 2 in the paper). This is specified via ``method`` by setting 'linear' or 'ivf'. If 'auto' is set, the faster one is selected automatically (see Alg. 3 in the paper for more details). See :ref:`guideline_for_search` for tips on parameter selection. Args: q (np.ndarray): The query vector with the shape=(D, ) and dtype=np.float32. topk (int): The number of PQ-codes to be returned. The default value is 1. L (int): The number of PQ-codes used as candidates for distance evaluation. With a higher ``L`` value, the accuracy is boosted but the runtime gets slower. The default value is the minimum multiple of :attr:`L0` that covers ``topk``. This is typically :attr:`L0`. Note that ``L`` is used only if the search method is inverted-index. target_ids (np.ndarray): The target identifiers with the shape=(S, ) and dtype=np.int64, where S can be any length. The default value is None, in which case the search is run over the whole dataset. Note that ``target_ids`` does not need to be sorted if ``sort_target_ids==True``, in which case it will be sorted inside this function automatically. Otherwise please sort ``target_ids`` before calling this function. sort_target_ids (bool): The default value is True. If True, ``target_ids`` will be sorted automatically inside this function before running the search. If False, ``target_ids`` will not be sorted. Note that ``target_ids`` must be sorted before the search, so please sort it yourself if you set ``sort_target_ids`` to False. method (str): The search algorithm to be used: 'linear', 'ivf', or 'auto'. Returns: (np.ndarray, np.ndarray): The result (nearest items) of the search. The first one is the identifiers of the items, with the shape=(topk, ) and dtype=int64. The second one is the distances of the items to the query, with the shape=(topk, ) and dtype=float64.
https://github.com/matsui528/rii/blob/955f17b6615dd2b370470f241b3812039867e4e3/rii/rii.py#L235-L320
import main import nanopq import numpy as np import copy class Rii(object): def __init__(self, fine_quantizer): assert isinstance(fine_quantizer, nanopq.PQ) or isinstance(fine_quantizer, nanopq.OPQ) assert fine_quantizer.codewords is not None, "Please fit the PQ/OPQ instance first" assert fine_quantizer.Ks <= 256, "Ks must be less than 256 so that each code must be uint8" self.fine_quantizer = copy.deepcopy(fine_quantizer) self.impl_cpp = main.RiiCpp(fine_quantizer.codewords, fine_quantizer.verbose) self.threshold = None @property def M(self): return self.fine_quantizer.M @property def Ks(self): return self.fine_quantizer.Ks @property def N(self): return self.impl_cpp.N @property def nlist(self): return self.impl_cpp.nlist @property def codewords(self): return self.fine_quantizer.codewords @property def coarse_centers(self): if self.nlist == 0: return None else: return np.array(self.impl_cpp.coarse_centers, dtype=self.fine_quantizer.code_dtype) @property def codes(self): if self.N == 0: return None else: return np.array(self.impl_cpp.flattened_codes, dtype=self.fine_quantizer.code_dtype).reshape(self.N, self.M) @property def posting_lists(self): return self.impl_cpp.posting_lists @property def verbose(self): return self.impl_cpp.verbose @verbose.setter def verbose(self, v): self.fine_quantizer.verbose = v self.impl_cpp.verbose = v @property def L0(self): if self.nlist == 0: return None else: return int(np.round(self.N / self.nlist)) def reconfigure(self, nlist=None, iter=5): if nlist is None: nlist = int(np.sqrt(self.N)) assert 0 < nlist self.impl_cpp.reconfigure(nlist, iter) self.threshold = estimate_best_threshold_function( e=self, queries=self.fine_quantizer.decode(self.codes[:min(100, self.N)])) def add(self, vecs, update_posting_lists="auto"): assert vecs.ndim == 2 assert vecs.dtype == np.float32 self.impl_cpp.add_codes(self.fine_quantizer.encode(vecs), self._resolve_update_posting_lists_flag(update_posting_lists)) def add_configure(self, vecs, nlist=None, iter=5): self.add(vecs=vecs, update_posting_lists=False) self.reconfigure(nlist=nlist, iter=iter) return self def merge(self, engine, update_posting_lists='auto'): assert isinstance(engine, Rii) assert self.fine_quantizer == engine.fine_quantizer, "Two engines to be merged must have the same fine quantizer" if engine.N != 0: self.impl_cpp.add_codes(engine.codes, self._resolve_update_posting_lists_flag(update_posting_lists)) if self.verbose: print("The number of codes: {}".format(self.N))
MIT License
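Illustrative usage sketch for Rii.query (not part of the source repository); it relies only on the constructor, add_configure, and query signatures visible above, with placeholder vectors and an arbitrary PQ configuration.

import numpy as np
import nanopq
import rii

# Placeholder data: 10,000 database vectors of dimension 128, plus one query.
X = np.random.rand(10000, 128).astype(np.float32)
q = X[0]

# The fine quantizer must be fitted before constructing Rii (see the assert in __init__).
codec = nanopq.PQ(M=32).fit(vecs=X)
e = rii.Rii(fine_quantizer=codec).add_configure(vecs=X)

# Plain top-k search, then a search restricted to a subset of identifiers.
ids, dists = e.query(q=q, topk=3)
target_ids = np.array([10, 500, 9999], dtype=np.int64)   # must be int64 (see the asserts)
ids_sub, dists_sub = e.query(q=q, topk=1, target_ids=target_ids)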
nitipit/shelfdb
shelfdb/server.py
QueryHandler.slice
python
def slice(self, args): self.chain_query = self.chain_query.slice(*args) return self
`args` should be [start, stop, step]
https://github.com/nitipit/shelfdb/blob/92af9c9f4912b7181c7664bc6ab3383def1328e0/shelfdb/server.py#L79-L82
import asyncio import dill import re from datetime import datetime import argparse import os import sys import shelfdb class QueryHandler(): def __init__(self, db, shelf, queries): self.chain_query = db.shelf(shelf) self.queries = queries def count(self): self.chain_query = self.chain_query.count() return self def delete(self): self.chain_query = self.chain_query.delete() return self def edit(self, func): self.chain_query = self.chain_query.edit(func) return self def first(self, filter_): self.chain_query = self.chain_query.first(filter_) return self def filter(self, filter_): self.chain_query = self.chain_query.filter(filter_) return self def get(self, id_): self.chain_query = self.chain_query.get(id_) return self def insert(self, data): self.chain_query = self.chain_query.insert(data) return self def map(self, fn): self.chain_query = self.chain_query.map(fn) return self def put(self, args): self.chain_query = self.chain_query.put(*args) return self def reduce(self, fn): self.chain_query = self.chain_query.reduce(fn) return self def replace(self, data): self.chain_query = self.chain_query.replace(data) return self
MIT License
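Illustrative sketch of how the slice handler chains onto a shelf query (not part of the source repository); shelfdb.open() and the 'note' shelf are assumptions, and the underlying chain query is expected to expose a slice() method, since the handler simply forwards to it.

import shelfdb

db = shelfdb.open('my_db')              # assumed shelfdb entry point
handler = QueryHandler(db, 'note', queries=[])

# Each handler method mutates chain_query and returns self, so calls can be chained.
handler.slice([0, 10, 2])               # forwards to chain_query.slice(0, 10, 2)
result = handler.chain_query            # the chained shelf query after slicing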
ozflux/pyfluxpro2.7
scripts/pfp_gfSOLO.py
gfSOLO_plot
python
def gfSOLO_plot(pd, ds, drivers, target, output, l5s, si=0, ei=-1): ts = int(ds.globalattributes['time_step']) xdt = ds.series["DateTime"]["Data"][si:ei+1] Hdh = numpy.array([dt.hour+(dt.minute+dt.second/float(60))/float(60) for dt in xdt]) obs, _, _ = pfp_utils.GetSeriesasMA(ds, target, si=si, ei=ei) mod, _, _ = pfp_utils.GetSeriesasMA(ds, output, si=si, ei=ei) if l5s["gui"]["show_plots"]: plt.ion() else: plt.ioff() if plt.fignum_exists(1): fig = plt.figure(1) plt.clf() else: fig = plt.figure(1, figsize=(13, 8)) fig.canvas.set_window_title(target) title = l5s["info"]["site_name"] + " : Comparison of tower and SOLO data for " + target plt.figtext(0.5, 0.95, title, ha='center', size=16) rect1 = [0.10, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]] ax1 = plt.axes(rect1) mask = numpy.ma.mask_or(obs.mask, mod.mask) obs_mor = numpy.ma.array(obs, mask=mask) _, Hr1, Av1, _, _, _ = gf_getdiurnalstats(Hdh, obs_mor, ts) ax1.plot(Hr1, Av1, 'b-', label="Obs") _, Hr2, Av2, _, _, _ = gf_getdiurnalstats(Hdh, mod, ts) ax1.plot(Hr2, Av2, 'r-', label="SOLO(all)") mod_mor = numpy.ma.array(mod, mask=mask) if numpy.ma.count_masked(obs) != 0: index = numpy.where(numpy.ma.getmaskarray(obs) == False)[0] _, Hr3, Av3, _, _, _ = gf_getdiurnalstats(Hdh[index], mod_mor[index], ts) ax1.plot(Hr3, Av3, 'g-', label="SOLO(obs)") plt.xlim(0, 24) plt.xticks([0, 6, 12, 18, 24]) ax1.set_ylabel(target) ax1.set_xlabel('Hour') ax1.legend(loc='upper right', frameon=False, prop={'size':8}) rect2 = [0.40, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]] ax2 = plt.axes(rect2) ax2.plot(mod, obs, 'b.') ax2.set_ylabel(target + '_obs') ax2.set_xlabel(target + '_SOLO') coefs = numpy.ma.polyfit(numpy.ma.copy(mod), numpy.ma.copy(obs), 1) xfit = numpy.ma.array([numpy.ma.min(mod), numpy.ma.max(mod)]) yfit = numpy.polyval(coefs, xfit) r = numpy.ma.corrcoef(mod, obs) ax2.plot(xfit, yfit, 'r--', linewidth=3) eqnstr = 'y = %.3fx + %.3f, r = %.3f'%(coefs[0], coefs[1], r[0][1]) ax2.text(0.5, 0.875, eqnstr, fontsize=8, horizontalalignment='center', transform=ax2.transAxes) numpoints = trap_masked_constant(numpy.ma.count(obs)) numfilled = trap_masked_constant(numpy.ma.count(mod)-numpy.ma.count(obs)) diff = mod - obs bias = trap_masked_constant(numpy.ma.average(diff)) fractional_bias = trap_masked_constant(bias/(0.5*(numpy.ma.average(obs+mod)))) l5s["outputs"][output]["results"]["Bias"].append(bias) l5s["outputs"][output]["results"]["Frac Bias"].append(fractional_bias) rmse = numpy.ma.sqrt(numpy.ma.mean((obs-mod)*(obs-mod))) data_range = numpy.ma.max(obs)-numpy.ma.min(obs) nmse = rmse/data_range plt.figtext(0.65, 0.225, 'No. points') plt.figtext(0.75, 0.225, str(numpoints)) l5s["outputs"][output]["results"]["No. points"].append(numpoints) plt.figtext(0.65, 0.200, 'No. 
filled') plt.figtext(0.75, 0.200, str(numfilled)) plt.figtext(0.65, 0.175, 'Nodes') plt.figtext(0.75, 0.175, str(l5s["gui"]["nodes_target"])) plt.figtext(0.65, 0.150, 'Training') plt.figtext(0.75, 0.150, str(l5s["gui"]["training"])) plt.figtext(0.65, 0.125, 'Nda factor') plt.figtext(0.75, 0.125, str(l5s["gui"]["nda_factor"])) plt.figtext(0.65, 0.100, 'Learning rate') plt.figtext(0.75, 0.100, str(l5s["gui"]["learning_rate"])) plt.figtext(0.65, 0.075, 'Iterations') plt.figtext(0.75, 0.075, str(l5s["gui"]["iterations"])) plt.figtext(0.815, 0.225, 'Slope') plt.figtext(0.915, 0.225, str(pfp_utils.round2significant(coefs[0], 4))) l5s["outputs"][output]["results"]["m_ols"].append(trap_masked_constant(coefs[0])) plt.figtext(0.815, 0.200, 'Offset') plt.figtext(0.915, 0.200, str(pfp_utils.round2significant(coefs[1], 4))) l5s["outputs"][output]["results"]["b_ols"].append(trap_masked_constant(coefs[1])) plt.figtext(0.815, 0.175, 'r') plt.figtext(0.915, 0.175, str(pfp_utils.round2significant(r[0][1], 4))) l5s["outputs"][output]["results"]["r"].append(trap_masked_constant(r[0][1])) plt.figtext(0.815, 0.150, 'RMSE') plt.figtext(0.915, 0.150, str(pfp_utils.round2significant(rmse, 4))) l5s["outputs"][output]["results"]["RMSE"].append(trap_masked_constant(rmse)) l5s["outputs"][output]["results"]["NMSE"].append(trap_masked_constant(nmse)) var_obs = numpy.ma.var(obs) plt.figtext(0.815, 0.125, 'Var (obs)') plt.figtext(0.915, 0.125, '%.4g'%(var_obs)) l5s["outputs"][output]["results"]["Var (obs)"].append(trap_masked_constant(var_obs)) var_mod = numpy.ma.var(mod) plt.figtext(0.815, 0.100, 'Var (SOLO)') plt.figtext(0.915, 0.100, '%.4g'%(var_mod)) l5s["outputs"][output]["results"]["Var (SOLO)"].append(trap_masked_constant(var_mod)) l5s["outputs"][output]["results"]["Var ratio"].append(trap_masked_constant(var_obs/var_mod)) l5s["outputs"][output]["results"]["Avg (obs)"].append(trap_masked_constant(numpy.ma.average(obs))) l5s["outputs"][output]["results"]["Avg (SOLO)"].append(trap_masked_constant(numpy.ma.average(mod))) ts_axes = [] rect = [pd["margin_left"], pd["ts_bottom"], pd["ts_width"], pd["ts_height"]] ts_axes.append(plt.axes(rect)) ts_axes[0].plot(xdt, obs, 'b.') ts_axes[0].plot(xdt, mod, 'r-') ts_axes[0].set_xlim(xdt[0], xdt[-1]) TextStr = target + '_obs (' + ds.series[target]['Attr']['units'] + ')' ts_axes[0].text(0.05, 0.85, TextStr, color='b', horizontalalignment='left', transform=ts_axes[0].transAxes) TextStr = output + '(' + ds.series[output]['Attr']['units'] + ')' ts_axes[0].text(0.85, 0.85, TextStr, color='r', horizontalalignment='right', transform=ts_axes[0].transAxes) for label, i in zip(drivers, range(1, len(drivers) + 1)): this_bottom = pd["ts_bottom"] + i*pd["ts_height"] rect = [pd["margin_left"], this_bottom, pd["ts_width"], pd["ts_height"]] ts_axes.append(plt.axes(rect, sharex=ts_axes[0])) data, flag, attr = pfp_utils.GetSeriesasMA(ds, label, si=si, ei=ei) data_notgf = numpy.ma.masked_where(flag != 0, data) data_gf = numpy.ma.masked_where(flag == 0, data) ts_axes[i].plot(xdt, data_notgf, 'b-') ts_axes[i].plot(xdt, data_gf, 'r-', linewidth=2) plt.setp(ts_axes[i].get_xticklabels(), visible=False) TextStr = label + '(' + attr['units'] + ')' ts_axes[i].text(0.05, 0.85, TextStr, color='b', horizontalalignment='left', transform=ts_axes[i].transAxes) sdt = xdt[0].strftime("%Y%m%d") edt = xdt[-1].strftime("%Y%m%d") figname = l5s["info"]["site_name"].replace(" ", "") + "_SOLO_" + target figname = figname + "_" + sdt + "_" + edt + ".png" figname = os.path.join(l5s["info"]["plot_path"], figname) 
fig.savefig(figname, format="png") if l5s["gui"]["show_plots"]: plt.draw() mypause(1) plt.ioff() else: plt.ion()
Plot the results of the SOLO run.
https://github.com/ozflux/pyfluxpro2.7/blob/be066919d82bacb57e5ec691115c114f259659b8/scripts/pfp_gfSOLO.py#L286-L434
import csv import datetime import logging import os import platform import subprocess import dateutil import numpy import matplotlib import matplotlib.dates as mdt import matplotlib.pyplot as plt import pylab import constants as c import pfp_ck import pfp_gf import pfp_io import pfp_utils logger = logging.getLogger("pfp_log") def GapFillUsingSOLO(main_gui, ds, l5_info, called_by): ds.returncodes["value"] = 0 ds.returncodes["message"] = "normal" pfp_gf.CheckDrivers(ds, l5_info, called_by) if ds.returncodes["value"] != 0: return ds if l5_info[called_by]["info"]["call_mode"].lower() == "interactive": gfSOLO_plotcoveragelines(ds, l5_info, called_by) gfSOLO_gui(main_gui, ds, l5_info, called_by) else: gfSOLO_run(ds, l5_info, called_by) def gfSOLO_gui(main_gui, ds, l5_info, called_by): main_gui.solo_gui.ds = ds main_gui.solo_gui.l5_info = l5_info main_gui.solo_gui.called_by = called_by main_gui.solo_gui.edit_cfg = main_gui.tabs.tab_dict[main_gui.tabs.tab_index_running] start_date = ds.series["DateTime"]["Data"][0].strftime("%Y-%m-%d %H:%M") end_date = ds.series["DateTime"]["Data"][-1].strftime("%Y-%m-%d %H:%M") main_gui.solo_gui.label_DataStartDate_value.setText(start_date) main_gui.solo_gui.label_DataEndDate_value.setText(end_date) if l5_info[called_by]["info"]["called_by"] == "GapFillLongSOLO": main_gui.solo_gui.setWindowTitle("Gap fill using SOLO (long gaps)") main_gui.solo_gui.radioButton_Manual.setChecked(True) main_gui.solo_gui.lineEdit_StartDate.setText(start_date) main_gui.solo_gui.lineEdit_EndDate.setText(end_date) main_gui.solo_gui.lineEdit_MinPercent.setText("25") main_gui.solo_gui.lineEdit_Nodes.setText("Auto") main_gui.solo_gui.checkBox_AutoComplete.setChecked(True) elif l5_info[called_by]["info"]["called_by"] == "GapFillUsingSOLO": main_gui.solo_gui.setWindowTitle("Gap fill using SOLO (short gaps)") main_gui.solo_gui.lineEdit_StartDate.setText("") main_gui.solo_gui.lineEdit_EndDate.setText("") main_gui.solo_gui.radioButton_NumberMonths.setChecked(True) main_gui.solo_gui.lineEdit_NumberMonths.setText("2") main_gui.solo_gui.lineEdit_MinPercent.setText("25") main_gui.solo_gui.lineEdit_Nodes.setText("Auto") auto_complete = l5_info[called_by]["gui"]["auto_complete"] main_gui.solo_gui.checkBox_AutoComplete.setChecked(auto_complete) elif l5_info[called_by]["info"]["called_by"] == "ERUsingSOLO": main_gui.solo_gui.setWindowTitle("ER using SOLO") main_gui.solo_gui.radioButton_Manual.setChecked(True) main_gui.solo_gui.lineEdit_StartDate.setText(start_date) main_gui.solo_gui.lineEdit_EndDate.setText(end_date) main_gui.solo_gui.lineEdit_Nodes.setText("1") main_gui.solo_gui.lineEdit_MinPercent.setText("10") main_gui.solo_gui.checkBox_AutoComplete.setChecked(True) main_gui.solo_gui.show() main_gui.solo_gui.exec_() def gfSOLO_autocomplete(ds, l5_info, called_by): l5s = l5_info[called_by] if not l5s["gui"]["auto_complete"]: return ldt = ds.series["DateTime"]["Data"] nRecs = len(ldt) for output in l5s["outputs"].keys(): not_enough_points = False target = l5s["outputs"][output]["target"] data_solo, _, _ = pfp_utils.GetSeriesasMA(ds, output) if numpy.ma.count(data_solo) == 0: continue mask_solo = numpy.ma.getmaskarray(data_solo) gapstartend = pfp_utils.contiguous_regions(mask_solo) data_obs, _, _ = pfp_utils.GetSeriesasMA(ds, target) for si_gap, ei_gap in gapstartend: min_points = int((ei_gap-si_gap)*l5s["gui"]["min_percent"]/100) num_good_points = numpy.ma.count(data_obs[si_gap: ei_gap]) while num_good_points < min_points: si_gap = max([0, si_gap - l5s["info"]["nperday"]]) ei_gap = min([nRecs-1, 
ei_gap + l5s["info"]["nperday"]]) if si_gap == 0 and ei_gap == nRecs-1: msg = " Unable to find enough good points in target " + target logger.error(msg) not_enough_points = True if not_enough_points: break min_points = int((ei_gap-si_gap)*l5s["gui"]["min_percent"]/100) num_good_points = numpy.ma.count(data_obs[si_gap: ei_gap]) if not_enough_points: break si = max([0, si_gap]) ei = min([len(ldt)-1, ei_gap]) l5s["run"]["startdate"] = ldt[si].strftime("%Y-%m-%d %H:%M") l5s["run"]["enddate"] = ldt[ei].strftime("%Y-%m-%d %H:%M") gfSOLO_main(ds, l5_info, called_by, outputs=[output]) if l5s["info"]["call_mode"] == "interactive": gfSOLO_plotcoveragelines(ds, l5_info, called_by) def gfSOLO_check_drivers(ds, drivers, si, ei): drivers_accept = list(drivers) drivers_reject = [] for label in drivers_accept: data = pfp_utils.GetVariable(ds, label, start=si, end=ei) var = numpy.ma.var(data["Data"]) if var == 0: drivers_accept.remove(label) drivers_reject.append(label) if len(drivers_reject) == 1: msg = " Variance is 0 for driver " + ','.join(drivers_reject) + ", not used for this period" logger.warning(msg) elif len(drivers_reject) > 1: msg = " Variance is 0 for drivers " + ','.join(drivers_reject) + ", not used for this period" logger.warning(msg) return drivers_accept def gfSOLO_done(solo_gui): ds = solo_gui.ds l5_info = solo_gui.l5_info called_by = solo_gui.called_by l5s = l5_info[called_by] cl = ["GapFillUsingSOLO", "GapFillLongSOLO"] if (l5s["gui"]["period_option"] == 1 and l5s["info"]["called_by"] in cl): pfp_io.xl_write_SOLOStats(ds, l5s) gfSOLO_plotsummary(ds, l5s) solo_gui.close() ds.returncodes["value"] = 0 ds.returncodes["message"] = "normal" def gfSOLO_getserieslist(cf): series_list = [] if "Drivers" in cf.keys(): for series in cf["Drivers"].keys(): if "GapFillUsingSOLO" in cf["Drivers"][series]: series_list.append(series) elif "Fluxes" in cf.keys(): for series in cf["Fluxes"].keys(): if "GapFillUsingSOLO" in cf["Fluxes"][series]: series_list.append(series) elif "Variables" in cf.keys(): for series in cf["Variables"].keys(): if "GapFillUsingSOLO" in cf["Variables"][series]: series_list.append(series) else: series_list = [] msg = "No Variables, Drivers or Fluxes section found in control file" logger.error(msg) return series_list def gfSOLO_initplot(nDrivers): pd = {"margin_bottom":0.075, "margin_top":0.075, "margin_left":0.05, "margin_right":0.05, "xy_height":0.20, "xy_width":0.20, "xyts_space":0.05, "ts_width":0.9} pd["ts_bottom"] = pd["margin_bottom"]+pd["xy_height"]+pd["xyts_space"] pd["ts_height"] = (1.0 - pd["margin_top"] - pd["ts_bottom"])/float(nDrivers+1) return pd def gfSOLO_main(ds, l5_info, called_by, outputs=None): l5s = l5_info[called_by] ts = int(ds.globalattributes["time_step"]) startdate = l5s["run"]["startdate"] enddate = l5s["run"]["enddate"] logger.info(" Gap filling using SOLO: " + startdate + " to " + enddate) ldt = ds.series["DateTime"]["Data"] si = pfp_utils.GetDateIndex(ldt, startdate, ts=ts, default=0, match="exact") ei = pfp_utils.GetDateIndex(ldt, enddate, ts=ts, default=len(ldt)-1, match="exact") l5s["gui"]["min_points"] = int((ei-si)*l5s["gui"]["min_percent"]/100) if outputs == None: outputs = l5s["outputs"].keys() for output in outputs: flag_code = l5s["outputs"][output]["flag_code"] target = l5s["outputs"][output]["target"] drivers = gfSOLO_check_drivers(ds, l5s["outputs"][output]["drivers"], si, ei) l5s["outputs"][output]["results"]["startdate"].append(ldt[si]) l5s["outputs"][output]["results"]["enddate"].append(ldt[ei]) d, _, _ = pfp_utils.GetSeriesasMA(ds, 
target, si=si, ei=ei) nRecs = len(d) if numpy.ma.count(d) < l5s["gui"]["min_points"]: msg = "SOLO: Less than " + str(l5s["gui"]["min_points"]) + " points available for target " + target logger.warning(msg) l5s["outputs"][output]["results"]["No. points"].append(float(0)) results = l5s["outputs"][output]["results"].keys() for item in ["startdate", "enddate", "No. points"]: if item in results: results.remove(item) for item in results: l5s["outputs"][output]["results"][item].append(float(c.missing_value)) continue if str(l5s["gui"]["nodes"]).lower() == "auto": l5s["gui"]["nodes_target"] = len(drivers) + 1 else: l5s["gui"]["nodes_target"] = int(l5s["gui"]["nodes"]) if "solo_settings" in l5s["outputs"][output]: l5s["gui"]["nodes_target"] = l5s["outputs"][output]["solo_settings"]["nodes_target"] l5s["gui"]["training"] = l5s["outputs"][output]["solo_settings"]["training"] l5s["gui"]["nda_factor"] = l5s["outputs"][output]["solo_settings"]["nda_factor"] l5s["gui"]["learning_rate"] = l5s["outputs"][output]["solo_settings"]["learning_rate"] l5s["gui"]["iterations"] = l5s["outputs"][output]["solo_settings"]["iterations"] gfSOLO_writeinffiles(l5s) result = gfSOLO_runsofm(ds, drivers, target, nRecs, si=si, ei=ei) if result != 1: return result = gfSOLO_runsolo(ds, drivers, target, nRecs, si=si, ei=ei) if result != 1: return result = gfSOLO_runseqsolo(ds, drivers, target, output, nRecs, flag_code, si=si, ei=ei) if result != 1: return pd = gfSOLO_initplot(len(drivers)) gfSOLO_plot(pd, ds, drivers, target, output, l5s, si=si, ei=ei)
BSD 3-Clause New or Revised License
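A small runnable sketch of the plot-geometry helper that gfSOLO_plot consumes; the full plotting call needs a populated data structure and level-5 settings dictionary, so it is only indicated in a comment here.

# gfSOLO_initplot is self-contained: it only computes normalised figure geometry.
pd = gfSOLO_initplot(3)            # three drivers -> four stacked timeseries panels
print(pd["ts_bottom"])             # -> 0.325 (margin_bottom + xy_height + xyts_space)
print(round(pd["ts_height"], 3))   # -> 0.15  ((1.0 - margin_top - ts_bottom) / 4)
# gfSOLO_main then calls the plot itself with run-specific objects, e.g.:
# gfSOLO_plot(pd, ds, drivers, target, output, l5s, si=si, ei=ei)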
secondmind-labs/trieste
trieste/models/interfaces.py
ModelStack.predict
python
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]: means, vars_ = zip(*[model.predict(query_points) for model in self._models]) return tf.concat(means, axis=-1), tf.concat(vars_, axis=-1)
r""" :param query_points: The points at which to make predictions, of shape [..., D]. :return: The predictions from all the wrapped models, concatenated along the event axis in the same order as they appear in :meth:`__init__`. If the wrapped models have predictive distributions with event shapes [:math:`E_i`], the mean and variance will both have shape [..., :math:`\sum_i E_i`].
https://github.com/secondmind-labs/trieste/blob/b58eb924a49ad86e27fa2e082defe2d37afcc14a/trieste/models/interfaces.py#L158-L167
from __future__ import annotations from abc import ABC, abstractmethod import gpflow import tensorflow as tf from ..data import Dataset from ..types import TensorType class ProbabilisticModel(ABC): @abstractmethod def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]: raise NotImplementedError @abstractmethod def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]: raise NotImplementedError @abstractmethod def sample(self, query_points: TensorType, num_samples: int) -> TensorType: raise NotImplementedError def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]: raise NotImplementedError( f"Model {self!r} does not support predicting observations, just the latent function" ) def get_observation_noise(self) -> TensorType: raise NotImplementedError(f"Model {self!r} does not provide scalar observation noise") def get_kernel(self) -> gpflow.kernels.Kernel: raise NotImplementedError(f"Model {self!r} does not provide a kernel") class TrainableProbabilisticModel(ProbabilisticModel): @abstractmethod def update(self, dataset: Dataset) -> None: raise NotImplementedError @abstractmethod def optimize(self, dataset: Dataset) -> None: raise NotImplementedError class ModelStack(TrainableProbabilisticModel): def __init__( self, model_with_event_size: tuple[TrainableProbabilisticModel, int], *models_with_event_sizes: tuple[TrainableProbabilisticModel, int], ): super().__init__() self._models, self._event_sizes = zip(*(model_with_event_size,) + models_with_event_sizes)
Apache License 2.0
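A hedged sketch of stacking two single-output models and querying them jointly (not part of the source repository). The _ConstModel below is a stand-in, not a trieste class, and the sketch assumes ModelStack's remaining abstract methods (predict_joint, sample, update, optimize) are implemented further down in the module, as in the full trieste source.

import tensorflow as tf

# Stand-in model: constant mean and variance, implementing just enough of
# TrainableProbabilisticModel for ModelStack to accept it.
class _ConstModel(TrainableProbabilisticModel):
    def __init__(self, mean, var):
        self._mean, self._var = mean, var
    def predict(self, query_points):
        ones = tf.ones_like(query_points[:, :1])   # shape [N, 1], same dtype
        return self._mean * ones, self._var * ones
    def predict_joint(self, query_points):
        raise NotImplementedError
    def sample(self, query_points, num_samples):
        raise NotImplementedError
    def update(self, dataset):
        pass
    def optimize(self, dataset):
        pass

# Stack two single-output models (event size 1 each) and predict jointly.
stack = ModelStack((_ConstModel(0.0, 1.0), 1), (_ConstModel(2.0, 0.5), 1))
xs = tf.zeros([5, 3], dtype=tf.float64)   # 5 query points of dimension 3
mean, var = stack.predict(xs)             # both have shape [5, 2]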
hackersandslackers/beautifulsoup-tutorial
scraper/scrape.py
get_description
python
def get_description(html): description = None if html.find("meta", property="description"): description = html.find("meta", property="description").get('content') elif html.find("meta", property="og:description"): description = html.find("meta", property="og:description").get('content') elif html.find("meta", property="twitter:description"): description = html.find("meta", property="twitter:description").get('content') elif html.find("p"): description = html.find("p").contents return description
Scrape page description.
https://github.com/hackersandslackers/beautifulsoup-tutorial/blob/1e514ae69abf839843e7f8ee183236944b01da29/scraper/scrape.py#L22-L33
def get_title(html): title = None if html.title.string: title = html.title.string elif html.find("meta", property="og:title"): title = html.find("meta", property="og:title").get('content') elif html.find("meta", property="twitter:title"): title = html.find("meta", property="twitter:title").get('content') elif html.find("h1"): title = html.find("h1").string elif html.find_all("h1"): title = html.find_all("h1")[0].string if title: title = title.split('|')[0] return title
MIT License
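Illustrative usage sketch (not from the tutorial source shown above): the requests dependency and the example URL are assumptions; only the parsing helpers come from the file itself.

import requests
from bs4 import BeautifulSoup

resp = requests.get("https://example.com")            # placeholder URL
html = BeautifulSoup(resp.text, "html.parser")

print(get_title(html))         # falls back through <title>, og:title, twitter:title, <h1>
print(get_description(html))   # falls back through description, og:description,
                               # twitter:description, then the first <p>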
google-research/language
language/orqa/preprocessing/create_data_splits.py
resplit_ct
python
def resplit_ct(): hash_fn = make_hash_fn() resplit_data( input_path=FLAGS.ct_train_path, output_path=get_resplit_path("CuratedTrec", "train"), keep_fn=lambda x: hash_fn(x) != 0) resplit_data( input_path=FLAGS.ct_train_path, output_path=get_resplit_path("CuratedTrec", "dev"), keep_fn=lambda x: hash_fn(x) == 0) resplit_data( input_path=FLAGS.ct_test_path, output_path=get_resplit_path("CuratedTrec", "test"), keep_fn=lambda x: True)
Resplit the CuratedTrec dataset.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/orqa/preprocessing/create_data_splits.py#L88-L102
import json import os from absl import app from absl import flags import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS flags.DEFINE_string("nq_train_path", None, "Path to the Natural Questions (open) train data.") flags.DEFINE_string("nq_dev_path", None, "Path to the Natural Questions (open) dev data.") flags.DEFINE_string("wb_train_path", None, "Path to WebQuestions train data.") flags.DEFINE_string("wb_test_path", None, "Path to WebQuestions test data.") flags.DEFINE_string("ct_train_path", None, "Path to CuratedTrec train data.") flags.DEFINE_string("ct_test_path", None, "Path to CuratedTrec test data.") flags.DEFINE_string("output_dir", None, "Output directory.") def get_resplit_path(name, split): return os.path.join(FLAGS.output_dir, "{}.resplit.{}.jsonl".format(name, split)) def resplit_data(input_path, output_path, keep_fn): count = 0 with tf.io.gfile.GFile(output_path, "w") as output_file: for line in tf.io.gfile.GFile(input_path): example = json.loads(line) if keep_fn(example["question"]): output_file.write(line) count += 1 tf.logging.info("Wrote {} examples to {}".format(count, output_path)) def resplit_nq(): hash_fn = make_hash_fn() resplit_data( input_path=FLAGS.nq_train_path, output_path=get_resplit_path("NaturalQuestions", "train"), keep_fn=lambda x: hash_fn(x) != 0) resplit_data( input_path=FLAGS.nq_train_path, output_path=get_resplit_path("NaturalQuestions", "dev"), keep_fn=lambda x: hash_fn(x) == 0) resplit_data( input_path=FLAGS.nq_dev_path, output_path=get_resplit_path("NaturalQuestions", "test"), keep_fn=lambda x: True) def resplit_wb(): hash_fn = make_hash_fn() resplit_data( input_path=FLAGS.wb_train_path, output_path=get_resplit_path("WebQuestions", "train"), keep_fn=lambda x: hash_fn(x) != 0) resplit_data( input_path=FLAGS.wb_train_path, output_path=get_resplit_path("WebQuestions", "dev"), keep_fn=lambda x: hash_fn(x) == 0) resplit_data( input_path=FLAGS.wb_test_path, output_path=get_resplit_path("WebQuestions", "test"), keep_fn=lambda x: True)
Apache License 2.0
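A hedged sketch of the lower-level resplit_data helper that resplit_ct builds on; the jsonl paths are placeholders, and make_hash_fn is defined elsewhere in this module (not shown above), so its exact bucketing is assumed rather than known.

# Hold out questions whose hash bucket is 0 (resplit_ct above sends those to dev);
# every other question is kept for the train split.
hash_fn = make_hash_fn()
resplit_data(
    input_path="/tmp/curated_trec_train.jsonl",           # placeholder input
    output_path="/tmp/CuratedTrec.resplit.train.jsonl",   # placeholder output
    keep_fn=lambda question: hash_fn(question) != 0)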
timsusa/aptly_api_cli
aptly_cli/cli/cli.py
_get_parser_opts
python
def _get_parser_opts(): parser = OptionParser() parser.add_option('--repo_list', action='store_true', help='List all local repos') parser.add_option('--repo_create', nargs=1, help='Create local repo', metavar='REPO_NAME [COMMENT] [DISTRIBUTION] [COMPONENT]') parser.add_option('--repo_show_packages', nargs=1, help='Shows packages from repo', metavar='REPO_NAME [PACKAGE_TO_SEARCH] [WITH_DEPS] [FORMAT]') parser.add_option('--repo_show', nargs=1, help='Show basic repo-information', metavar='REPO_NAME') parser.add_option('--repo_edit', nargs=1, help='Edit repo-information', metavar='REPO_NAME COMMENT DISTRIBUTION COMPONENT') parser.add_option('--repo_delete', nargs=1, help='Delete repository', metavar='REPO_NAME') parser.add_option('--repo_add_packages_by_key', nargs=2, help='Add packages to local repo by key', metavar='REPO_NAME PACKAGE_REFS') parser.add_option('--repo_delete_packages_by_key', nargs=2, help='Delete packages from repository by key', metavar='REPO_NAME PACKAGE_REFS') parser.add_option('--file_list_dirs', action='store_true', help='Lists all upload-directories') parser.add_option('--file_upload', nargs=2, help='Upload file to local upload-directory', metavar='UPLOAD_DIR FILE') parser.add_option('--repo_add_package_from_upload', nargs=3, help='Add package from upload folder to local repo', metavar='REPO_NAME UPLOAD_DIR PACKAGE_NAME') parser.add_option('--file_list', action='store_true', help='List uploaded files') parser.add_option('--file_delete_dir', nargs=1, help='Delete upload directory', metavar='UPLOAD_DIR') parser.add_option('--file_delete', nargs=2, help='Delete a file in upload directory', metavar='UPLOAD_DIR FILE') parser.add_option('--snapshot_create_from_local_repo', nargs=2, help='Create snapshot from local repo', metavar='SNAPSHOT_NAME REPO_NAME [DESCRIPTION]') parser.add_option('--snapshot_create_by_pack_refs', nargs=3, help='Create snapshot by package references', metavar='SNAPSHOT_NAME SOURCE_SNAPSHOTS PACKAGE_REF_LIST [DESCRIPTION]') parser.add_option('--snapshot_show', nargs=1, help='Show basic information about snapshot', metavar='SNAPSHOT_NAME') parser.add_option('--snapshot_show_packages', nargs=1, help='Show all packages the snapshot is containing or optionally search for one.', metavar='SNAPSHOT_NAME [PACKAGE_TO_SEARCH] [WITH_DEPS] [FORMAT]') parser.add_option('--snapshot_update', nargs=2, help='Rename snapshot and optionally change description', metavar='OLD_SNAPSHOT_NAME NEW_SNAPSHOT_NAME [DESCRIPTION]') parser.add_option('--snapshot_list', action='store_true', help='Lists all available snapshots', metavar='[SORT_BY_NAME_OR_TIME]') parser.add_option('--snapshot_diff', nargs=2, help='List differences of two snapshots', metavar='LEFT_SNAPSHOT_NAME RIGHT_SNAPSHOT_NAME') parser.add_option('--snapshot_delete', nargs=1, help='Delete snapshot by name. 
Optionally force deletion.', metavar='SNAPSHOT_NAME [FORCE_DELETION]') parser.add_option('--publish_list', action='store_true', help='List all available repositories to publish to') parser.add_option('--publish', nargs=5, help='Publish snapshot or repository to storage', metavar='PREFIX SOURCES_KIND SOURCES_LIST DISTRIBUTION COMPONENT_LIST [LABEL] [ORIGIN] \ [FORCE_OVERWRITE] [ARCHITECTURES_LIST]') parser.add_option('--publish_drop', nargs=2, help='Drop published repo content', metavar='PREFIX DISTRIBUTION [FORCE_REMOVAL]') parser.add_option('--publish_switch', nargs=3, help='Switching snapshots to published repo with minimal server down time.', metavar='PREFIX SOURCES_LIST DISTRIBUTION [COMPONENT] [FORCE_OVERWRITE]') parser.add_option('--get_version', action='store_true', help='Returns aptly version') parser.add_option('--package_show_by_key', nargs=1, help='Show packages by key', metavar='PACKAGE_KEY') parser.add_option('--create_config', action='store_true', help='Creates standard config file (aptly-cli.conf) in $HOME') parser.add_option('--get_last_snapshots', nargs=2, help='Returns the last n snapshots by prefix or optional postfix.', metavar='PREFIX NR_OF_VERS [POSTFIX]') parser.add_option('--clean_last_snapshots', nargs=2, help='Cleans the last n snapshots by prefix or optional postfix.', metavar='PREFIX NR_OF_VERS [POSTFIX]') parser.add_option('--clean_mirrored_snapshots', action='store_true', help='Cleans out snapshots, which were taken from mirrors (from config)') parser.add_option('--clean_repo_packages', action='store_true', help='Cleans out packages, which were taken from repos (from config)') parser.add_option('--list_repos_and_packages', action='store_true', help='List all repos with their containing packages.') parser.add_option('--get_last_packages', nargs=3, help='Returns the last n packages by reponame, prefix or optional postfix.', metavar='REPO_NAME PREFIX NR_OF_VERS [POSTFIX]') parser.add_option('--clean_last_packages', nargs=3, help='Cleans the last n packages by reponame, prefix or optional postfix.', metavar='REPO_NAME PREFIX NR_OF_VERS [POSTFIX]') parser.add_option('--diff_both_last_snapshots_mirrors', action='store_true', help='Sorts list of snapshots and makes a diff between the last two.') parser.add_option('--publish_switch_3rdparty_production', action='store_true', help='Publish the last 3rd party staging snapshot to s3 production, if new content is available') return parser
_get_parser_opts: create the OptionParser, register all command-line options, and return the parser object.
https://github.com/timsusa/aptly_api_cli/blob/011ba8e7f464726b336b53f6b2cbdc4490b5180c/aptly_cli/cli/cli.py#L33-L218
import sys import json import os from optparse import OptionParser from aptly_cli.util.util import Util def main(): util = Util() parser = _get_parser_opts() (opts, args) = parser.parse_args() _execute_opts(opts, args, util) if len(sys.argv) == 1: parser.print_help() home = os.path.expanduser("~") name = home + '/aptly-cli.conf' if not os.path.exists(name): print "No config file (aptly-cli.conf) found at $HOME. Please create one by --create_config option" sys.exit(0)
MIT License
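A minimal sketch of driving the parser directly, mirroring how main() uses it above; the argument vector is an example only.

# Build the OptionParser and parse an explicit argument list instead of sys.argv.
parser = _get_parser_opts()
(opts, args) = parser.parse_args(["--repo_create", "my-repo"])
if opts.repo_create:
    print("would create repo:", opts.repo_create)   # -> my-repo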
ppaanngggg/paradoxtrading
ParadoxTrading/Utils/DataStruct.py
DataStruct.index
python
def index(self) -> list: return self.data[self.index_name]
return the index column :return: the index column as a list
https://github.com/ppaanngggg/paradoxtrading/blob/2c4024e60b14bf630fd141ccd4c77f197b7c901a/ParadoxTrading/Utils/DataStruct.py#L310-L316
import pickle import typing from bisect import bisect_left, bisect_right import pandas as pd import tabulate class DataStruct: EXPAND_STRICT = 'strict' EXPAND_INTERSECT = 'intersect' def __init__( self, _keys: typing.Sequence[str], _index_name: str, _rows: typing.Sequence[typing.Sequence] = None, _dicts: typing.Sequence[dict] = None ): assert _index_name in _keys self.index_name = _index_name self.data: typing.Dict[str, typing.List] = {} for key in _keys: self.data[key] = [] self.loc: Loc = Loc(self) self.iloc: ILoc = ILoc(self) if _rows is not None: self.addRows(_rows, _keys) if _dicts is not None: self.addDicts(_dicts) def __getitem__(self, _item: str) -> typing.List[typing.Any]: assert type(_item) == str return self.data[_item] def __len__(self) -> int: return len(self.index()) def __iter__(self): for i in range(len(self.index())): yield self.iloc[i] def __repr__(self): if len(self) > 20: tmp_rows, tmp_keys = self.iloc[:8].toRows() tmp_rows.append(['...' for _ in tmp_keys]) tmp_rows += self.iloc[-8:].toRows()[0] return tabulate.tabulate(tmp_rows, headers=tmp_keys) tmp_rows, tmp_keys = self.toRows() return tabulate.tabulate(tmp_rows, headers=tmp_keys) def addRow( self, _row: typing.Sequence[typing.Any], _keys: typing.Sequence[str] ): assert len(_row) == len(_keys) self.addDict(dict(zip(_keys, _row))) def addRows( self, _rows: typing.Sequence[typing.Sequence], _keys: typing.Sequence[str] ): for row in _rows: self.addRow(row, _keys) def addDict(self, _dict: typing.Dict[str, typing.Any]): index_value = _dict[self.index_name] insert_idx = bisect_right(self.index(), index_value) for k in self.data.keys(): self.data[k].insert(insert_idx, _dict[k]) def addDicts(self, _dicts: typing.Sequence[dict]): for _dict in _dicts: self.addDict(_dict) def toRows( self, _keys=None ) -> (typing.Sequence[typing.Sequence[typing.Any]], typing.List[str]): rows = [] keys: typing.List[str] = _keys if keys is None: keys = self.getColumnNames() for i in range(len(self)): rows.append([self.data[k][i] for k in keys]) return rows, keys def toRow( self, _index: int = 0, _keys=None ) -> (typing.Sequence[typing.Any], typing.List[str]): keys: typing.List[str] = _keys if keys is None: keys = self.getColumnNames() row = [self.data[k][_index] for k in keys] return row, keys def toDicts(self) -> (typing.List[typing.Dict[str, typing.Any]]): dicts = [] rows, keys = self.toRows() for d in rows: dicts.append(dict(zip(keys, d))) return dicts def toDict(self, _index: int = 0) -> (typing.Dict[str, typing.Any]): row, keys = self.toRow(_index) return dict(zip(keys, row)) def clone(self, _columns: typing.List[str] = None) -> 'DataStruct': if _columns is None: return self.iloc[:] keys_self = self.getColumnNames(_include_index_name=False) for column in _columns: assert column in keys_self keys_new = _columns if self.index_name not in keys_new: keys_new.append(self.index_name) datastruct = DataStruct( keys_new, self.index_name ) datastruct.addRows(*self.toRows(keys_new)) return datastruct def merge(self, _struct: "DataStruct"): self.addRows(*_struct.toRows()) def expand( self, _struct: "DataStruct", _type: str = 'strict' ) -> 'DataStruct': assert self.index_name == _struct.index_name self_names = self.getColumnNames(_include_index_name=False) struct_names = _struct.getColumnNames(_include_index_name=False) assert not (set(self_names) & set(struct_names)) if _type == self.EXPAND_STRICT: assert len(self) == len(_struct) for idx1, idx2 in zip(self.index(), _struct.index()): assert idx1 == idx2 index = self.index() elif _type == 
self.EXPAND_INTERSECT: index = sorted(set(self.index()) & set(_struct.index())) else: raise Exception('unknown type!') new_names = self_names + struct_names new_names.append(self.index_name) new_struct = DataStruct(new_names, self.index_name) for i in index: tmp_dict = {self.index_name: i} self_dict = self.loc[i] if self_dict is None: self_dict = dict([(d, None) for d in self_names]) else: self_dict = self_dict.toDict() tmp_dict.update(self_dict) struct_dict = _struct.loc[i] if struct_dict is None: struct_dict = dict([(d, None) for d in struct_names]) else: struct_dict = struct_dict.toDict() tmp_dict.update(struct_dict) new_struct.addDict(tmp_dict) return new_struct def toPandas(self) -> pd.DataFrame: df = pd.DataFrame(data=self.data, index=self.index()) del df[self.index_name] df.index.name = self.index_name return df @staticmethod def fromPandas(df: pd.DataFrame) -> 'DataStruct': columns = list(df) index_name = df.index.name columns.append(index_name) datastruct = DataStruct(columns, index_name) sorted_df = df.sort_index() datastruct.data[datastruct.index_name] = sorted_df.index.tolist() for column in df: datastruct.data[column] = df[column].tolist() return datastruct def save(self, _path: str): pickle.dump(self, open(_path, 'wb')) @staticmethod def load(_path: str) -> 'DataStruct': return pickle.load(open(_path, 'rb'))
MIT License
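A runnable sketch of the index accessor, using only the constructor and addDict shown above; the column names and values are made up, and the full ParadoxTrading.Utils module (including the Loc/ILoc helpers it instantiates) is assumed to be importable.

# Build a two-column struct keyed on 'time' and read back the sorted index column.
data = DataStruct(['time', 'price'], 'time')
data.addDict({'time': 2, 'price': 10.0})
data.addDict({'time': 1, 'price': 9.5})
print(data.index())    # -> [1, 2], kept sorted by addDict's bisect insert
print(data['price'])   # -> [9.5, 10.0], columns stay aligned with the index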
googleapis/python-speech
google/cloud/speech_v1p1beta1/services/adaptation/transports/grpc_asyncio.py
AdaptationGrpcAsyncIOTransport.list_custom_classes
python
def list_custom_classes( self, ) -> Callable[ [cloud_speech_adaptation.ListCustomClassesRequest], Awaitable[cloud_speech_adaptation.ListCustomClassesResponse], ]: if "list_custom_classes" not in self._stubs: self._stubs["list_custom_classes"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/ListCustomClasses", request_serializer=cloud_speech_adaptation.ListCustomClassesRequest.serialize, response_deserializer=cloud_speech_adaptation.ListCustomClassesResponse.deserialize, ) return self._stubs["list_custom_classes"]
r"""Return a callable for the list custom classes method over gRPC. List custom classes. Returns: Callable[[~.ListCustomClassesRequest], Awaitable[~.ListCustomClassesResponse]]: A function that, when called, will call the underlying RPC on the server.
https://github.com/googleapis/python-speech/blob/cc97a580bb4e693a1c3e5170064164e0c5d8482b/google/cloud/speech_v1p1beta1/services/adaptation/transports/grpc_asyncio.py#L435-L461
import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import packaging.version import grpc from grpc.experimental import aio from google.cloud.speech_v1p1beta1.types import cloud_speech_adaptation from google.cloud.speech_v1p1beta1.types import resource from google.protobuf import empty_pb2 from .base import AdaptationTransport, DEFAULT_CLIENT_INFO from .grpc import AdaptationGrpcTransport class AdaptationGrpcAsyncIOTransport(AdaptationTransport): _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "speech.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "speech.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: return self._grpc_channel @property def create_phrase_set( self, ) -> Callable[ [cloud_speech_adaptation.CreatePhraseSetRequest], Awaitable[resource.PhraseSet] ]: if 
"create_phrase_set" not in self._stubs: self._stubs["create_phrase_set"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/CreatePhraseSet", request_serializer=cloud_speech_adaptation.CreatePhraseSetRequest.serialize, response_deserializer=resource.PhraseSet.deserialize, ) return self._stubs["create_phrase_set"] @property def get_phrase_set( self, ) -> Callable[ [cloud_speech_adaptation.GetPhraseSetRequest], Awaitable[resource.PhraseSet] ]: if "get_phrase_set" not in self._stubs: self._stubs["get_phrase_set"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/GetPhraseSet", request_serializer=cloud_speech_adaptation.GetPhraseSetRequest.serialize, response_deserializer=resource.PhraseSet.deserialize, ) return self._stubs["get_phrase_set"] @property def list_phrase_set( self, ) -> Callable[ [cloud_speech_adaptation.ListPhraseSetRequest], Awaitable[cloud_speech_adaptation.ListPhraseSetResponse], ]: if "list_phrase_set" not in self._stubs: self._stubs["list_phrase_set"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/ListPhraseSet", request_serializer=cloud_speech_adaptation.ListPhraseSetRequest.serialize, response_deserializer=cloud_speech_adaptation.ListPhraseSetResponse.deserialize, ) return self._stubs["list_phrase_set"] @property def update_phrase_set( self, ) -> Callable[ [cloud_speech_adaptation.UpdatePhraseSetRequest], Awaitable[resource.PhraseSet] ]: if "update_phrase_set" not in self._stubs: self._stubs["update_phrase_set"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/UpdatePhraseSet", request_serializer=cloud_speech_adaptation.UpdatePhraseSetRequest.serialize, response_deserializer=resource.PhraseSet.deserialize, ) return self._stubs["update_phrase_set"] @property def delete_phrase_set( self, ) -> Callable[ [cloud_speech_adaptation.DeletePhraseSetRequest], Awaitable[empty_pb2.Empty] ]: if "delete_phrase_set" not in self._stubs: self._stubs["delete_phrase_set"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/DeletePhraseSet", request_serializer=cloud_speech_adaptation.DeletePhraseSetRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_phrase_set"] @property def create_custom_class( self, ) -> Callable[ [cloud_speech_adaptation.CreateCustomClassRequest], Awaitable[resource.CustomClass], ]: if "create_custom_class" not in self._stubs: self._stubs["create_custom_class"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/CreateCustomClass", request_serializer=cloud_speech_adaptation.CreateCustomClassRequest.serialize, response_deserializer=resource.CustomClass.deserialize, ) return self._stubs["create_custom_class"] @property def get_custom_class( self, ) -> Callable[ [cloud_speech_adaptation.GetCustomClassRequest], Awaitable[resource.CustomClass] ]: if "get_custom_class" not in self._stubs: self._stubs["get_custom_class"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1p1beta1.Adaptation/GetCustomClass", request_serializer=cloud_speech_adaptation.GetCustomClassRequest.serialize, response_deserializer=resource.CustomClass.deserialize, ) return self._stubs["get_custom_class"] @property
Apache License 2.0
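A hedged sketch of reaching this stub through the async transport; it assumes application-default credentials are available and uses a placeholder resource name, so treat it as shape rather than a working call.

# Constructing the transport builds a secure channel using default credentials.
transport = AdaptationGrpcAsyncIOTransport(host="speech.googleapis.com")

# The property lazily creates and caches the unary-unary gRPC callable.
rpc = transport.list_custom_classes
request = cloud_speech_adaptation.ListCustomClassesRequest(
    parent="projects/my-project/locations/global")    # placeholder parent
# response = await rpc(request)   # inside an event loop; yields ListCustomClassesResponse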
bpsmith/tia
tia/rlab/table.py
pad_positive_wrapper
python
def pad_positive_wrapper(fmtfct): def check_and_append(*args, **kwargs): result = fmtfct(*args, **kwargs) if fmtfct.parens and not result.endswith(')'): result += ' ' return result return check_and_append
Ensure that numbers are aligned in the table by appending a blank space to positive values when parentheses are used to denote negative numbers
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L43-L53
from reportlab.platypus import Table, TableStyle, Flowable from reportlab.lib.colors import grey, white, HexColor, black, gray from matplotlib.colors import rgb2hex, LinearSegmentedColormap from matplotlib.pyplot import get_cmap import numpy as np import pandas as pd from tia.rlab.components import KeepInFrame import tia.util.fmt as fmt __all__ = ['ConditionalRedBlack', 'DynamicTable', 'TableFormatter', 'RegionFormatter', 'IntFormatter', 'FloatFormatter', 'PercentFormatter', 'ThousandsFormatter', 'MillionsFormatter', 'BillionsFormatter', 'DollarCentsFormatter', 'DollarFormatter', 'ThousandDollarsFormatter', 'MillionDollarsFormatter', 'BillionDollarsFormatter', 'YmdFormatter', 'Y_m_dFormatter', 'DynamicNumberFormatter', 'BorderTypeGrid', 'BorderTypeHorizontal', 'BorderTypeOutline', 'BorderTypeOutline', 'BorderTypeVertical', 'Style', 'BorderTypeOutlineCols'] DefaultHeaderStyle = { "GRID": (.5, grey), "BOX": (.25, black), "VALIGN": "MIDDLE", "LEADING": 6, "LEFTPADDING": 3, "RIGHTPADDING": 3, "BOTTOMPADDING": 3, "TOPPADDING": 3, "FONTSIZE": 6, "BACKGROUND": HexColor("#404040"), "FONTNAME": "Helvetica", "ALIGN": "CENTER", "TEXTCOLOR": white } DefaultCellStyle = { "GRID": (.5, grey), "BOX": (.25, black), "VALIGN": "MIDDLE", "LEADING": 6, "LEFTPADDING": 3, "RIGHTPADDING": 3, "BOTTOMPADDING": 2, "TOPPADDING": 2, "ALIGN": "CENTER", "TEXTCOLOR": black, "ROWBACKGROUNDS": [[HexColor("#e3ebf4"), white]], "FONTSIZE": 6, "FONTNAME": "Helvetica" } DefaultIndexStyle = { "GRID": (.5, grey), "BOX": (.25, black), "VALIGN": "MIDDLE", "LEADING": 6, "LEFTPADDING": 3, "RIGHTPADDING": 3, "BOTTOMPADDING": 2, "TOPPADDING": 2, "ALIGN": "RIGHT", "TEXTCOLOR": black, "ROWBACKGROUNDS": [[HexColor("#e3ebf4"), white]], "FONTSIZE": 6, "FONTNAME": "Helvetica" } DefaultWeight = .7 AlignRight = {'ALIGN': 'RIGHT'} ConditionalRedBlack = lambda x: x < 0 and dict(TEXTCOLOR=HexColor("#800000"))
BSD 3-Clause New or Revised License
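A runnable sketch of the padding wrapper with a hand-rolled formatter; the parens attribute is set manually here because the real tia formatters carry it themselves.

# Toy accounting-style formatter: negatives in parentheses, positives bare.
def accounting(value):
    return '({:,.0f})'.format(-value) if value < 0 else '{:,.0f}'.format(value)
accounting.parens = True           # pad_positive_wrapper checks this attribute

padded = pad_positive_wrapper(accounting)
print(repr(padded(1250)))          # -> '1,250 '  (trailing space keeps columns aligned)
print(repr(padded(-1250)))         # -> '(1,250)' (already ends with ')', left as-is)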
qiskit/qiskit-aqua
qiskit/aqua/algorithms/minimum_eigen_solvers/vqe.py
VQE._run
python
def _run(self) -> 'VQEResult': if self.operator is None: raise AquaError("The operator was never provided.") self._check_operator_varform() self._quantum_instance.circuit_summary = True self._eval_count = 0 if self._gradient: if isinstance(self._gradient, GradientBase): self._gradient = self._gradient.gradient_wrapper( ~StateFn(self._operator) @ StateFn(self._var_form), bind_params=self._var_form_params, backend=self._quantum_instance) vqresult = self.find_minimum(initial_point=self.initial_point, var_form=self.var_form, cost_fn=self._energy_evaluation, gradient_fn=self._gradient, optimizer=self.optimizer) self._ret = {} self._ret['num_optimizer_evals'] = vqresult.optimizer_evals self._ret['min_val'] = vqresult.optimal_value self._ret['opt_params'] = vqresult.optimal_point self._ret['eval_time'] = vqresult.optimizer_time self._ret['opt_params_dict'] = vqresult.optimal_parameters if self._ret['num_optimizer_evals'] is not None and self._eval_count >= self._ret['num_optimizer_evals']: self._eval_count = self._ret['num_optimizer_evals'] self._eval_time = self._ret['eval_time'] logger.info('Optimization complete in %s seconds.\nFound opt_params %s in %s evals', self._eval_time, self._ret['opt_params'], self._eval_count) self._ret['eval_count'] = self._eval_count result = VQEResult() result.combine(vqresult) result.eigenvalue = vqresult.optimal_value + 0j result.eigenstate = self.get_optimal_vector() self._ret['energy'] = self.get_optimal_cost() self._ret['eigvals'] = np.asarray([self._ret['energy']]) self._ret['eigvecs'] = np.asarray([result.eigenstate]) if len(self.aux_operators) > 0: self._eval_aux_ops() result.aux_operator_eigenvalues = self._ret['aux_ops'][0] result.cost_function_evals = self._eval_count return result
Run the algorithm to compute the minimum eigenvalue. Returns: The result of the VQE algorithm as ``VQEResult``. Raises: AquaError: Wrong setting of operator and backend.
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/algorithms/minimum_eigen_solvers/vqe.py#L403-L467
from typing import Optional, List, Callable, Union, Dict, Any import logging import warnings from time import time import numpy as np from qiskit import ClassicalRegister, QuantumCircuit from qiskit.circuit import Parameter from qiskit.circuit.library import RealAmplitudes from qiskit.providers import BaseBackend from qiskit.providers import Backend from qiskit.aqua import QuantumInstance, AquaError from qiskit.aqua.algorithms import QuantumAlgorithm from qiskit.aqua.operators import (OperatorBase, ExpectationBase, ExpectationFactory, StateFn, CircuitStateFn, LegacyBaseOperator, ListOp, I, CircuitSampler) from qiskit.aqua.operators.gradients import GradientBase from qiskit.aqua.components.optimizers import Optimizer, SLSQP from qiskit.aqua.components.variational_forms import VariationalForm from qiskit.aqua.utils.validation import validate_min from qiskit.aqua.utils.backend_utils import is_aer_provider from ..vq_algorithm import VQAlgorithm, VQResult from .minimum_eigen_solver import MinimumEigensolver, MinimumEigensolverResult logger = logging.getLogger(__name__) class VQE(VQAlgorithm, MinimumEigensolver): def __init__(self, operator: Optional[Union[OperatorBase, LegacyBaseOperator]] = None, var_form: Optional[Union[QuantumCircuit, VariationalForm]] = None, optimizer: Optional[Optimizer] = None, initial_point: Optional[np.ndarray] = None, gradient: Optional[Union[GradientBase, Callable]] = None, expectation: Optional[ExpectationBase] = None, include_custom: bool = False, max_evals_grouped: int = 1, aux_operators: Optional[List[Optional[Union[OperatorBase, LegacyBaseOperator]]]] = None, callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None, quantum_instance: Optional[ Union[QuantumInstance, BaseBackend, Backend]] = None) -> None: validate_min('max_evals_grouped', max_evals_grouped, 1) if var_form is None: var_form = RealAmplitudes() if optimizer is None: optimizer = SLSQP() if initial_point is None and hasattr(var_form, 'preferred_init_points'): initial_point = var_form.preferred_init_points self._max_evals_grouped = max_evals_grouped self._circuit_sampler = None self._expectation = expectation self._user_valid_expectation = self._expectation is not None self._include_custom = include_custom self._expect_op = None self._operator = None super().__init__(var_form=var_form, optimizer=optimizer, cost_fn=self._energy_evaluation, gradient=gradient, initial_point=initial_point, quantum_instance=quantum_instance) self._ret = None self._eval_time = None self._optimizer.set_max_evals_grouped(max_evals_grouped) self._callback = callback if operator is not None: self.operator = operator self.aux_operators = aux_operators self._eval_count = 0 logger.info(self.print_settings()) @property def operator(self) -> Optional[OperatorBase]: return self._operator @operator.setter def operator(self, operator: Union[OperatorBase, LegacyBaseOperator]) -> None: if isinstance(operator, LegacyBaseOperator): operator = operator.to_opflow() self._operator = operator self._expect_op = None self._check_operator_varform() if not self._user_valid_expectation: self._try_set_expectation_value_from_factory() def _try_set_expectation_value_from_factory(self) -> None: if self.operator is not None and self.quantum_instance is not None: self._set_expectation(ExpectationFactory.build(operator=self.operator, backend=self.quantum_instance, include_custom=self._include_custom)) def _set_expectation(self, exp: ExpectationBase) -> None: self._expectation = exp self._user_valid_expectation = False self._expect_op = 
None @QuantumAlgorithm.quantum_instance.setter def quantum_instance(self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend]) -> None: super(VQE, self.__class__).quantum_instance.__set__(self, quantum_instance) self._circuit_sampler = CircuitSampler( self._quantum_instance, param_qobj=is_aer_provider(self._quantum_instance.backend)) if not self._user_valid_expectation: self._try_set_expectation_value_from_factory() @property def expectation(self) -> ExpectationBase: return self._expectation @expectation.setter def expectation(self, exp: ExpectationBase) -> None: self._set_expectation(exp) self._user_valid_expectation = self._expectation is not None @property def aux_operators(self) -> Optional[List[Optional[OperatorBase]]]: return self._aux_operators @aux_operators.setter def aux_operators(self, aux_operators: Optional[ Union[OperatorBase, LegacyBaseOperator, List[Optional[Union[OperatorBase, LegacyBaseOperator]]]]]) -> None: if aux_operators is None: aux_operators = [] elif not isinstance(aux_operators, list): aux_operators = [aux_operators] self._aux_op_nones = [op is None for op in aux_operators] if aux_operators: zero_op = I.tensorpower(self.operator.num_qubits) * 0.0 converted = [] for op in aux_operators: if op is None: converted.append(zero_op) elif isinstance(op, LegacyBaseOperator): converted.append(op.to_opflow()) else: converted.append(op) aux_operators = [zero_op if op == 0 else op for op in converted] self._aux_operators = aux_operators def _check_operator_varform(self): if self.operator is not None and self.var_form is not None: if self.operator.num_qubits != self.var_form.num_qubits: try: self.var_form.num_qubits = self.operator.num_qubits self._var_form_params = sorted(self.var_form.parameters, key=lambda p: p.name) except AttributeError as ex: raise AquaError("The number of qubits of the variational form does not match " "the operator, and the variational form does not allow setting " "the number of qubits using `num_qubits`.") from ex @VQAlgorithm.optimizer.setter def optimizer(self, optimizer: Optimizer): super(VQE, self.__class__).optimizer.__set__(self, optimizer) if optimizer is not None: optimizer.set_max_evals_grouped(self._max_evals_grouped) @property def setting(self): ret = "Algorithm: {}\n".format(self.__class__.__name__) params = "" for key, value in self.__dict__.items(): if key[0] == "_": if "initial_point" in key and value is None: params += "-- {}: {}\n".format(key[1:], "Random seed") else: params += "-- {}: {}\n".format(key[1:], value) ret += "{}".format(params) return ret def print_settings(self): ret = "\n" ret += "==================== Setting of {} ============================\n".format( self.__class__.__name__) ret += "{}".format(self.setting) ret += "===============================================================\n" if hasattr(self._var_form, 'setting'): ret += "{}".format(self._var_form.setting) elif hasattr(self._var_form, 'print_settings'): ret += "{}".format(self._var_form.print_settings()) elif isinstance(self._var_form, QuantumCircuit): ret += "var_form is a custom circuit" else: ret += "var_form has not been set" ret += "===============================================================\n" ret += "{}".format(self._optimizer.setting) ret += "===============================================================\n" return ret def construct_expectation(self, parameter: Union[List[float], List[Parameter], np.ndarray] ) -> OperatorBase: if self.operator is None: raise AquaError("The operator was never provided.") self._check_operator_varform() if 
isinstance(self.var_form, QuantumCircuit): param_dict = dict(zip(self._var_form_params, parameter)) wave_function = self.var_form.assign_parameters(param_dict) else: wave_function = self.var_form.construct_circuit(parameter) if self._expectation is None: self._try_set_expectation_value_from_factory() if self._expectation is None: raise AquaError('No expectation set and could not automatically set one, please ' 'try explicitly setting an expectation or specify a backend so it ' 'can be chosen automatically.') observable_meas = self.expectation.convert(StateFn(self.operator, is_measurement=True)) ansatz_circuit_op = CircuitStateFn(wave_function) return observable_meas.compose(ansatz_circuit_op).reduce() def construct_circuit(self, parameter: Union[List[float], List[Parameter], np.ndarray] ) -> List[QuantumCircuit]: expect_op = self.construct_expectation(parameter).to_circuit_op() circuits = [] def extract_circuits(op): if isinstance(op, CircuitStateFn): circuits.append(op.primitive) elif isinstance(op, ListOp): for op_i in op.oplist: extract_circuits(op_i) extract_circuits(expect_op) return circuits @classmethod def supports_aux_operators(cls) -> bool: return True
Apache License 2.0
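The VQE context above builds expectation values by composing a measurement StateFn over the operator with a CircuitStateFn over the bound ansatz. A minimal sketch of that same composition outside the VQE class, assuming the opflow primitives are importable from qiskit.aqua.operators and that opflow's exact eval() is available without a backend; the single-qubit ansatz and Z observable are made up for illustration:

import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.aqua.operators import CircuitStateFn, StateFn, Z

# Hypothetical one-parameter, single-qubit ansatz.
theta = Parameter("theta")
ansatz = QuantumCircuit(1)
ansatz.ry(theta, 0)

# Same pattern as construct_expectation: bind parameters, wrap the observable
# as a measurement, compose with the circuit state function, and reduce.
bound = ansatz.assign_parameters({theta: np.pi / 4})
expect_op = StateFn(Z, is_measurement=True).compose(CircuitStateFn(bound)).reduce()
print(expect_op.eval())  # exact <Z> for the bound ansatz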
ai4finance-llc/neofinrl
stable_baselines3/a2c/a2c.py
A2C.train
python
def train(self) -> None: self._update_learning_rate(self.policy.optimizer) for rollout_data in self.rollout_buffer.get(batch_size=None): actions = rollout_data.actions if isinstance(self.action_space, spaces.Discrete): actions = actions.long().flatten() values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions) values = values.flatten() advantages = rollout_data.advantages if self.normalize_advantage: advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) policy_loss = -(advantages * log_prob).mean() value_loss = F.mse_loss(rollout_data.returns, values) if entropy is None: entropy_loss = -th.mean(-log_prob) else: entropy_loss = -th.mean(entropy) loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss self.policy.optimizer.zero_grad() loss.backward() th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) self.policy.optimizer.step() explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten()) self._n_updates += 1 self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") self.logger.record("train/explained_variance", explained_var) self.logger.record("train/entropy_loss", entropy_loss.item()) self.logger.record("train/policy_loss", policy_loss.item()) self.logger.record("train/value_loss", value_loss.item()) if hasattr(self.policy, "log_std"): self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
Update policy using the currently gathered rollout buffer (one gradient step over the whole rollout data).
https://github.com/ai4finance-llc/neofinrl/blob/51338dbb0ec86f74e4fc6cce90bc385a4639de79/stable_baselines3/a2c/a2c.py#L117-L174
from typing import Any, Dict, Optional, Type, Union import torch as th from gym import spaces from torch.nn import functional as F from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm from stable_baselines3.common.policies import ActorCriticPolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import explained_variance class A2C(OnPolicyAlgorithm): def __init__( self, policy: Union[str, Type[ActorCriticPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 7e-4, n_steps: int = 5, gamma: float = 0.99, gae_lambda: float = 1.0, ent_coef: float = 0.0, vf_coef: float = 0.5, max_grad_norm: float = 0.5, rms_prop_eps: float = 1e-5, use_rms_prop: bool = True, use_sde: bool = False, sde_sample_freq: int = -1, normalize_advantage: bool = False, tensorboard_log: Optional[str] = None, create_eval_env: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): super(A2C, self).__init__( policy, env, learning_rate=learning_rate, n_steps=n_steps, gamma=gamma, gae_lambda=gae_lambda, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, use_sde=use_sde, sde_sample_freq=sde_sample_freq, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, create_eval_env=create_eval_env, seed=seed, _init_setup_model=False, supported_action_spaces=( spaces.Box, spaces.Discrete, spaces.MultiDiscrete, spaces.MultiBinary, ), ) self.normalize_advantage = normalize_advantage if use_rms_prop and "optimizer_class" not in self.policy_kwargs: self.policy_kwargs["optimizer_class"] = th.optim.RMSprop self.policy_kwargs["optimizer_kwargs"] = dict(alpha=0.99, eps=rms_prop_eps, weight_decay=0) if _init_setup_model: self._setup_model()
MIT License
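A minimal usage sketch for the A2C class whose train() step is recorded above, using the standard Stable-Baselines3 entry point (learn() collects rollouts and calls train() internally); the CartPole environment id and hyperparameters are arbitrary choices, not taken from the record:

import gym
from stable_baselines3 import A2C

# Arbitrary discrete-action environment for illustration.
env = gym.make("CartPole-v1")

model = A2C("MlpPolicy", env, n_steps=5, ent_coef=0.0, verbose=1)
model.learn(total_timesteps=10_000)  # each rollout is followed by one train() update

obs = env.reset()
action, _state = model.predict(obs, deterministic=True)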
nautobot/nautobot
nautobot/utilities/management/commands/__init__.py
custom_deconstruct
python
def custom_deconstruct(field): name, path, args, kwargs = _deconstruct(field) for attr in EXEMPT_ATTRS: kwargs.pop(attr, None) if hasattr(field, "CHOICES"): kwargs["choices"] = field.CHOICES return name, path, args, kwargs
Imitate the behavior of the stock deconstruct() method, but ignore the field attributes listed above.
https://github.com/nautobot/nautobot/blob/624ac936671db928e24b28785e678b96588384c7/nautobot/utilities/management/commands/__init__.py#L13-L28
from django.db import models EXEMPT_ATTRS = [ "choices", "help_text", "verbose_name", ] _deconstruct = models.Field.deconstruct
Apache License 2.0
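A sketch of how a helper like custom_deconstruct can replace Django's stock Field.deconstruct so that cosmetic attributes stop appearing in generated migrations; whether Nautobot applies the patch exactly this way, and whether the function is importable from nautobot.utilities.management.commands, are assumptions:

from django.db import models
from nautobot.utilities.management.commands import custom_deconstruct  # assumed import path

# Monkey-patch the deconstruct used by the migration autodetector. After this,
# changes to help_text/verbose_name alone no longer produce new migrations,
# because those kwargs are stripped by custom_deconstruct.
models.Field.deconstruct = custom_deconstruct

field = models.CharField(max_length=50, help_text="Ignored in migrations")
name, path, args, kwargs = field.deconstruct()
assert "help_text" not in kwargs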
koenhaak/congrads
bayesreg.py
BLR.predict
python
def predict(self, hyp, X, y, Xs): if (hyp != self.hyp).all() or not(hasattr(self, 'A')): self.post(hyp, X, y) beta = np.exp(hyp[0]) ys = Xs.dot(self.m) s2 = 1/beta + np.sum(Xs*linalg.solve(self.A, Xs.T).T, axis=1) return ys, s2
Make predictions from the fitted Bayesian linear regression model, returning the predictive mean and predictive variance at the test points Xs.
https://github.com/koenhaak/congrads/blob/a988773f7a24d4bd8c0984a887692c098b01962f/bayesreg.py#L237-L250
from __future__ import print_function from __future__ import division import numpy as np from scipy import optimize, linalg from scipy.linalg import LinAlgError class BLR: def __init__(self, hyp=None, X=None, y=None, n_iter=100, tol=1e-3, verbose=False): self.hyp = np.nan self.nlZ = np.nan self.tol = tol self.n_iter = n_iter self.verbose = verbose if (hyp is not None) and (X is not None) and (y is not None): self.post(hyp, X, y) def post(self, hyp, X, y): N = X.shape[0] if len(X.shape) == 1: D = 1 else: D = X.shape[1] if (hyp == self.hyp).all() and hasattr(self, 'N'): print("hyperparameters have not changed, exiting") return beta = np.exp(hyp[0]) alpha = np.exp(hyp[1:]) if self.verbose: print("estimating posterior ... | hyp=", hyp) if len(alpha) == 1 or len(alpha) == D: self.Sigma = np.diag(np.ones(D))/alpha self.iSigma = np.diag(np.ones(D))*alpha else: raise ValueError("hyperparameter vector has invalid length") self.A = beta*X.T.dot(X) + self.iSigma self.m = beta*linalg.solve(self.A, X.T, check_finite=False).dot(y) self.N = N self.D = D self.hyp = hyp def loglik(self, hyp, X, y): beta = np.exp(hyp[0]) if (hyp != self.hyp).all() or not(hasattr(self, 'A')): try: self.post(hyp, X, y) except ValueError: print("Warning: Estimation of posterior distribution failed") nlZ = 1/np.finfo(float).eps return nlZ try: logdetA = 2*sum(np.log(np.diag(np.linalg.cholesky(self.A)))) except (ValueError, LinAlgError): print("Warning: Estimation of posterior distribution failed") nlZ = 1/np.finfo(float).eps return nlZ logdetSigma = sum(np.log(np.diag(self.Sigma))) nlZ = -0.5 * (self.N*np.log(beta) - self.N*np.log(2*np.pi) - logdetSigma - beta*(y-X.dot(self.m)).T.dot(y-X.dot(self.m)) - self.m.T.dot(self.iSigma).dot(self.m) - logdetA ) if not np.isfinite(nlZ): nlZ = 1/np.finfo(float).eps if self.verbose: print("nlZ= ", nlZ, " | hyp=", hyp) self.nlZ = nlZ return nlZ def dloglik(self, hyp, X, y): beta = np.exp(hyp[0]) alpha = np.exp(hyp[1:]) if (hyp != self.hyp).all() or not(hasattr(self, 'A')): try: self.post(hyp, X, y) except ValueError: print("Warning: Estimation of posterior distribution failed") dnlZ = np.sign(self.dnlZ) / np.finfo(float).eps return dnlZ XX = X.T.dot(X) S = np.linalg.inv(self.A) Q = S.dot(X.T) b = (np.eye(self.D) - beta*Q.dot(X)).dot(Q).dot(y) dnlZ = np.zeros(hyp.shape) dnlZ[0] = - (self.N / (2 * beta) - 0.5 * y.dot(y) + y.dot(X).dot(self.m) + beta * y.T.dot(X).dot(b) - 0.5 * self.m.T.dot(XX).dot(self.m) - beta * b.T.dot(self.iSigma).dot(self.m) - 0.5 * np.trace(Q.dot(X)) ) * beta for i in range(0, len(alpha)): if len(alpha) == self.D: dSigma = np.zeros((self.D, self.D)) dSigma[i, i] = -alpha[i] ** -2 diSigma = np.zeros((self.D, self.D)) diSigma[i, i] = 1 else: dSigma = -alpha[i] ** -2*np.eye(self.D) diSigma = np.eye(self.D) F = diSigma c = -beta*S.dot(F).dot(S).dot(X.T).dot(y) dnlZ[i+1] = -(-0.5 * np.trace(self.iSigma.dot(dSigma)) + beta * y.T.dot(X).dot(c) - beta * c.T.dot(XX).dot(self.m) - c.T.dot(self.iSigma).dot(self.m) - 0.5 * self.m.T.dot(F).dot(self.m) - 0.5*np.trace(linalg.solve(self.A, F)) ) * alpha[i] if not all(np.isfinite(dnlZ)): bad = np.where(np.logical_not(np.isfinite(dnlZ))) for b in bad: dnlZ[b] = np.sign(self.dnlZ[b]) / np.finfo(float).eps if self.verbose: print("dnlZ= ", dnlZ, " | hyp=", hyp) self.dnlZ = dnlZ return dnlZ def estimate(self, hyp0, X, y, optimizer='cg'): if optimizer.lower() == 'cg': out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y), disp=True, gtol=self.tol, maxiter=self.n_iter, full_output=1) elif optimizer.lower() == 'powell': out = 
optimize.fmin_powell(self.loglik, hyp0, (X, y), full_output=1) else: raise ValueError("unknown optimizer") self.hyp = out[0] self.nlZ = out[1] self.optimizer = optimizer return self.hyp
Apache License 2.0
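A synthetic-data sketch of the estimate/predict cycle around BLR.predict above; the data, the zero initial log-hyperparameters, and the choice of the Powell optimizer are arbitrary, and BLR is assumed importable from the bayesreg module shown in the record:

import numpy as np
from bayesreg import BLR  # assumed import of the class in the record

# Synthetic 1-D regression problem with a bias column.
rng = np.random.RandomState(0)
X = np.column_stack([np.ones(50), rng.uniform(-2, 2, 50)])
y = 1.5 * X[:, 1] + 0.3 * rng.randn(50)
Xs = np.column_stack([np.ones(20), np.linspace(-2, 2, 20)])

hyp0 = np.zeros(2)  # [log(beta), log(alpha)] with one shared alpha
blr = BLR()
hyp = blr.estimate(hyp0, X, y, optimizer='powell')
ys, s2 = blr.predict(hyp, X, y, Xs)  # predictive mean and variance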
xflr6/bitsets
bitsets/bases.py
BitSet.__contains__
python
def __contains__(self, member): return self._map[member] & self
Set membership. Raises: KeyError: if member is not in the domain of the set.
https://github.com/xflr6/bitsets/blob/849d735cdf68d7a809170e74cc70581b881ee2d8/bitsets/bases.py#L149-L155
from itertools import compress, filterfalse from . import combos from . import integers from . import meta __all__ = ['MemberBits', 'BitSet'] __new__ = int.__new__ class MemberBits(int, metaclass=meta.MemberBitsMeta): _indexes = integers.indexes_optimized _reinverted = integers.reinverted frombitset = fromint = classmethod(int.__new__) @classmethod def frommembers(cls, members=()): return cls.fromint(sum(map(cls._map.__getitem__, set(members)))) @classmethod def frombools(cls, bools=()): return cls.fromint(sum(compress(cls._atoms, bools))) @classmethod def frombits(cls, bits='0'): if len(bits) > cls._len: raise ValueError(f'too many bits {bits!r}') return cls.fromint(bits[::-1], 2) __new__ = frombits.__func__ def __reduce__(self): return __new__, (self.__class__, self._int) def copy(self): return self int = _int = int.real iter_set = integers.indexes def members(self, as_set=False): if as_set: return frozenset(map(self._members.__getitem__, self._indexes())) return tuple(map(self._members.__getitem__, self._indexes())) def bools(self): return tuple(not not self & a for a in self._atoms) def bits(self): return '{0:0{1}b}'.format(self, self._len)[::-1] def __repr__(self): return f'{self.__class__.__name__}({self.bits()!r})' def atoms(self, reverse=False): atoms = reversed(self._atoms) if reverse else self._atoms return filter(self.__and__, atoms) def inatoms(self, reverse=False): atoms = reversed(self._atoms) if reverse else self._atoms return filterfalse(self.__and__, atoms) def powerset(self, start=None, excludestart=False): if start is None: start = self.infimum other = self.atoms() else: if self | start != self: raise ValueError(f'{start!r} is no subset of {self!r}') other = self.fromint(self & ~start).atoms() return map(self.frombitset, combos.shortlex(start, list(other))) def shortlex(self): return bin(self).count('1'), self._reinverted(self._len) def longlex(self): return -bin(self).count('1'), self._reinverted(self._len) def shortcolex(self): return bin(self).count('1'), self._int def longcolex(self): return -bin(self).count('1'), self._int def count(self, value=True): if value not in (True, False): raise ValueError(f'can only count True or False, not {value!r}') return bin(self)[2:].count('01'[value]) def all(self): return self == self.supremum def any(self): return self != self.infimum class BitSet(MemberBits): __new__ = MemberBits.frommembers.__func__ __bool__ = MemberBits.any def __len__(self): return bin(self).count('1') def __iter__(self): return map(self._members.__getitem__, self._indexes())
MIT License
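A small sketch of the membership check implemented by BitSet.__contains__ above, using the bitset() factory the library exposes; the factory import and the four-member domain are assumptions, since only bases.py appears in the record:

from bitsets import bitset  # assumed top-level factory

# Hypothetical domain of four members.
Letters = bitset('Letters', ('a', 'b', 'c', 'd'))

s = Letters(['a', 'c'])
print('a' in s)   # True  -- the bit for 'a' is set
print('b' in s)   # False -- the bit for 'b' is clear
try:
    'z' in s      # not in the domain
except KeyError as e:
    print('KeyError:', e)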
univ-of-utah-marriott-library-apple/aeios
aeios/apps.py
AppManager.recovery
python
def recovery(self): alerts = self.alerts def _recovery(alert): logger = logging.getLogger(__name__) logger.debug("received alert: %r", alert) logger.debug(u" message: %r", alert.message) logger.debug(u" details: %r", alert.details) logger.debug(u" options: %r", alert.options) logger.debug(u" choices: %r", alert.choices) if alerts: logger.debug("previous alerts were found") for a in alerts: if a == alert: logger.error("same alert occurred: %s", alert) logger.debug("%r", alert) raise RecoveryError(alert) else: logger.debug("no previous alerts") alerts.append(alert) if "unknown error" in [alert.message, alert.details]: logger.critical("unknown error: %s", alert) raise alert elif "already exists on" in alert.message: logger.debug(u"skipping installed app: %s", alert) adapter.action("Skip App", ["Apply to all apps"]) elif alert.details: if "An unexpected network error occurred" in alert.details: logger.debug(u"attempting to retry network: %s", alert) adapter.action("Try Again") else: raise RecoveryError(alert) return _recovery
:returns: callback to be performed in the case of an alert
https://github.com/univ-of-utah-marriott-library-apple/aeios/blob/daa7088441030c9b6e1d89e07e18b2090645a65c/aeios/apps.py#L497-L540
import re import os import logging import datetime as dt from distutils import version import config from . import resources from device import DeviceList from actools import adapter, cfgutil __author__ = 'Sam Forester' __email__ = 'sam.forester@utah.edu' __copyright__ = 'Copyright (c) 2019 University of Utah, Marriott Library' __license__ = 'MIT' __version__ = "2.4.2" __all__ = ['App', 'AppError', 'AppList', 'AppManager', 'RecoveryError'] logging.getLogger(__name__).addHandler(logging.NullHandler()) class Error(Exception): pass class AppError(Error): pass class RecoveryError(Error): def __init__(self, alert): self.alert = alert def __str__(self): return self.alert.message class VerificationError(Error): pass class SkipVerification(Error): pass class App(object): def __init__(self, record, path=None): self.name = u"{0!s}".format(record['itunesName']) self.version = version.LooseVersion(record['bundleVersion']) self.displayname = record['displayName'] self.identifier = record['bundleIdentifier'] self._file = None def __eq__(self, x): return self.name == x.name and self.version == x.version def __ne__(self, x): return not self == x def __lt__(self, x): if self.name != x.name: raise AppError("unable to compare different apps") return self.version < x.version def __gt__(self, x): if self.name != x.name: raise AppError("unable to compare different apps") return self.version > x.version def __repr__(self): return u"App({0!r})".format(self.name) def __str__(self): return self.name.encode('utf-8') def __unicode__(self): return self.name @property def record(self): return {'itunesName': self.name, 'bundleVersion': str(self.version), 'displayName': self.displayname, 'bundleIdentifier': self.identifier} @property def file(self): if not self._file: try: _files = [x for x in os.listdir(self.path) if x.endswith('.ipa')] self._file = os.path.join(self.path, _files[-1]) except (OSError, IndexError): pass return self._file class AppList(list): @property def names(self): return [x.name for x in self] @property def identifiers(self): return [x.identifier for x in self] def find(self, a): try: return [app for app in self if app == a][0] except IndexError: return a def app(self, name): return [app for app in self if app.name == name][0] def __repr__(self): return "AppList({0!r})".format(self.names) def __unicode__(self): return u'“{0!s}”'.format(u'”, “'.join(self.names)) def __str__(self): names = [x.encode('utf-8') for x in self.names] return '"{0!s}"'.format('", "'.join(names)) class Hook(object): def __init__(self): self.callback = None self.trigger = None class Monitor(object): def __init__(self): self.hook = None self.recovery = None self.result = None self.alerts = [] class AlertRecord(object): def __init__(self, data, alert=None): if alert: self.alert = alert self.count = 1 self.similar = 0 else: self.alert = adapter.Alert.load(data['alert']) self.count = data.get('count', 1) self.similar = data.get('similar', 0) def record(self): return {'alert': self.alert.record, 'timestamp': timestamp, 'count': self.count, 'similar': self.similar} @classmethod def from_alert(cls, alert): return cls({}, alert) class AlertManager(object): def __init__(self, name, resources): self.log = logging.getLogger(__name__ + '.AlertManager') _id = "{0}.errors".format(name) self.config = config.Manager(_id, path=resources.path) try: self.config.read() except config.Missing as e: self.log.error(e) _default = {'Errors': ()} self.log.debug("creating default: %s", _default) self.config.write(_default) def __bool__(self): return True 
if self.error else False __nonzero__ = __bool__ @property def count(self): return len(self.errors) @property def error(self): try: self.log.debug("returning: %r", self.errors[-1]) return self.errors[-1] except IndexError: self.log.debug("no errors") pass @error.setter def error(self, value): self.log.debug("adding value: %r", value) _errors = self.errors + [value] self.config.reset('Errors', _errors) def add(self, value): self.log.debug("adding value: %r", value) self.error = value @property def errors(self): return self.config.setdefault('Errors', ()) @errors.setter def errors(self, value=()): self.log.debug("reseting value: %r", value) return self.config.reset('Errors', value) def clear(self): self.log.debug("clearing") return self.config.reset('Errors', []) class AppManager(object): def __init__(self, *args, **kwargs): self.log = logging.getLogger(__name__) self.resources = resources.Resources(__name__) domain = self.resources.domain path = self.resources.path self.config = self.resources.config self.file = self.config.file self.errors = AlertManager(domain, self.resources) _apps = resources.DEFAULT.apps try: g_path = path.replace(os.path.expanduser('~'), '') g_config = config.Manager(domain, path=g_path) _apps = g_config.read() self.config.write(_apps) except config.ConfigError: self.log.debug("no global configuration for apps") try: self._record = self.config.read() self.log.debug("found configuration: %s", self.file) except config.Missing as e: self.log.error("unable to read config: %s", e) self.log.info("creating new config from default") self.log.debug("writing config: %r", _apps) self.config.write(_apps) self._record = self.config.read() adapter.log = os.path.join(self.resources.logs, 'acadapter.log') self._apps = AppList() self.alerts = [] @property def error(self): return self.errors.error @property def record(self): return self.config.read() def groups(self, device=None): if device is not None: model_type = re.match(r'^(\w+?)\d+,\d+$', device.model).group(1) groupnames = ["all-{0!s}s".format(model_type)] groupset = set(groupnames) try: _membership = self._record['groups']['model'][device.model] self.log.debug("_membership: %r", _membership) groupset.update(_membership) except KeyError: pass self.log.debug("groupset: %r", groupset) return list(groupset) else: _excluded = ['groups', 'Identifiers', 'errors'] return [x for x in self._record.keys() if x not in _excluded] def list(self, device=None, exclude=()): appset = set() for group in self.groups(device): appset.update(self._record[group]) appset.difference_update(exclude) return list(appset) def membership(self, model): device_type = re.match(r'^(\w+?)\d+,\d+$', model).group(1) _groups = self.groups _groups = ['All', 'All-{0!s}'.format(device_type)] members = {} for _type, v in self.config.get('groups', {}).items(): for _id, groups in v.items(): if name in groups: g = members.setdefault(_type, []) g.append(_id) return members def group(self, name): return {'apps': list(self._record[name]), 'members': self.membership(name)} def add(self, group, apps): if isinstance(apps, (str, unicode)): apps = (apps,) current = self.config.get(group, []) if apps: self.log.debug(u"adding apps: %r: %r", group, apps) for app in apps: if app not in current: current.append(app) self.log.info(u"added app: '%s'", app) else: self.log.info(u"already added: '%s'", app) self._record[group] = current self.config.update({group: current}) else: self.log.debug("nothing to add: %r", apps) return current def remove(self, apps, groups=None): if not apps: 
self.log.error("nothing to remove") return if isinstance(apps, (str, unicode)): apps = (apps,) elif isinstance(apps, AppList): apps = apps.names if not groups: groups = self.groups() self.log.debug(u"removing: %r from %r", apps, groups) for group in groups: current = set(self.config.get(group, [])) self.log.debug("current: %r", current) modified = current - set(apps) self.log.debug("modified: %r", modified) self.config.update({group: list(modified)}) self._record = self.config.read() def unknown(self, device, appnames=None): applist = AppList([App(x) for x in device.apps]) if not appnames: appset = set(applist.names) else: appset = set(appnames) appset.difference_update(self.list()) return AppList([x for x in applist if x.name in appset]) def breakdown(self, devices): _ipads = self._record['all-iPads'] _breakdown = [(devices, list(set( _ipads)))] if self._record['iPads']: ipads = [x for x in devices if x.model == 'iPad7,5'] if ipads: _breakdown.append((ipads, self._record['iPads'])) ipadpros = [x for x in devices if x.model == 'iPad7,3'] if ipadpros and self._record['iPadPros']: _breakdown.append((ipadpros, self._record['iPadPros'])) return _breakdown def installed(self, devices): _installed = set().union([App(a).name for d in devices for a in d.apps]) self.log.debug("_installed: %r", _installed) for device in devices: _apps = [app for app in device.apps] self.log.debug("_apps: %r", _apps) _applist = AppList(App(a) for a in _apps) self.log.debug("_applist: %r", _applist) names = _applist.names self.log.debug("names: %r", names) _installed.intersection_update(names) return _installed @staticmethod def installedapps(devices): results = cfgutil.get(['installedApps'], devices.ecids) apps = [] for device in devices: _apps = results.get(device.ecid, {}).get('installedApps', []) device.apps = _apps apps += [x for x in _apps if x not in apps] return apps @property
MIT License
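A sketch of how the recovery() callback factory above could be exercised: the callback is built once and then invoked per alert, raising RecoveryError when the same alert repeats. The FakeAlert stand-in, the import path, and a working aeios/Apple Configurator setup (AppManager() touches config files and the adapter) are all assumptions:

from aeios.apps import AppManager, RecoveryError  # assumed import path

class FakeAlert(object):
    # Stand-in for the adapter's Alert; only the attributes the callback reads.
    def __init__(self, message, details=''):
        self.message = message
        self.details = details
        self.options = []
        self.choices = []
    def __eq__(self, other):
        return (self.message, self.details) == (other.message, other.details)

manager = AppManager()          # assumes a working aeios resource setup
on_alert = manager.recovery()   # build the callback with a fresh alert history

alert = FakeAlert('Network issue', 'An unexpected network error occurred')
on_alert(alert)                 # retries via adapter.action("Try Again")
try:
    on_alert(alert)             # same alert seen again -> RecoveryError
except RecoveryError:
    pass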
google/hypebot
hypebot/plugins/coin_lib.py
Bank.ProcessPayment
python
def ProcessPayment(self, customer, merchants, num_coins, details, msg_fn, can_overdraft=False, merchant_weights=None): if num_coins < 0: logging.error('ProcessPayment called with negative value: %s, %s -> %s', num_coins, customer, merchants) return False if isinstance(merchants, user_pb2.User): merchants = [merchants] if merchant_weights is None: merchant_weights = [1] * len(merchants) total_weight = sum(merchant_weights) merchant_weights = [w / total_weight for w in merchant_weights] amount_paid = 0 success = True for i, (merchant, weight) in enumerate(zip(merchants, merchant_weights)): merchant_amount = min( int(round(num_coins * weight)), num_coins - amount_paid) if i == len(merchants) - 1: merchant_amount = num_coins - amount_paid if merchant_amount > 0: withdrawl_entry = bank_pb2.LedgerEntry( details=details, counterparty=merchant) withdrawl_entry.create_time.GetCurrentTime() deposit_entry = bank_pb2.LedgerEntry( details=details, counterparty=customer, create_time=withdrawl_entry.create_time) if (self._Withdraw(customer, merchant_amount, withdrawl_entry, msg_fn, can_overdraft) and self._Deposit(merchant, merchant_amount, deposit_entry, msg_fn)): amount_paid += merchant_amount else: success = False return success
Process payment from customer to merchant. The merchant will only be paid if the customer has the funds. Args: customer: {User} name of account to withdraw money. merchants: {User or list<User>} name(s) of account(s) to deposit money. num_coins: {int} number of hypecoins to transfer. details: {string} details of transaction. msg_fn: {callable(channel, msg)} function to send messages. can_overdraft: {boolean} whether it is possible to overdraft the account. If True, the account balance can go negative and no fees will be charged. If False, the transaction will fail and an overdraft fee will be assessed if there are insufficient funds for the transaction. merchant_weights: {list<float>} Weight of num_coins that each merchant will receive. Defaults to all 1's. Returns: {boolean} whether payment was successful.
https://github.com/google/hypebot/blob/d85158cf5d966d24c3c2ca5789530864c9fe2662/hypebot/plugins/coin_lib.py#L565-L628
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import collections import math import numbers import random import re import threading from absl import logging from hypebot.core import schedule_lib from hypebot.core import util_lib from hypebot.data import messages from hypebot.protos import bank_pb2 from hypebot.protos import bet_pb2 from hypebot.protos import user_pb2 import six from google.protobuf import json_format BOOKIE_ACCOUNT = user_pb2.User(user_id='_hypebank', display_name='HypeBank') FEE_ACCOUNT = BOOKIE_ACCOUNT MINT_ACCOUNT = BOOKIE_ACCOUNT SCHOLARSHIP_ACCOUNT = user_pb2.User( user_id='_hypescholarship', display_name='HypeScholarship') SUBSCRIPTION_ACCOUNT = BOOKIE_ACCOUNT HYPECENTS = frozenset([ BOOKIE_ACCOUNT.user_id, FEE_ACCOUNT.user_id, MINT_ACCOUNT.user_id, SCHOLARSHIP_ACCOUNT.user_id, SUBSCRIPTION_ACCOUNT.user_id, ]) class Thievery(object): _DECAY_RATE = 0.75 _DECAY_TIME = util_lib.ArrowTime(2) _BASE_BALANCE_PERCENT = 0.02 _HYPEBOT_SCORE = 1000 def __init__(self, store, bank, bot_name, timezone): self._store = store self._bank = bank self._bot_name = bot_name self._protected_peeps = [self._bot_name] + list(HYPECENTS) self._scheduler = schedule_lib.HypeScheduler(timezone) self._scheduler.DailyCallback( self._DECAY_TIME.to(timezone), self._store.RunInTransaction, self._DecayAllScores) def Rob(self, thief, victim, amount, msg_fn): if amount < 0: msg_fn(None, 'Did you mean !hc gift?') return if victim.user_id in self._protected_peeps: msg_fn(None, 'The Godfather protects his family.') self._bank.ProcessPayment( thief, user_pb2.User(user_id=self._bot_name, display_name=self._bot_name), 500, 'In Soviet Russia, %s steals from you.' % self._bot_name, msg_fn) return victim_balance = self._bank.GetBalance(victim) if victim_balance <= 0: msg_fn(None, 'You cannot milk a dead cow.') return thief_alert = self._GetPDF('thief')[thief.user_id] victim_alert = self._GetPDF('victim')[victim.user_id] offset = self._BASE_BALANCE_PERCENT * (1 - thief_alert - victim_alert) failure_chance = self._Sigmoid(amount / victim_balance, offset) rob_attempt_score = random.random() logging.info('(%s: %0.2f, %s: %0.2f) %s of %s attempt %0.2f >? %0.2f', thief, thief_alert, victim, victim_alert, amount, victim_balance, rob_attempt_score, failure_chance) if rob_attempt_score < failure_chance: self._bank.ProcessPayment(thief, SCHOLARSHIP_ACCOUNT, min(self._bank.GetBalance(thief), amount), 'Victim scholarship fund', msg_fn) self._DistributeToPastVictims(msg_fn) if (rob_attempt_score < failure_chance * thief_alert / (thief_alert + victim_alert + 1e-6)): msg_fn(None, '%s is a known thief and was caught.' % thief.display_name) else: msg_fn( None, '%s is on high alert and caught %s.' % (victim.display_name, thief.display_name)) return if self._bank.ProcessPayment(victim, thief, amount, 'Highway robbery', msg_fn): self._store.RunInTransaction(self._UpdateScores, thief, victim, amount) formatted_amount = util_lib.FormatHypecoins(amount) msg_fn( None, '%s stole %s from %s' % (thief.display_name, formatted_amount, victim.display_name)) msg_fn( victim, 'You\'ve been robbed! 
%s stole %s' % (thief.display_name, formatted_amount)) def _Sigmoid(self, value, offset, scale=200.0): return 1 / (1 + math.exp(-scale * (value - offset))) def _GetScores(self, collection, tx=None): scores = self._store.GetJsonValue(self._bot_name, 'scores:%s' % collection, tx) return collections.defaultdict( int, scores or {self._bot_name: self._HYPEBOT_SCORE}) def _GetPDF(self, collection): scores = self._GetScores(collection) total_score = sum(scores.values()) pdf = {peep: score / total_score for peep, score in scores.items()} return collections.defaultdict(float, pdf) def _AddToScore(self, collection, name, amount, tx=None): scores = self._GetScores(collection, tx) scores[name] += amount logging.info('Updating %s scores: %s', collection, scores) self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores, tx) def _UpdateScores(self, thief, victim, amount, tx=None): self._AddToScore('thief', thief.user_id, amount, tx) self._AddToScore('victim', victim.user_id, amount, tx) return True def _DecayAllScores(self, tx=None): self._DecayScores('thief', tx) self._DecayScores('victim', tx) return True def _DecayScores(self, collection, tx=None): scores = { peep: int(score * self._DECAY_RATE) for peep, score in self._GetScores(collection, tx).items() if score > 0 } scores[self._bot_name] = self._HYPEBOT_SCORE logging.info('Updating %s scores: %s', collection, scores) self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores, tx) def _DistributeToPastVictims(self, msg_fn): victim_scores = self._GetPDF('victim') scholarship_balance = self._bank.GetBalance(SCHOLARSHIP_ACCOUNT) self._bank.ProcessPayment( SCHOLARSHIP_ACCOUNT, [user_pb2.User(user_id=v) for v in victim_scores.keys()], scholarship_balance, 'Victim scholarship fund', msg_fn, merchant_weights=victim_scores.values()) class Bookie(object): _BET_SUBKEY = 'bets' _ledger_lock = threading.RLock() def __init__(self, store, bank, inventory): self._store = store self._bank = bank self._inventory = inventory def LookupBets(self, game, user: user_pb2.User = None, resolver=None): with self._ledger_lock: bets = self._GetBets(game) if user: user_id = user.user_id bets = {user_id: bets[user_id]} if user_id in bets else {} if resolver: bets = { user_id: [bet for bet in user_bets if bet.resolver == resolver ] for user_id, user_bets in bets.items() } bets = collections.defaultdict(list, bets) return bets def PlaceBet(self, game, bet, msg_fn, more=False): return self._store.RunInTransaction(self._PlaceBet, game, bet, more, msg_fn) def _PlaceBet(self, game, bet, more, msg_fn, *unused_args, **kwargs): bet.game = game.name with self._ledger_lock: tx = kwargs.get('tx') if not tx: logging.error('_PlaceBet can only be called with a transaction.') return bets = self._GetBets(game.name, tx=tx) prior_bet = None for b in bets[bet.user.user_id]: if bet.target == b.target: prior_bet = b logging.info('%s has a prior_bet for %s:%s => %s', bet.user, game.name, bet.target, prior_bet) break if more and prior_bet: bet.amount += prior_bet.amount if game.name == 'lottery': bet.amount = game.CapBet(bet.user, bet.amount, bet.resolver) net_amount = bet.amount - (prior_bet.amount if prior_bet else 0) if net_amount < 0: msg_fn(bet.user, 'Money on the table is not yours. Try a higher amount.') return False if prior_bet: details = 'Bet updated. Replaced %s with %s' % ( game.FormatBet(prior_bet), game.FormatBet(bet)) else: details = 'Bet placed. 
%s' % game.FormatBet(bet) if not self._bank.ProcessPayment(bet.user, BOOKIE_ACCOUNT, net_amount, details, msg_fn): return False if prior_bet: bets[bet.user.user_id].remove(prior_bet) bets[bet.user.user_id].append(bet) self._SetBets(game.name, bets, tx=tx) return True def SettleBets(self, game, resolver, msg_fn, *args, **kwargs): return self._store.RunInTransaction(self._SettleBets, game, resolver, msg_fn, *args, **kwargs) def _SettleBets(self, game, resolver, msg_fn, *args, **kwargs): with self._ledger_lock: tx = kwargs.get('tx') if not tx: logging.error('_SettleBets can only be called with a transaction.') return [] bets = self._GetBets(game.name, tx) if not bets: logging.warning('Tried to settle bets for %s, but no bets were found', game.name) return [] unresolved_bets = collections.defaultdict(list) filtered_bets = collections.defaultdict(list) for user_id, user_bets in bets.items(): for bet in user_bets: if not bet.resolver or bet.resolver == resolver: filtered_bets[user_id].append(bet) else: unresolved_bets[user_id].append(bet) if not filtered_bets: logging.info('No bets found for resolver %s', resolver) return [] winner_info, unused_bets, notifications = game.SettleBets( filtered_bets, msg_fn, *args, **kwargs) for user_id, user_bets in unresolved_bets.items(): if user_id in unused_bets: unused_bets[user_id] += user_bets else: unused_bets[user_id] = user_bets self._SetBets(game.name, unused_bets, tx=tx) for winner, winnings in winner_info: if isinstance(winnings, numbers.Number): if not self._bank.ProcessPayment(BOOKIE_ACCOUNT, winner, winnings, 'Gambling payout', msg_fn): logging.error('Couldn\'t pay %s %s for winning %s', winner, winnings, game.name) else: self._inventory.AddItem(winner, winnings) return notifications def _GetBets(self, row, tx=None): json_bets = self._store.GetJsonValue(row, self._BET_SUBKEY, tx) or {} bets = { u: [json_format.ParseDict(b, bet_pb2.Bet()) for b in user_bets ] for u, user_bets in json_bets.items() } return collections.defaultdict(list, bets) def _SetBets(self, row, bets, tx=None): json_bets = { u: [json_format.MessageToDict(b) for b in user_bets ] for u, user_bets in bets.items() } return self._store.SetJsonValue(row, self._BET_SUBKEY, json_bets, tx=tx) class Bank(object): _BALANCE_SUBKEY = 'bank:balance' _TRANSACTION_SUBKEY = 'bank:transaction' _MIN_OVERDRAFT_FEE = 5 _MAX_OVERDRAFT_FEE_PERCENT = 0.05 def __init__(self, store, bot_name): self._store = store self._bot_name = bot_name self._withdraw_lock = threading.RLock() def GetBalance(self, user): balance = self._store.GetValue(user.user_id, self._BALANCE_SUBKEY) if not balance: return 0 return util_lib.SafeCast(balance, int, 0) def GetUserBalances(self, plebs_only=False): user_balances = self._store.GetSubkey(self._BALANCE_SUBKEY) return { user_id: util_lib.SafeCast(balance, int, 0) for user_id, balance in user_balances if (not plebs_only or user_id not in HYPECENTS) and not user_id.startswith('http') } def GetTransactions(self, user): json_entries = self._store.GetHistoricalValues(user.user_id, self._TRANSACTION_SUBKEY, 5) return [ json_format.ParseDict(entry, bank_pb2.LedgerEntry()) for entry in json_entries ] def GetBankStats(self, plebs_only=False): user_balances = self.GetUserBalances(plebs_only=plebs_only) balance_sum = sum(user_balances.values()) return len(user_balances), balance_sum def MintNewHypeCoins(self): mint_balance = self.GetBalance(MINT_ACCOUNT) num_users, coins_in_circulation = self.GetBankStats() if mint_balance >= coins_in_circulation // 4: logging.info( 'Mint balance (%s) >= 
25%% of market (%s), not minting new coins', util_lib.FormatHypecoins(mint_balance), util_lib.FormatHypecoins(coins_in_circulation)) return num_coins_to_mint = max( 5000, int(math.log(coins_in_circulation, 2) * num_users * 1000)) logging.info('Minting %s', util_lib.FormatHypecoins(num_coins_to_mint)) entry = bank_pb2.LedgerEntry( counterparty={ 'user_id': '_ether', 'display_name': 'Ether' }, amount=num_coins_to_mint, details='Minting') entry.create_time.GetCurrentTime() if not self._Deposit(MINT_ACCOUNT, num_coins_to_mint, entry, None): logging.error('Minting %s failed', util_lib.FormatHypecoins(num_coins_to_mint)) def ParseAmount(self, user, amount_str, msg_fn): def _IntAmount(match, unused_balance): return int(match.groups()[0]) def _HumanIntAmount(match, unused_balance): try: return int(util_lib.UnformatHypecoins(match.groups()[0])) except ValueError: return None def _HexAmount(match, unused_balance): return int(match.groups()[0], 16) def _RandomBalance(unused_match, balance): return random.randint(1, balance) def _MemeTeam(unused_match, unused_balance): return 'ayyy' parsers = ( (r'%s$' % self._bot_name, lambda x, y: 'You can\'t put a price on this bot.'), (r'(dank)? ?memes?$', _MemeTeam), (r'(-?[0-9]+)$', _IntAmount), (r'(?:0x)([0-9,a-f]+)$', _HexAmount), (r'(a )?positive int$', _RandomBalance), (r'(-?[0-9.]+ ?[A-Za-z]+)$', _HumanIntAmount), ) balance = self.GetBalance(user) amount_str = amount_str.lower().strip() if amount_str in messages.GAMBLE_STRINGS: return balance amount = None for parser in parsers: match = re.match(parser[0], amount_str) if match: amount = parser[1](match, balance) break if amount is None: amount = 'Unrecognized amount.' if isinstance(amount, six.string_types): msg_fn(None, amount) amount = None return amount def FineUser(self, user, amount, details, msg_fn): return self.ProcessPayment( user, BOOKIE_ACCOUNT, amount, 'Fine: %s' % details, msg_fn, can_overdraft=True)
Apache License 2.0
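ProcessPayment above splits num_coins across merchants by normalizing merchant_weights, rounding each share, and giving the final merchant the remainder so the total is exact. A self-contained sketch of just that apportioning logic, extracted and simplified from the method (not the Bank class itself, which needs a storage backend):

def split_coins(num_coins, merchant_weights):
    # Mirror ProcessPayment's apportioning: normalize weights, round each
    # share, and give the last merchant whatever remains so the total is exact.
    total = sum(merchant_weights)
    weights = [w / total for w in merchant_weights]
    shares, paid = [], 0
    for i, w in enumerate(weights):
        share = min(int(round(num_coins * w)), num_coins - paid)
        if i == len(weights) - 1:
            share = num_coins - paid
        shares.append(share)
        paid += share
    return shares

print(split_coins(100, [1, 1, 2]))  # [25, 25, 50]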
facebookresearch/fvcore
fvcore/transforms/transform.py
ScaleTransform.apply_image
python
def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray: if len(img.shape) == 4: h, w = img.shape[1:3] elif len(img.shape) in (2, 3): h, w = img.shape[:2] else: raise ValueError("Unsupported input with shape of {}".format(img.shape)) assert ( self.h == h and self.w == w ), "Input size mismatch h w {}:{} -> {}:{}".format(self.h, self.w, h, w) interp_method = interp if interp is not None else self.interp if interp_method in ["linear", "bilinear", "bicubic"]: align_corners = False else: align_corners = None float_tensor = torch.nn.functional.interpolate( to_float_tensor(img), size=(self.new_h, self.new_w), mode=interp_method, align_corners=align_corners, ) return to_numpy(float_tensor, img.shape, img.dtype)
Resize the image(s). Args: img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. interp (str): interpolation methods. Options includes `nearest`, `linear` (3D-only), `bilinear`, `bicubic` (4D-only), and `area`. Details can be found in: https://pytorch.org/docs/stable/nn.functional.html Returns: ndarray: resized image(s).
https://github.com/facebookresearch/fvcore/blob/4525b814c8bb0f70510e37e68247c958010eb285/fvcore/transforms/transform.py#L509-L550
import inspect import pprint from abc import ABCMeta, abstractmethod from typing import Any, Callable, List, Optional, TypeVar import numpy as np import torch from .transform_util import to_float_tensor, to_numpy __all__ = [ "BlendTransform", "CropTransform", "PadTransform", "GridSampleTransform", "HFlipTransform", "VFlipTransform", "NoOpTransform", "ScaleTransform", "Transform", "TransformList", ] class Transform(metaclass=ABCMeta): def _set_attributes(self, params: Optional[List[Any]] = None) -> None: if params: for k, v in params.items(): if k != "self" and not k.startswith("_"): setattr(self, k, v) @abstractmethod def apply_image(self, img: np.ndarray): @abstractmethod def apply_coords(self, coords: np.ndarray): def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray: return self.apply_image(segmentation) def apply_box(self, box: np.ndarray) -> np.ndarray: idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten() coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2) coords = self.apply_coords(coords).reshape((-1, 4, 2)) minxy = coords.min(axis=1) maxxy = coords.max(axis=1) trans_boxes = np.concatenate((minxy, maxxy), axis=1) return trans_boxes def apply_polygons(self, polygons: list) -> list: return [self.apply_coords(p) for p in polygons] @classmethod def register_type(cls, data_type: str, func: Optional[Callable] = None): if func is None: def wrapper(decorated_func): assert decorated_func is not None cls.register_type(data_type, decorated_func) return decorated_func return wrapper assert callable( func ), "You can only register a callable to a Transform. Got {} instead.".format( func ) argspec = inspect.getfullargspec(func) assert len(argspec.args) == 2, ( "You can only register a function that takes two positional " "arguments to a Transform! Got a function with spec {}".format(str(argspec)) ) setattr(cls, "apply_" + data_type, func) def inverse(self) -> "Transform": raise NotImplementedError def __repr__(self): try: sig = inspect.signature(self.__init__) classname = type(self).__name__ argstr = [] for name, param in sig.parameters.items(): assert ( param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD ), "The default __repr__ doesn't support *args or **kwargs" assert hasattr(self, name), ( "Attribute {} not found! " "Default __repr__ only works if attributes match the constructor.".format( name ) ) attr = getattr(self, name) default = param.default if default is attr: continue attr_str = pprint.pformat(attr) if "\n" in attr_str: attr_str = "..." argstr.append("{}={}".format(name, attr_str)) return "{}({})".format(classname, ", ".join(argstr)) except AssertionError: return super().__repr__() _T = TypeVar("_T") class TransformList(Transform): def __init__(self, transforms: List[Transform]): super().__init__() tfms_flatten = [] for t in transforms: assert isinstance( t, Transform ), f"TransformList requires a list of Transform. Got type {type(t)}!" 
if isinstance(t, TransformList): tfms_flatten.extend(t.transforms) else: tfms_flatten.append(t) self.transforms = tfms_flatten def _apply(self, x: _T, meth: str) -> _T: for t in self.transforms: x = getattr(t, meth)(x) return x def __getattribute__(self, name: str): if name.startswith("apply_"): return lambda x: self._apply(x, name) return super().__getattribute__(name) def __add__(self, other: "TransformList") -> "TransformList": others = other.transforms if isinstance(other, TransformList) else [other] return TransformList(self.transforms + others) def __iadd__(self, other: "TransformList") -> "TransformList": others = other.transforms if isinstance(other, TransformList) else [other] self.transforms.extend(others) return self def __radd__(self, other: "TransformList") -> "TransformList": others = other.transforms if isinstance(other, TransformList) else [other] return TransformList(others + self.transforms) def __len__(self) -> int: return len(self.transforms) def __getitem__(self, idx) -> Transform: return self.transforms[idx] def inverse(self) -> "TransformList": return TransformList([x.inverse() for x in self.transforms[::-1]]) def __repr__(self) -> str: msgs = [str(t) for t in self.transforms] return "TransformList[{}]".format(", ".join(msgs)) __str__ = __repr__ def apply_coords(self, x): raise NotImplementedError def apply_image(self, x): raise NotImplementedError class HFlipTransform(Transform): def __init__(self, width: int): super().__init__() self._set_attributes(locals()) def apply_image(self, img: np.ndarray) -> np.ndarray: if img.ndim <= 3: return np.flip(img, axis=1) else: return np.flip(img, axis=-2) def apply_coords(self, coords: np.ndarray) -> np.ndarray: coords[:, 0] = self.width - coords[:, 0] return coords def inverse(self) -> Transform: return self class VFlipTransform(Transform): def __init__(self, height: int): super().__init__() self._set_attributes(locals()) def apply_image(self, img: np.ndarray) -> np.ndarray: tensor = torch.from_numpy(np.ascontiguousarray(img)) if len(tensor.shape) == 2: tensor = tensor.flip((-2)) elif len(tensor.shape) > 2: tensor = tensor.flip((-3)) return tensor.numpy() def apply_coords(self, coords: np.ndarray) -> np.ndarray: coords[:, 1] = self.height - coords[:, 1] return coords def inverse(self) -> Transform: return self class NoOpTransform(Transform): def __init__(self): super().__init__() def apply_image(self, img: np.ndarray) -> np.ndarray: return img def apply_coords(self, coords: np.ndarray) -> np.ndarray: return coords def inverse(self) -> Transform: return self def __getattr__(self, name: str): if name.startswith("apply_"): return lambda x: x raise AttributeError("NoOpTransform object has no attribute {}".format(name)) class ScaleTransform(Transform): def __init__(self, h: int, w: int, new_h: int, new_w: int, interp: str = None): super().__init__() self._set_attributes(locals())
Apache License 2.0
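A minimal sketch of resizing an HxWxC uint8 image with the ScaleTransform recorded above; the shapes and the bilinear interpolation mode are arbitrary, and the coordinate call assumes apply_coords is implemented on the same class outside this excerpt:

import numpy as np
from fvcore.transforms.transform import ScaleTransform

# 64x48 RGB image, resized to 128x96 with bilinear interpolation.
img = np.random.randint(0, 256, size=(64, 48, 3), dtype=np.uint8)
t = ScaleTransform(h=64, w=48, new_h=128, new_w=96, interp="bilinear")

resized = t.apply_image(img)
print(resized.shape, resized.dtype)   # expected (128, 96, 3)

# Coordinates are rescaled consistently with the image
# (apply_coords is part of ScaleTransform, not shown in the excerpt).
coords = np.array([[24.0, 32.0]])
print(t.apply_coords(coords.copy()))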
pepkit/looper
oldtests/models/pipeline_interface/test_PipelineInterface.py
ConstructorPathParsingTests.apply_envvars
python
def apply_envvars(self, request): if "envvars" not in request.fixturenames: return original_envvars = {} new_envvars = request.getfixturevalue("envvars") for name, value in new_envvars.items(): try: original_envvars[name] = os.environ[name] except KeyError: pass os.environ[name] = value def restore(): for k, v in new_envvars.items(): try: os.environ[k] = original_envvars[k] except KeyError: del os.environ[k] request.addfinalizer(restore)
Use environment variables temporarily.
https://github.com/pepkit/looper/blob/cd1c7d4da84c631a4b6565b7a80926c7ac3640c3/oldtests/models/pipeline_interface/test_PipelineInterface.py#L457-L482
import copy import inspect import itertools import logging import os import random import sys import warnings import pytest import yaml from attmap import PathExAttMap from divvy import DEFAULT_COMPUTE_RESOURCES_NAME from looper.const import * from looper.pipeline_interface import PipelineInterface, PL_KEY, PROTOMAP_KEY, RESOURCES_KEY from looper.project import Project from looper.exceptions import InvalidResourceSpecificationException, MissingPipelineConfigurationException, PipelineInterfaceConfigError from peppy import Project, Sample from peppy.const import * from ubiquerg import powerset from .conftest import ATAC_PROTOCOL_NAME, write_config_data from oldtests.helpers import remove_piface_requirements __author__ = "Vince Reuter" __email__ = "vreuter@virginia.edu" _LOGGER = logging.getLogger(__name__) PIPELINE_NAMES = ["ATACseq", "WGBS"] EXTENSIONS = [".py", ".sh", ".R"] def pytest_generate_tests(metafunc): try: parameters = metafunc.cls.PARAMETERS except AttributeError: _LOGGER.debug("No indirect parameterization for test class: '{}'". format(metafunc.cls)) else: for name, values in parameters.items(): metafunc.parametrize(argnames=name, argvalues=values) @pytest.fixture(scope="function") def basic_pipe_iface_data(request): extension = request.getfixturevalue("extension") if "extension" in request.fixturenames else ".py" return {pipe_name + extension: {"name": pipe_name} for pipe_name in PIPELINE_NAMES} @pytest.fixture def bundled_piface(request): pipelines = request.getfixturevalue("basic_pipe_iface_data") return {PROTOMAP_KEY: {"ATAC": "ATACSeq.py"}, PL_KEY: pipelines} @pytest.fixture(scope="function") def pi_with_resources(request, bundled_piface, resources): if "use_new_file_size" in request.fixturenames: file_size_name = "min_file_size" if request.getfixturevalue("use_new_file_size") else "file_size" for rp_data in resources.values(): size1 = rp_data.pop("file_size", None) size2 = rp_data.pop("min_file_size", None) size = size1 or size2 if size: rp_data[file_size_name] = size pipe_iface_config = PipelineInterface(bundled_piface) for pipe_data in pipe_iface_config.pipelines.values(): pipe_data[RESOURCES_KEY] = resources return pipe_iface_config @pytest.mark.parametrize(argnames="from_file", argvalues=[False, True]) def test_basic_construction(tmpdir, from_file, bundled_piface): if from_file: pipe_iface_config = tmpdir.join("pipe-iface-conf.yaml").strpath with open(tmpdir.join("pipe-iface-conf.yaml").strpath, 'w') as f: yaml.safe_dump(bundled_piface, f) else: pipe_iface_config = bundled_piface pi = PipelineInterface(pipe_iface_config) assert PL_KEY in pi, "Missing pipeline key ({})".format(PL_KEY) assert PROTOMAP_KEY in pi, "Missing protocol mapping key: ({})".format(PROTOMAP_KEY) assert pi.pipe_iface_file == (pipe_iface_config if from_file else None) if from_file: assert pi.pipelines_path == tmpdir.strpath else: assert pi.pipelines_path is None assert bundled_piface[PL_KEY] == remove_piface_requirements(pi[PL_KEY]) assert PathExAttMap(bundled_piface[PROTOMAP_KEY]) == pi[PROTOMAP_KEY] assert pi.pipelines == pi[PL_KEY] assert list(pi.pipelines.keys()) == pi.pipeline_names def test_iterpipes(pi_with_resources): missing, unequal = [], [] seen = 0 known = pi_with_resources[PL_KEY] assert len(known) > 0 def get_err_msg(obs, context): return "{} of {} known pipeline(s) {}: {}".format( len(obs), len(known), context, ", ".join(obs)) for pipe, data in pi_with_resources.iterpipes(): seen += 1 if pipe not in known: missing.append(pipe) elif data != pi_with_resources.select_pipeline(pipe): 
unequal.append(pipe) assert len(known) == seen assert [] == missing, get_err_msg(missing, "missing") try: assert [] == unequal except AssertionError: print(get_err_msg(unequal, "with unmatched data")) print("KNOWN: {}".format(known)) print("ITERPIPES: {}".format(", ".join(pi_with_resources.iterpipes()))) raise @pytest.mark.parametrize( "exclude", powerset(PipelineInterface.REQUIRED_SECTIONS)) def test_requires_pipelines_and_protocol_mapping( basic_pipe_iface_data, bundled_piface, exclude): pipe_iface_config = copy.deepcopy(bundled_piface) missing = [s for s in PipelineInterface.REQUIRED_SECTIONS if s not in pipe_iface_config] assert [] == missing, "Missing PI config section(s): {}".format(", ".join(missing)) pipe_iface_config = { k: v for k, v in pipe_iface_config.items() if k not in exclude} assert [] == [s for s in exclude if s in pipe_iface_config] if exclude: with pytest.raises(PipelineInterfaceConfigError): PipelineInterface(pipe_iface_config) else: PipelineInterface(pipe_iface_config) @pytest.mark.parametrize( argnames="funcname_and_kwargs", argvalues=[("choose_resource_package", {"file_size": 4}), ("get_arg_string", {"sample": Sample( {"sample_name": "arbitrary-sample-name"})}), ("get_attribute", {"attribute_key": "irrelevant-attr-name"}), ("get_pipeline_name", {})]) @pytest.mark.parametrize(argnames="use_resources", argvalues=[False, True]) def test_unconfigured_pipeline_exception( funcname_and_kwargs, use_resources, pi_with_resources): pi = pi_with_resources if not use_resources: for pipeline in pi.pipelines.values(): try: del pipeline[RESOURCES_KEY][DEFAULT_COMPUTE_RESOURCES_NAME] except KeyError: pass def parse_param_names(f): return inspect.getargspec(f).args if sys.version_info < (3, 0) else [p for p in inspect.signature(f).parameters.keys()] funcname, kwargs = funcname_and_kwargs func = getattr(pi, funcname) required_parameters = parse_param_names(func) for parameter in ["pipeline_name", "pipeline"]: if parameter in required_parameters and parameter not in kwargs: kwargs[parameter] = "missing-pipeline" with pytest.raises(MissingPipelineConfigurationException): func.__call__(**kwargs) @pytest.mark.parametrize( argnames=["pipe_name", "extension"], argvalues=list(itertools.product(PIPELINE_NAMES, EXTENSIONS))) def test_prohibition_of_direct_pipeline_access( recwarn, pipe_name, extension, pi_with_resources): pk = pipe_name + extension assert pk in pi_with_resources.pipelines warnings.simplefilter('always') assert 0 == len(recwarn) _ = pi_with_resources.select_pipeline(pk) assert 0 == len(recwarn) with pytest.raises(KeyError): pi_with_resources[pk] class PipelineInterfaceNameResolutionTests: @pytest.mark.parametrize( argnames="name_and_ext_pairs", argvalues=itertools.combinations( itertools.product(PIPELINE_NAMES, EXTENSIONS), 2)) def test_get_pipeline_name_explicit(self, name_and_ext_pairs): names, extensions = zip(*name_and_ext_pairs) pipelines = [name + ext for name, ext in name_and_ext_pairs] pi_conf_data = {pipeline: {"name": name} for pipeline, name in zip(pipelines, names)} pi = PipelineInterface({PROTOMAP_KEY: {"ATAC": "ATACSeq.py"}, PL_KEY: pi_conf_data}) for pipeline, expected_name in zip(pipelines, names): assert expected_name == pi.get_pipeline_name(pipeline) class PipelineInterfaceResourcePackageTests: PARAMETERS = {"use_new_file_size": [False, True]} def test_requires_default( self, use_new_file_size, pi_with_resources, huge_resources): pi = pi_with_resources for name, pipeline in pi.iterpipes(): try: del pipeline[RESOURCES_KEY][DEFAULT_COMPUTE_RESOURCES_NAME] except 
KeyError: pass assert "default" not in pipeline[RESOURCES_KEY] with pytest.raises(InvalidResourceSpecificationException): pi.choose_resource_package( name, file_size=huge_resources["file_size"] + 1) def test_negative_file_size_request( self, use_new_file_size, pi_with_resources): pi = pi_with_resources for pipeline_name in pi.pipeline_names: negative_file_size = -10 * random.random() with pytest.raises(ValueError): pi.choose_resource_package( pipeline_name, file_size=negative_file_size) @pytest.mark.parametrize(argnames="file_size", argvalues=[0, 10, 101]) def test_resources_not_required( self, use_new_file_size, file_size, pi_with_resources): pi = pi_with_resources for pipe_data in pi.pipelines.values(): del pipe_data[RESOURCES_KEY] for pipe_name in pi.pipeline_names: assert {} == pi.choose_resource_package(pipe_name, int(file_size)) assert {} == pi.choose_resource_package(pipe_name, float(file_size)) @pytest.mark.parametrize( argnames=["file_size", "expected_package_name"], argvalues=[(0, "default"), (4, "default"), (16, "midsize"), (64, "huge")]) def test_selects_proper_resource_package( self, use_new_file_size, pi_with_resources, file_size, expected_package_name, midsize_resources): for pipe_data in pi_with_resources.pipelines.values(): pipe_data[RESOURCES_KEY].update( {"midsize": copy.deepcopy(midsize_resources)}) for pipe_name, pipe_data in pi_with_resources.iterpipes(): observed_package = pi_with_resources.choose_resource_package( pipe_name, file_size) expected_package = copy.deepcopy( pipe_data[RESOURCES_KEY][expected_package_name]) assert expected_package == observed_package def test_negative_file_size_prohibited( self, use_new_file_size, pi_with_resources): file_size_attr = "min_file_size" if use_new_file_size else "file_size" for pipe_data in pi_with_resources.pipelines.values(): for package_data in pipe_data[RESOURCES_KEY].values(): package_data[file_size_attr] = -5 * random.random() for pipe_name in pi_with_resources.pipeline_names: file_size_request = random.randrange(1, 11) with pytest.raises(ValueError): pi_with_resources.choose_resource_package( pipe_name, file_size_request) def test_file_size_spec_not_required_for_default( self, use_new_file_size, bundled_piface, default_resources, huge_resources, midsize_resources): def clear_file_size(resource_package): for fs_var_name in ("file_size", "min_file_size"): if fs_var_name in resource_package: del resource_package[fs_var_name] resources_data = dict(zip( ["default", "midsize", "huge"], [copy.deepcopy(data) for data in [default_resources, midsize_resources, huge_resources]])) for pack_name, pack_data in resources_data.items(): if pack_name == "default": clear_file_size(pack_data) elif use_new_file_size: pack_data["min_file_size"] = pack_data.pop("file_size") pipe_iface_data = copy.deepcopy(bundled_piface) for pipe_data in pipe_iface_data[PL_KEY].values(): pipe_data[RESOURCES_KEY] = resources_data pi = PipelineInterface(pipe_iface_data) for pipe_name, pipe_data in pi.iterpipes(): default_resource_package = pipe_data[RESOURCES_KEY][DEFAULT_COMPUTE_RESOURCES_NAME] clear_file_size(default_resource_package) assert default_resource_package == pi.choose_resource_package(pipe_name, 0.001) @pytest.mark.parametrize( argnames="min_file_size", argvalues=[-1, 1]) def test_default_package_new_name_zero_size( self, use_new_file_size, min_file_size, pi_with_resources): for pipe_name, pipe_data in pi_with_resources.iterpipes(): default_resource_package = pipe_data[RESOURCES_KEY]["default"] if use_new_file_size: if "file_size" in 
default_resource_package: del default_resource_package["file_size"] default_resource_package["min_file_size"] = min_file_size else: if "min_file_size" in default_resource_package: del default_resource_package["min_file_size"] default_resource_package["file_size"] = min_file_size observed_resource_package = pi_with_resources.choose_resource_package(pipe_name, 0) expected_resource_package = copy.deepcopy(default_resource_package) if "file_size" in expected_resource_package: del expected_resource_package["file_size"] expected_resource_package["min_file_size"] = 0 assert expected_resource_package == observed_resource_package def test_file_size_spec_required_for_non_default_packages( self, use_new_file_size, bundled_piface, default_resources, huge_resources): resource_package_data = { "default": copy.deepcopy(default_resources), "huge": copy.deepcopy(huge_resources)} del resource_package_data["huge"]["file_size"] if use_new_file_size: resource_package_data["default"]["min_file_size"] = resource_package_data["default"].pop("file_size") for pipe_data in bundled_piface[PL_KEY].values(): pipe_data[RESOURCES_KEY] = resource_package_data pi = PipelineInterface(bundled_piface) for pipe_name in pi.pipeline_names: with pytest.raises(KeyError): pi.choose_resource_package(pipe_name, random.randrange(0, 10)) class ConstructorPathParsingTests: ADD_PATH = [True, False] PIPELINE_KEYS = ["ATACSeq.py", "no_path.py"] RELATIVE_PATH_DATA = [ ("./arbitrary-test-pipelines", {}, "./arbitrary-test-pipelines"), ("path/to/$TEMP_PIPE_LOCS", {"TEMP_PIPE_LOCS": "validation-value"}, "path/to/validation-value")] ABSOLUTE_PATHS = [ os.path.join("~", "code_home", "bioinformatics"), os.path.join("$TEMP_TEST_HOME", "subfolder"), os.path.join("~", "$TEMPORARY_SUBFOLDER", "leaf")] ABSPATH_ENVVARS = {"TEMP_TEST_HOME": "tmptest-home-folder", "TEMPORARY_SUBFOLDER": "temp-subfolder"} EXPECTED_PATHS_ABSOLUTE = [ os.path.join(os.path.expanduser("~"), "code_home", "bioinformatics"), os.path.join("tmptest-home-folder", "subfolder"), os.path.join(os.path.expanduser("~"), "temp-subfolder", "leaf")] @pytest.fixture(scope="function") def pipe_iface_data(self, piface_config_bundles): return dict(zip(self.PIPELINE_KEYS, piface_config_bundles)) @pytest.fixture(scope="function") def bundled_piface(self, pipe_iface_data): return {PROTOMAP_KEY: {"ATAC": "ATACSeq.py"}, PL_KEY: pipe_iface_data} @pytest.fixture(scope="function", autouse=True)
BSD 2-Clause Simplified License
vikifox/monster
elfinder/volumes/storage.py
ElfinderVolumeStorage._save_uploaded
python
def _save_uploaded(self, uploaded_file, dir_, name, **kwargs):
    path = self._join_path(dir_, name)
    first_chunk = kwargs.get('first_chunk', False)
    chunk = kwargs.get('chunk', False)
    if chunk is False:
        target = self._fopen(path, 'w+')
    else:
        if first_chunk is True:
            target = self._fopen(path, 'w+')
        else:
            target = self._fopen(path, 'a+')
    for chunk in uploaded_file.chunks():
        target.write(chunk)
    target.close()
    return path
Save the Django `UploadedFile <https://docs.djangoproject.com/en/dev/topics/http/file-uploads/#django.core.files.uploadedfile.UploadedFile>`_ object and return its new path.
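Usage sketch (not from the repository): a minimal illustration of how a caller might hand a Django upload to this method on an already-mounted volume; the view function, volume instance and target directory below are hypothetical.

# Hypothetical caller: save an uploaded file in one piece (no chunked upload).
def handle_upload(request, volume, target_dir):
    uploaded = request.FILES['upload']  # a django UploadedFile
    # chunk/first_chunk are left False, so the target is opened with 'w+'
    return volume._save_uploaded(uploaded, target_dir, uploaded.name,
                                 chunk=False, first_chunk=False)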
https://github.com/vikifox/monster/blob/bac9b7da204c3eee344f55bb2187df38ef3b3d4c/elfinder/volumes/storage.py#L458-L478
import os, re, magic, time, tempfile, shutil, mimetypes try: from PIL import Image except ImportError: import Image from django.core.files.storage import FileSystemStorage from django.core.files.base import ContentFile from django.core.files import File as DjangoFile from importlib import import_module from elfinder.exceptions import NotAnImageError, ElfinderErrorMessages from base import ElfinderVolumeDriver class ElfinderVolumeStorage(ElfinderVolumeDriver): _driver_id = 's' def mount(self, opts): if "key_label" in opts['storageKwArgs'].keys(): self._key_label = opts['storageKwArgs']['key_label'] del opts['storageKwArgs']['key_label'] if not 'storage' in opts: if not 'storageClass' in opts: opts['storage'] = FileSystemStorage() else: if isinstance(opts['storageClass'], basestring): split = opts['storageClass'].split('.') storage_module = import_module('.'.join(split[:-1])) opts['storageClass'] = getattr(storage_module, split[-1]) if not 'storageKwArgs' in opts: opts['storageKwArgs'] = {} opts['storage'] = opts['storageClass'](**opts['storageKwArgs']) try: opts['storage'].listdir(self._root) opts['storage'].url(self._root) except NotImplementedError: raise Exception('Storage %s should implement both the listdir() and url() methods to be valid for use with yawd-elfinder.' % self._options['storage'].__class__) self._options['path'] = '.' if (not 'URL' in opts or not opts['URL']): self._options['URL'] = opts['storage'].url(self._root) if not 'alias' in opts or not opts['alias']: self._options['alias'] = opts['storage'].__class__.__name__ return super(ElfinderVolumeStorage, self).mount(opts) def _configure(self): if not self._isabs(self._options['tmbPath']): super(ElfinderVolumeStorage, self)._configure() if not self._options['tmbURL'] and self._options['URL']: self._options['tmbURL'] = self._options['URL'] + self._options['tmbPath'][len(self._root)+1:].replace(self._separator, '/') + '/' elif self._isabs(self._options['tmbPath']): raise Exception('tmbPath must be relative') try: self._options['storage'].delete(self.encode(str(time.time()))) except NotImplementedError: if not 'rm' in self._options['disabled']: self._options['disabled'].append('rm') except: pass if not 'rmDir' in self._options or not callable(self._options['rmDir']): if isinstance(self._options['storage'], FileSystemStorage): self._options['rmDir'] = self._rmdir_callable elif not 'rmdir' in self._options['disabled']: pass def _dirname(self, path): return self._separator.join(path.split(self._separator)[:-1]) def _basename(self, path): return path.split(self._separator)[-1] def _join_path(self, path1, path2): if self._separator == '\\' and re.match(r'([a-zA-Z]+:)?\\$', path2): return path2 elif path2.startswith(self._separator): return path2 if not path1.endswith(self._separator): return '%s%s%s' % (path1, self._separator, path2) else: return '%s%s' % (path1, path2) def _normpath(self, path): if path[-1] == self._separator: return path[:-1] return path def _get_available_name(self, dir_, name, ext, i): path = self._options['storage'].get_available_name(self._join_path(dir_, '%s%s' % (name, ext))) return self._basename(path) def _stat(self, path): stat = {} if not self._options['storage'].exists(path): raise os.error try: stat['mime'] = self.mimetype(path) try: stat['size'] = self._options['storage'].size(path) except NotImplementedError: stat['size'] = 0 except: stat['mime'] = 'directory' stat['size'] = 0 try: stat['ts'] = time.mktime(self._options['storage'].modified_time(path).timetuple()) except NotImplementedError: 
stat['ts'] = '' stat['read'] = True stat['write'] = True return stat def _subdirs(self, path): try: for entry in self._options['storage'].listdir(path)[0]: if not self._attr(self._join_path(path, entry), 'hidden'): return True except NotImplementedError: pass def _dimensions(self, path): try: im = self._openimage(path) return '%sx%s' % im.size except: raise NotAnImageError def _mimetype(self, path): file_name = str(path.split("/")[-1]).strip() if re.search(r'^\./proc/', path) or re.search(r'^\./sys/', path): if file_name in self._files: try: fp = self._fopen(path) mime = magic.Magic(mime=True).from_buffer(fp.read(10)) fp.close() return mime except: return "application/empty" if re.search(r'^\./dev/', path) and self._files[file_name] in 'l': return "application/empty" if file_name in self._files: if self._files[file_name] not in '-l': return "application/empty" fp = self._fopen(path) mime = magic.Magic(mime=True).from_buffer(fp.read(10)) fp.close() return mime def _scandir(self, path): try: all_ = self._options['storage'].listdir(path) return map(lambda x: self._join_path(path, x), all_[0]+all_[1]) except NotImplementedError: return [] def _fopen(self, path, mode='rb'): return self._options['storage'].open(path, mode) def _fclose(self, fp, **kwargs): return fp.close() def _openimage(self, path): fp = self._fopen(path) tmp_file = tempfile.TemporaryFile() tmp_file.write(fp.read()) fp.close() tmp_file.seek(0) im = Image.open(tmp_file) return im def _saveimage(self, im, path, form): tmp_file = tempfile.TemporaryFile() im.save(tmp_file, form) tmp_file.seek(0) fp = self._fopen(path, 'w+') fp.write(tmp_file.read()) tmp_file.close() fp.close() def _mkdir(self, path, mode=None): fname = '.%s-mkdir' % self.encode(path) self._mkfile(path, fname) self._unlink(self._join_path(path, fname)) return path def _mkfile(self, path, name): try: return self._options['storage'].save(self._join_path(path, name), ContentFile('')) except: raise os.error def _copy(self, source, target_dir, name): fp = self._fopen(source) tmp_file = tempfile.NamedTemporaryFile() tmp_file.write(fp.read()) fp.close() self._options['storage'].save(self._join_path(target_dir, name), DjangoFile(tmp_file)) tmp_file.close() def _move(self, source, target_dir, name): stat = self.stat(source) try: if stat['mime'] == 'directory': dest = self._join_path(target_dir, name) self._mkdir(dest) for p in self._get_cached_dir(source): self._move(p, dest, self._basename(p)) self._rmdir(source) else: self._copy(source, target_dir, name) self._unlink(source) except: raise os.error return self._join_path(target_dir, name) def _unlink(self, path): try: self._options['storage'].delete(path) return True except: return False def _rmdir(self, path): if 'rmDir' in self._options and callable(self._options['rmDir']): return self._options['rmDir'](path, self._options['storage']) raise os.error def _rmdir_callable(self, path, storage): return os.rmdir(self._join_path(storage.location, path)) def _save(self, fp, dir_, name): tmp_file = tempfile.NamedTemporaryFile() tmp_file.write(fp.read()) fp.close() tmp_file.seek(0) path = self._join_path(dir_, name) self._options['storage'].save(path, DjangoFile(tmp_file)) tmp_file.close() return path
Apache License 2.0
azure/azure-storage-python
azure-storage-file/azure/storage/file/fileservice.py
FileService.set_share_metadata
python
def set_share_metadata(self, share_name, metadata=None, timeout=None):
    _validate_not_none('share_name', share_name)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(share_name)
    request.query = {
        'restype': 'share',
        'comp': 'metadata',
        'timeout': _int_to_str(timeout),
    }
    _add_metadata_headers(metadata, request)

    self._perform_request(request)
Sets one or more user-defined name-value pairs for the specified share.

Each call to this operation replaces all existing metadata attached
to the share. To remove all metadata from the share, call this
operation with no metadata dict.

:param str share_name:
    Name of existing share.
:param metadata:
    A dict containing name-value pairs to associate with the share as
    metadata. Example: {'category':'test'}
:type metadata: dict(str, str)
:param int timeout:
    The timeout parameter is expressed in seconds.
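Usage sketch, with placeholder credentials and share name; the public import path matches the package layout shown in this record.

# Hedged example: replace all metadata on an existing share, then clear it.
from azure.storage.file import FileService

file_service = FileService(account_name='myaccount', account_key='...')  # placeholders
file_service.set_share_metadata('myshare', metadata={'category': 'test'})
file_service.set_share_metadata('myshare')  # calling with no dict removes all metadata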
https://github.com/azure/azure-storage-python/blob/4306898850dd21617644fc537a57d025e833db74/azure-storage-file/azure/storage/file/fileservice.py#L795-L823
import sys from datetime import datetime import math from os import path from azure.common import AzureHttpError from azure.storage.common._auth import ( _StorageSharedKeyAuthentication, _StorageSASAuthentication, ) from azure.storage.common._common_conversion import ( _int_to_str, _to_str, _get_content_md5, ) from azure.storage.common._connection import _ServiceParameters from azure.storage.common._constants import ( SERVICE_HOST_BASE, DEFAULT_PROTOCOL, DEV_ACCOUNT_NAME, ) from azure.storage.common._deserialization import ( _convert_xml_to_service_properties, _convert_xml_to_signed_identifiers, _parse_metadata, _parse_properties, _parse_length_from_content_range, ) from azure.storage.common._error import ( _dont_fail_not_exist, _dont_fail_on_exist, _validate_not_none, _validate_type_bytes, _ERROR_VALUE_NEGATIVE, _ERROR_STORAGE_MISSING_INFO, _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES, _ERROR_PARALLEL_NOT_SEEKABLE, _validate_access_policies, ) from azure.storage.common._http import HTTPRequest from azure.storage.common._serialization import ( _get_request_body, _get_data_bytes_only, _convert_signed_identifiers_to_xml, _convert_service_properties_to_xml, _add_metadata_headers, ) from azure.storage.common.models import ( Services, ListGenerator, _OperationContext, ) from .sharedaccesssignature import ( FileSharedAccessSignature, ) from azure.storage.common.storageclient import StorageClient from ._deserialization import ( _convert_xml_to_shares, _convert_xml_to_directories_and_files, _convert_xml_to_handles, _parse_close_handle_response, _convert_xml_to_ranges, _convert_xml_to_share_stats, _parse_file, _parse_share, _parse_snapshot_share, _parse_directory, _parse_permission_key, _parse_permission) from ._download_chunking import _download_file_chunks from ._serialization import ( _get_path, _validate_and_format_range_headers, _validate_and_return_file_permission) from ._upload_chunking import _upload_file_chunks from .models import ( FileProperties, SMBProperties) from ._constants import ( X_MS_VERSION, __version__ as package_version, ) _SHARE_NOT_FOUND_ERROR_CODE = 'ShareNotFound' _PARENT_NOT_FOUND_ERROR_CODE = 'ParentNotFound' _RESOURCE_NOT_FOUND_ERROR_CODE = 'ResourceNotFound' _RESOURCE_ALREADY_EXISTS_ERROR_CODE = 'ResourceAlreadyExists' _SHARE_ALREADY_EXISTS_ERROR_CODE = 'ShareAlreadyExists' _GB = 1024 * 1024 * 1024 if sys.version_info >= (3,): from io import BytesIO else: from cStringIO import StringIO as BytesIO class FileService(StorageClient): MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024 MAX_RANGE_SIZE = 4 * 1024 * 1024 def __init__(self, account_name=None, account_key=None, sas_token=None, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, request_session=None, connection_string=None, socket_timeout=None): service_params = _ServiceParameters.get_service_parameters( 'file', account_name=account_name, account_key=account_key, sas_token=sas_token, protocol=protocol, endpoint_suffix=endpoint_suffix, request_session=request_session, connection_string=connection_string, socket_timeout=socket_timeout) super(FileService, self).__init__(service_params) if self.account_name == DEV_ACCOUNT_NAME: raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES) if self.account_key: self.authentication = _StorageSharedKeyAuthentication( self.account_name, self.account_key, ) elif self.sas_token: self.authentication = _StorageSASAuthentication(self.sas_token) else: raise ValueError(_ERROR_STORAGE_MISSING_INFO) self._X_MS_VERSION = X_MS_VERSION 
self._update_user_agent_string(package_version) def make_file_url(self, share_name, directory_name, file_name, protocol=None, sas_token=None): if directory_name is None: url = '{}://{}/{}/{}'.format( protocol or self.protocol, self.primary_endpoint, share_name, file_name, ) else: url = '{}://{}/{}/{}/{}'.format( protocol or self.protocol, self.primary_endpoint, share_name, directory_name, file_name, ) if sas_token: url += (sas_token if sas_token.startswith('?') else '?' + sas_token) return url def generate_account_shared_access_signature(self, resource_types, permission, expiry, start=None, ip=None, protocol=None): _validate_not_none('self.account_name', self.account_name) _validate_not_none('self.account_key', self.account_key) sas = FileSharedAccessSignature(self.account_name, self.account_key) return sas.generate_account(Services.FILE, resource_types, permission, expiry, start=start, ip=ip, protocol=protocol) def generate_share_shared_access_signature(self, share_name, permission=None, expiry=None, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None): _validate_not_none('share_name', share_name) _validate_not_none('self.account_name', self.account_name) _validate_not_none('self.account_key', self.account_key) sas = FileSharedAccessSignature(self.account_name, self.account_key) return sas.generate_share( share_name, permission, expiry, start=start, id=id, ip=ip, protocol=protocol, cache_control=cache_control, content_disposition=content_disposition, content_encoding=content_encoding, content_language=content_language, content_type=content_type, ) def generate_file_shared_access_signature(self, share_name, directory_name=None, file_name=None, permission=None, expiry=None, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None): _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('self.account_name', self.account_name) _validate_not_none('self.account_key', self.account_key) sas = FileSharedAccessSignature(self.account_name, self.account_key) return sas.generate_file( share_name, directory_name, file_name, permission, expiry, start=start, id=id, ip=ip, protocol=protocol, cache_control=cache_control, content_disposition=content_disposition, content_encoding=content_encoding, content_language=content_language, content_type=content_type, ) def set_file_service_properties(self, hour_metrics=None, minute_metrics=None, cors=None, timeout=None): request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path() request.query = { 'restype': 'service', 'comp': 'properties', 'timeout': _int_to_str(timeout), } request.body = _get_request_body( _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors)) self._perform_request(request) def get_file_service_properties(self, timeout=None): request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path() request.query = { 'restype': 'service', 'comp': 'properties', 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_xml_to_service_properties) def list_shares(self, prefix=None, marker=None, num_results=None, include_metadata=False, timeout=None, include_snapshots=False): include = 'snapshots' if include_snapshots else None if 
include_metadata: if include is not None: include = include + ',metadata' else: include = 'metadata' operation_context = _OperationContext(location_lock=True) kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, 'include': include, 'timeout': timeout, '_context': operation_context} resp = self._list_shares(**kwargs) return ListGenerator(resp, self._list_shares, (), kwargs) def _list_shares(self, prefix=None, marker=None, max_results=None, include=None, timeout=None, _context=None): request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path() request.query = { 'comp': 'list', 'prefix': _to_str(prefix), 'marker': _to_str(marker), 'maxresults': _int_to_str(max_results), 'include': _to_str(include), 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_xml_to_shares, operation_context=_context) def create_share(self, share_name, metadata=None, quota=None, fail_on_exist=False, timeout=None): _validate_not_none('share_name', share_name) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name) request.query = { 'restype': 'share', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-share-quota': _int_to_str(quota) } _add_metadata_headers(metadata, request) if not fail_on_exist: try: self._perform_request(request, expected_errors=[_SHARE_ALREADY_EXISTS_ERROR_CODE]) return True except AzureHttpError as ex: _dont_fail_on_exist(ex) return False else: self._perform_request(request) return True def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None): _validate_not_none('share_name', share_name) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name) request.query = { 'restype': 'share', 'comp': 'snapshot', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-share-quota': _int_to_str(quota) } _add_metadata_headers(metadata, request) return self._perform_request(request, _parse_snapshot_share, [share_name]) def get_share_properties(self, share_name, timeout=None, snapshot=None): _validate_not_none('share_name', share_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path(share_name) request.query = { 'restype': 'share', 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot) } return self._perform_request(request, _parse_share, [share_name]) def set_share_properties(self, share_name, quota, timeout=None): _validate_not_none('share_name', share_name) _validate_not_none('quota', quota) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name) request.query = { 'restype': 'share', 'comp': 'properties', 'timeout': _int_to_str(timeout), } request.headers = { 'x-ms-share-quota': _int_to_str(quota) } self._perform_request(request) def get_share_metadata(self, share_name, timeout=None, snapshot=None): _validate_not_none('share_name', share_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path(share_name) request.query = { 'restype': 'share', 'comp': 'metadata', 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot), } return self._perform_request(request, _parse_metadata)
MIT License
junsukchoe/adl
tensorpack/dataflow/imgaug/imgproc.py
Saturation.__init__
python
def __init__(self, alpha=0.4, rgb=True):
    super(Saturation, self).__init__()
    rgb = bool(rgb)
    assert alpha < 1
    self._init(locals())
Args:
    alpha(float): maximum saturation change.
    rgb (bool): whether input is RGB or BGR.
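Usage sketch; it assumes tensorpack's standard augmentor interface (calling augment on an HWC image), which is not shown in this record.

# Hedged example: perturb the saturation of a single uint8 RGB image.
import numpy as np
from tensorpack.dataflow.imgaug import Saturation

aug = Saturation(alpha=0.4, rgb=True)
img = (np.random.rand(64, 64, 3) * 255).astype('uint8')  # placeholder image
out = aug.augment(img)  # saturation scaled by a random factor within +/- alpha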
https://github.com/junsukchoe/adl/blob/dab2e78163bd96970ec9ae41de62835332dbf4fe/tensorpack/dataflow/imgaug/imgproc.py#L228-L237
import numpy as np import cv2 from .base import PhotometricAugmentor __all__ = ['Hue', 'Brightness', 'BrightnessScale', 'Contrast', 'MeanVarianceNormalize', 'GaussianBlur', 'Gamma', 'Clip', 'Saturation', 'Lighting', 'MinMaxNormalize'] class Hue(PhotometricAugmentor): def __init__(self, range=(0, 180), rgb=True): super(Hue, self).__init__() rgb = bool(rgb) self._init(locals()) def _get_augment_params(self, _): return self._rand_range(*self.range) def _augment(self, img, hue): m = cv2.COLOR_BGR2HSV if not self.rgb else cv2.COLOR_RGB2HSV hsv = cv2.cvtColor(img, m) if hsv.dtype.itemsize == 1: hsv[..., 0] = (hsv[..., 0] + hue) % 180 else: hsv[..., 0] = (hsv[..., 0] + 2 * hue) % 360 m = cv2.COLOR_HSV2BGR if not self.rgb else cv2.COLOR_HSV2RGB img = cv2.cvtColor(hsv, m) return img class Brightness(PhotometricAugmentor): def __init__(self, delta, clip=True): super(Brightness, self).__init__() assert delta > 0 self._init(locals()) def _get_augment_params(self, _): return self._rand_range(-self.delta, self.delta) def _augment(self, img, v): old_dtype = img.dtype img = img.astype('float32') img += v if self.clip or old_dtype == np.uint8: img = np.clip(img, 0, 255) return img.astype(old_dtype) class BrightnessScale(PhotometricAugmentor): def __init__(self, range, clip=True): super(BrightnessScale, self).__init__() self._init(locals()) def _get_augment_params(self, _): return self._rand_range(*self.range) def _augment(self, img, v): old_dtype = img.dtype img = img.astype('float32') img *= v if self.clip or old_dtype == np.uint8: img = np.clip(img, 0, 255) return img.astype(old_dtype) class Contrast(PhotometricAugmentor): def __init__(self, factor_range, rgb=None, clip=True): super(Contrast, self).__init__() self._init(locals()) def _get_augment_params(self, _): return self._rand_range(*self.factor_range) def _augment(self, img, r): old_dtype = img.dtype if img.ndim == 3: if self.rgb is not None: m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY grey = cv2.cvtColor(img.astype('float32'), m) mean = np.mean(grey) else: mean = np.mean(img, axis=(0, 1), keepdims=True) else: mean = np.mean(img) img = img * r + mean * (1 - r) if self.clip or old_dtype == np.uint8: img = np.clip(img, 0, 255) return img.astype(old_dtype) class MeanVarianceNormalize(PhotometricAugmentor): def __init__(self, all_channel=True): self._init(locals()) def _augment(self, img, _): img = img.astype('float32') if self.all_channel: mean = np.mean(img) std = np.std(img) else: mean = np.mean(img, axis=(0, 1), keepdims=True) std = np.std(img, axis=(0, 1), keepdims=True) std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape))) img = (img - mean) / std return img class GaussianBlur(PhotometricAugmentor): def __init__(self, max_size=3): super(GaussianBlur, self).__init__() self._init(locals()) def _get_augment_params(self, _): sx, sy = self.rng.randint(self.max_size, size=(2,)) sx = sx * 2 + 1 sy = sy * 2 + 1 return sx, sy def _augment(self, img, s): return np.reshape(cv2.GaussianBlur(img, s, sigmaX=0, sigmaY=0, borderType=cv2.BORDER_REPLICATE), img.shape) class Gamma(PhotometricAugmentor): def __init__(self, range=(-0.5, 0.5)): super(Gamma, self).__init__() self._init(locals()) def _get_augment_params(self, _): return self._rand_range(*self.range) def _augment(self, img, gamma): old_dtype = img.dtype lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. 
+ gamma)) * 255).astype('uint8') img = np.clip(img, 0, 255).astype('uint8') ret = cv2.LUT(img, lut).astype(old_dtype) if img.ndim == 3 and ret.ndim == 2: ret = ret[:, :, np.newaxis] return ret class Clip(PhotometricAugmentor): def __init__(self, min=0, max=255): self._init(locals()) def _augment(self, img, _): return np.clip(img, self.min, self.max) class Saturation(PhotometricAugmentor):
MIT License
polyjit/benchbuild
benchbuild/utils/uchroot.py
mkfile_uchroot
python
def mkfile_uchroot(filepath: str, root: str = ".") -> None:
    _uchroot = no_args()
    _uchroot = _uchroot["-E", "-A", "-C", "-w", "/", "-r"]
    _uchroot = _uchroot[os.path.abspath(root)]
    uretry(_uchroot["--", "/bin/touch", filepath])
Create a file inside a uchroot env.

You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.

Args:
    filepath: The filepath that should be created. Absolute inside the
        uchroot container.
    root: The root PATH of the container filesystem as seen outside of
        the container.
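Usage sketch; the container root path and file path are placeholders, and a working uchroot setup is assumed.

# Hedged example: create a marker file inside an unpacked container filesystem.
from benchbuild.utils.uchroot import mkdir_uchroot, mkfile_uchroot

mkdir_uchroot("/etc/benchbuild", root="./container-root")         # placeholder root
mkfile_uchroot("/etc/benchbuild/marker", root="./container-root")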
https://github.com/polyjit/benchbuild/blob/04655f86ff0b28cd0770048e1213aeca3d0ee557/benchbuild/utils/uchroot.py#L231-L249
import enum import logging import os import typing as tp from plumbum import local from plumbum.commands import ProcessExecutionError from plumbum.commands.base import BoundCommand from benchbuild.settings import CFG from benchbuild.utils import path, run LOG = logging.getLogger(__name__) def uchroot(*args, **kwargs): uchroot_cmd = with_mounts(*args, uchroot_cmd_fn=no_llvm, **kwargs) return uchroot_cmd["--"] def __default_opts__(uid=0, gid=0): return [ "-C", "-w", "/", "-r", local.cwd, "-u", str(uid), "-g", str(gid), "-E", "-A" ] def no_llvm(*args, uid=0, gid=0, **kwargs): del kwargs uchroot_cmd = no_args() uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)] return uchroot_cmd[args] def no_args(**kwargs): del kwargs from benchbuild.utils.cmd import uchroot as uchrt prefixes = CFG["container"]["prefixes"].value p_paths, p_libs = env(prefixes) uchrt = run.with_env_recursive( uchrt, LD_LIBRARY_PATH=path.list_to_path(p_libs), PATH=path.list_to_path(p_paths) ) return uchrt def with_mounts(*args, uchroot_cmd_fn=no_args, **kwargs): _mounts = CFG["container"]["mounts"].value prefixes = CFG["container"]["prefixes"].value uchroot_opts, _mounts = __mounts__("mnt", _mounts) uchroot_cmd = uchroot_cmd_fn(**kwargs) uchroot_cmd = uchroot_cmd[uchroot_opts] uchroot_cmd = uchroot_cmd[args] paths, libs = env(_mounts) prefix_paths, prefix_libs = env(prefixes) uchroot_cmd = run.with_env_recursive( uchroot_cmd, LD_LIBRARY_PATH=path.list_to_path(libs + prefix_libs), PATH=path.list_to_path(paths + prefix_paths) ) return uchroot_cmd class UchrootEC(enum.Enum): MNT_FAILED = 255 MNT_PROC_FAILED = 254 MNT_DEV_FAILED = 253 MNT_SYS_FAILED = 252 MNT_PTS_FAILED = 251 def retry( pb_cmd: BoundCommand, retries: int = 0, max_retries: int = 10, retcode: int = 0, retry_retcodes: tp.Optional[tp.List[int]] = None ) -> None: try: pb_cmd.run_fg(retcode=retcode) except ProcessExecutionError as proc_ex: new_retcode = proc_ex.retcode if retries > max_retries: LOG.error("Retried %d times. No change. 
Abort", retries) raise if retry_retcodes and new_retcode in retry_retcodes: retry( pb_cmd, retries=retries + 1, max_retries=max_retries, retcode=retcode, retry_retcodes=retry_retcodes ) else: raise def uretry(cmd: BoundCommand, retcode: int = 0) -> None: retry( cmd, retcode=retcode, retry_retcodes=[ UchrootEC.MNT_PROC_FAILED.value, UchrootEC.MNT_DEV_FAILED.value, UchrootEC.MNT_SYS_FAILED.value, UchrootEC.MNT_PTS_FAILED.value ] ) def clean_env( uchroot_cmd: BoundCommand, varnames: tp.List[str] ) -> BoundCommand: _env = uchroot_cmd["/usr/bin/env"] __clean_env = _env["-u", ",".join(varnames)] return __clean_env def mounts(prefix: str, __mounts: tp.List) -> tp.List[str]: i = 0 mntpoints = [] for mount in __mounts: if not isinstance(mount, dict): mntpoint = "{0}/{1}".format(prefix, str(i)) mntpoints.append(mntpoint) i = i + 1 return mntpoints def __mounts__(prefix: str, _mounts: tp.List) -> tp.Tuple[tp.List[str], tp.List[str]]: i = 0 mntpoints = [] uchroot_opts = [] for mount in _mounts: if isinstance(mount, dict): src_mount = mount["src"] tgt_mount = mount["tgt"] else: src_mount = mount tgt_mount = "{0}/{1}".format(prefix, str(i)) i = i + 1 mkdir_uchroot(tgt_mount) uchroot_opts.extend(["-M", "{0}:{1}".format(src_mount, tgt_mount)]) mntpoints.append(tgt_mount) return uchroot_opts, mntpoints def env( uchroot_mounts: tp.List[str] ) -> tp.Tuple[tp.List[local.path], tp.List[local.path]]: f_mounts = [m.strip("/") for m in uchroot_mounts] root = local.path("/") ld_libs = [root / m / "lib" for m in f_mounts] ld_libs.extend([root / m / "lib64" for m in f_mounts]) paths = [root / m / "bin" for m in f_mounts] paths.extend([root / m / "sbin" for m in f_mounts]) paths.extend([root / m for m in f_mounts]) return paths, ld_libs def mkdir_uchroot(dirpath: str, root: str = ".") -> None: _uchroot = no_args() _uchroot = _uchroot["-E", "-A", "-C", "-w", "/", "-r"] _uchroot = _uchroot[os.path.abspath(root)] uretry(_uchroot["--", "/bin/mkdir", "-p", dirpath])
MIT License
artyompal/tpu_models
models/samples/core/get_started/iris_data_tpu.py
train_input_fn
python
def train_input_fn(features, labels, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    dataset = dataset.shuffle(1000).repeat()

    dataset = dataset.batch(batch_size, drop_remainder=True)

    return dataset
An input function for training.
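Usage sketch; it assumes the module is importable as iris_data_tpu (matching the file name) and relies on the load_data helper shown in this record's context.

# Hedged example: build the shuffled, repeated, statically-shaped training dataset.
import iris_data_tpu as iris_data  # assumed module name

(train_x, train_y), _ = iris_data.load_data()
dataset = iris_data.train_input_fn(train_x, train_y, batch_size=32)
# Yields ({feature_name: batch, ...}, label_batch) pairs; drop_remainder=True
# keeps every batch the same size, which TPU execution requires.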
https://github.com/artyompal/tpu_models/blob/639306f30e085bb1cdb5b1118a4c96a2dbe14e3e/models/samples/core/get_started/iris_data_tpu.py#L66-L78
import pandas as pd import tensorflow as tf TRAIN_URL = 'http://download.tensorflow.org/data/iris_training.csv' TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv' CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species'] SPECIES = ['Setosa', 'Versicolor', 'Virginica'] PREDICTION_INPUT_DATA = { 'SepalLength': [6.9, 5.1, 5.9], 'SepalWidth': [3.1, 3.3, 3.0], 'PetalLength': [5.4, 1.7, 4.2], 'PetalWidth': [2.1, 0.5, 1.5], } PREDICTION_OUTPUT_DATA = ['Virginica', 'Setosa', 'Versicolor'] def maybe_download(): train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL) test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL) return train_path, test_path def load_data(y_name='Species'): train_path, test_path = maybe_download() train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0, dtype={'SepalLength': pd.np.float32, 'SepalWidth': pd.np.float32, 'PetalLength': pd.np.float32, 'PetalWidth': pd.np.float32, 'Species': pd.np.int32}) train_x, train_y = train, train.pop(y_name) test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0, dtype={'SepalLength': pd.np.float32, 'SepalWidth': pd.np.float32, 'PetalLength': pd.np.float32, 'PetalWidth': pd.np.float32, 'Species': pd.np.int32}) test_x, test_y = test, test.pop(y_name) return (train_x, train_y), (test_x, test_y)
Apache License 2.0
pwenker/chessli
chessli/cli/stats.py
main
python
def main(ctx: typer.Context,):
    ctx.params = ctx.parent.params
    print(f"{as_title('lichess stats')}", end="\n\n")
Get stats and info from Lichess
https://github.com/pwenker/chessli/blob/a3fdbc828e33ced5e25fbd7599be06dc4a46a0ab/chessli/cli/stats.py#L20-L24
import datetime
from typing import Optional

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import typer
from rich import print
from rich.console import Console

from chessli import users_client
from chessli.enums import PerfType
from chessli.utils import as_title, create_config_from_options

console = Console()
app = typer.Typer()


@app.callback(invoke_without_command=True)
MIT License
getnikola/plugins
v8/category_prevnext/category_prevnext.py
CategoryNav.set_site
python
def set_site(self, site):
    super(CategoryNav, self).set_site(site)
    blinker.signal("taxonomies_classified").connect(self._set_navlinks)
Set site, which is a Nikola instance.
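For orientation, a standalone sketch of the blinker signal pattern this handler hooks into; the receiver and payload below are illustrative, not Nikola's real objects.

# Hedged sketch of the underlying signal mechanism.
import blinker

def on_classified(site):
    print("taxonomies classified for", site)

blinker.signal("taxonomies_classified").connect(on_classified)
blinker.signal("taxonomies_classified").send("my-site")  # placeholder sender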
https://github.com/getnikola/plugins/blob/afafcec8a1530ee74dadfbe68ffa190b12a5a622/v8/category_prevnext/category_prevnext.py#L54-L58
from __future__ import unicode_literals

import blinker

from nikola.plugin_categories import SignalHandler


class CategoryNav(SignalHandler):
    name = "category"

    def _set_navlinks(self, site):
        if site is not self.site:
            return
        for lang, langposts in site.posts_per_classification['category'].items():
            for category, categoryposts in langposts.items():
                for i, p in enumerate(categoryposts[1:]):
                    p.next_post = categoryposts[i]
                for i, p in enumerate(categoryposts[:-1]):
                    p.prev_post = categoryposts[i + 1]
                categoryposts[0].next_post = None
                categoryposts[-1].prev_post = None
MIT License
bitlabstudio/django-paypal-express-checkout
paypal_express_checkout/forms.py
PayPalFormMixin.get_cancel_url
python
def get_cancel_url(self):
    return settings.HOSTNAME + reverse(
        'paypal_canceled', kwargs=self.get_url_kwargs())
Returns the PayPal cancel URL.
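A worked illustration of what the mixin composes; none of the concrete values below come from the record.

# Hedged example (all values hypothetical):
#   settings.HOSTNAME = 'https://shop.example.com'
#   get_url_kwargs()  -> {'order_id': 42}
#   reverse('paypal_canceled', kwargs={'order_id': 42}) -> '/paypal/canceled/42/'
#   get_cancel_url()  -> 'https://shop.example.com/paypal/canceled/42/'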
https://github.com/bitlabstudio/django-paypal-express-checkout/blob/9bb3fc653268325440a605842ddc1c56cec036a7/paypal_express_checkout/forms.py#L60-L63
import httplib import logging import urllib2 import urlparse from django import forms from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from django.http import Http404 from django.shortcuts import redirect from django.utils.timezone import now from django.utils.translation import ugettext_lazy as _ from .constants import PAYMENT_STATUS, PAYPAL_DEFAULTS from .models import ( Item, PaymentTransaction, PaymentTransactionError, PurchasedItem, ) from .settings import API_URL, LOGIN_URL from .utils import urlencode logger = logging.getLogger(__name__) CURRENCYCODE = getattr(settings, 'PAYPAL_CURRENCYCODE', 'USD') class PayPalFormMixin(object): def call_paypal(self, api_url, post_data, transaction=None): data = urlencode(post_data) try: response = urllib2.urlopen(api_url, data=data) except ( urllib2.HTTPError, urllib2.URLError, httplib.HTTPException) as ex: self.log_error( ex, api_url=api_url, request_data=data, transaction=transaction) else: parsed_response = urlparse.parse_qs(response.read()) return parsed_response
MIT License
opentoallctf/ota-challenge-bot
bottypes/challenge.py
Challenge.mark_as_solved
python
def mark_as_solved(self, solver_list, solve_date=None):
    self.is_solved = True
    self.solver = solver_list
    self.solve_date = solve_date or int(time.time())
Mark a challenge as solved.

solver_list : List of usernames that solved the challenge.
solve_date : Time of solve (epoch) (None: current time / value: set to specified value).
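Usage sketch; the channel IDs and names are placeholders, and the import path follows this record's module path.

# Hedged example: record a solve by two players at the current time.
from bottypes.challenge import Challenge

chal = Challenge("C_CTF123", "C_CHAL456", "warmup-pwn", "pwn")  # placeholder IDs
chal.mark_as_solved(["alice", "bob"])
assert chal.is_solved and chal.solver == ["alice", "bob"]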
https://github.com/opentoallctf/ota-challenge-bot/blob/6deea8c059d28ddb86dce277158a39a5ad9517e4/bottypes/challenge.py#L25-L33
import time class Challenge: MAX_TAGS = 5 def __init__(self, ctf_channel_id, channel_id, name, category): self.channel_id = channel_id self.ctf_channel_id = ctf_channel_id self.name = name self.category = category self.players = {} self.is_solved = False self.solver = None self.solve_date = 0 self.tags = []
MIT License
autosoft-dev/tree-hugger
tree_hugger/core/parser/python/python_parser.py
PythonParser.get_all_method_docstrings
python
def get_all_method_docstrings(self, strip_quotes: bool=False) -> Dict:
    captures = self._run_query_and_get_captures('all_class_method_docstrings', self.root_node)
    ret_struct = {}
    current_class = ""
    current_method = ""
    for tpl in captures:
        if tpl[1] == "class.name":
            current_class = match_from_span(tpl[0], self.splitted_code)
            ret_struct[current_class] = {}
            continue
        elif tpl[1] == "method.name":
            current_method = match_from_span(tpl[0], self.splitted_code)
            ret_struct[current_class][current_method] = ""
            continue
        elif tpl[1] == "method.docstr":
            ret_struct[current_class][current_method] = self._strip_py_doc_string(match_from_span(
                tpl[0], self.splitted_code
            ), strip_quotes)
    return ret_struct
Returns a dict where method names are the keys and the docstrings are the values.

Excludes any functions, i.e., functions defined outside a class.

The optional argument "strip_quotes" gives the choice whether the returned
docstring will be stripped of triple quotes or not. Default: False
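Usage sketch; it assumes the parser is constructed with a pre-built tree-sitter library and that a parse_file method exists to load source code (neither is shown in this record).

# Hedged example: collect per-class, per-method docstrings from a source file.
from tree_hugger.core import PythonParser

pp = PythonParser(library_loc="tsbuild/my-languages.so")  # placeholder .so path
pp.parse_file("some_module.py")                           # assumed loader API
docstrings = pp.get_all_method_docstrings(strip_quotes=True)
# e.g. {"SomeClass": {"some_method": "Docstring text ...", ...}, ...}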
https://github.com/autosoft-dev/tree-hugger/blob/750de305c3efbc0c9440f858a39d33697d04d49f/tree_hugger/core/parser/python/python_parser.py#L128-L154
import re from typing import List, Dict import logging from pathlib import Path from tree_sitter import Tree, Node from tree_hugger.core.queries import Query from tree_hugger.core.code_parser import BaseParser, match_from_span import tree_hugger.setup_logging TRIPPLE_QUOTE = '"""' TRIPPLE_SINGLE_QUOTE = "'''" TRIPPLE_QUOTE_NUMPY_STYLE = 'r"""' TRIPPLE_SINGLE_QUOTE_NUMPY_STYLE = "r'''" starts_with_tripple_quote = lambda x: x.startswith(TRIPPLE_QUOTE) starts_with_tripple_single_quote = lambda x: x.startswith(TRIPPLE_SINGLE_QUOTE) starts_with_numpy_style_tripple_quote = lambda x: x.startswith(TRIPPLE_QUOTE_NUMPY_STYLE) starts_with_numpy_style_tripple_single_quote = lambda x: x.startswith(TRIPPLE_SINGLE_QUOTE_NUMPY_STYLE) regex = r"([ ]{2,})" class PythonParser(BaseParser): QUERY_FILE_PATH = Path(__file__).parent / "queries.yml" def __init__(self, library_loc: str=None, query_file_path: str=None): super(PythonParser, self).__init__('python', 'python_queries', PythonParser.QUERY_FILE_PATH, library_loc) def _strip_py_doc_string(self, dt: str, strip_quotes: bool) -> str: try: if starts_with_tripple_quote(dt): regex = r"\"{3}[\s\S]*?\"{3}" elif starts_with_tripple_single_quote(dt): regex = r"\'{3}[\s\S]*?\'{3}" elif starts_with_numpy_style_tripple_quote(dt): regex = r"r\"{3}[\s\S]*?\"{3}" elif starts_with_numpy_style_tripple_single_quote(dt): regex = r"r\'{3}[\s\S]*?\'{3}" if regex is None: logging.info(f"not a docstring {dt}") matches = re.search(regex, dt) return_dt = matches.group() if not strip_quotes: return return_dt.lstrip().rstrip() else: return return_dt.replace('"""', "").rstrip().lstrip() if return_dt.find('"""') != -1 else return_dt.replace("'''", "").rstrip().lstrip() except UnboundLocalError: return "" def _outer_indent(self, code): spaces_arr = [] matches = re.finditer(regex, code) for _, match in enumerate(matches, start=1): spaces_arr.append(len(match.group(0))) return min(spaces_arr) if spaces_arr else 4 def get_all_class_method_names(self) -> Dict: captures = self._run_query_and_get_captures('all_class_methods', self.root_node) ret_struct = {} current_key = "" for tpl in captures: if tpl[1] == "class.name": current_key = match_from_span(tpl[0], self.splitted_code) ret_struct[current_key] = [] continue else: ret_struct[current_key].append(match_from_span(tpl[0], self.splitted_code)) return ret_struct def get_all_function_names(self) -> List: captures = self._run_query_and_get_captures('all_function_names', self.root_node) all_funcs = set([match_from_span(n[0], self.splitted_code) for n in captures]) methods = self.get_all_class_method_names() all_methods = set([method_name for key, value in methods.items() for method_name in value]) return list(all_funcs - all_methods) def get_all_function_docstrings(self, strip_quotes: bool=False) -> Dict: function_names = self.get_all_function_names() captures = self._run_query_and_get_captures('all_function_doctrings', self.root_node) ret_struct = {} for i in range(0, len(captures), 2): func_name = match_from_span(captures[i][0], self.splitted_code) if func_name in function_names: ret_struct[func_name] = self._strip_py_doc_string(match_from_span( captures[i+1][0], self.splitted_code ), strip_quotes) return ret_struct def get_all_function_documentations(self) -> Dict: return self.get_all_function_docstrings()
MIT License
deep500/deep500
deep500/datasets/mnist.py
_load_mnist
python
def _load_mnist(downloaded_data, data_node_name, label_node_name,
                normalize=True) -> Tuple[Dataset, Dataset]:
    def extract_img(file_path):
        with gzip.open(file_path, 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1, 1, 28, 28)

    def extract_lbl(file_path):
        with gzip.open(file_path, 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=8)

    train_img = extract_img(downloaded_data['train_images'])
    train_lbl = extract_lbl(downloaded_data['train_labels'])
    test_img = extract_img(downloaded_data['test_images'])
    test_lbl = extract_lbl(downloaded_data['test_labels'])

    if normalize:
        train_img = ((train_img - np.min(train_img)) /
                     (np.max(train_img) - np.min(train_img))).astype(np.float32)
        test_img = ((test_img - np.min(test_img)) /
                    (np.max(test_img) - np.min(test_img))).astype(np.float32)
    train_lbl = train_lbl.astype(np.int64)
    test_lbl = test_lbl.astype(np.int64)

    return (NumpyDataset(train_img, data_node_name, train_lbl, label_node_name),
            NumpyDataset(test_img, data_node_name, test_lbl, label_node_name))
Returns the training and testing Dataset objects for an MNIST-like dataset.

@param data_node_name The graph node name for the data inputs.
@param label_node_name The graph node name for the ground-truth labels.
@param normalize Normalizes the input images first.
@return A 2-tuple with the training and test datasets.
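Usage sketch; the graph node names are arbitrary placeholders that must match the model graph being trained, and the import assumes deep500.datasets is importable as a package.

# Hedged example: download MNIST and wrap it in NumpyDataset objects.
from deep500.datasets import mnist

files = mnist.download_mnist_and_get_file_paths()
train_set, test_set = mnist._load_mnist(files, "images_in", "labels_in",
                                         normalize=True)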
https://github.com/deep500/deep500/blob/34e93a46dea17513ac705bb4392b3514fa9d87c6/deep500/datasets/mnist.py#L63-L94
import tarfile from typing import List, Tuple, Dict from urllib import request import numpy as np import gzip from deep500.utils.download import real_download from deep500.lv2.dataset import Dataset, NumpyDataset from deep500.utils.onnx_interop.losses import SoftmaxCrossEntropy def download_mnist_and_get_file_paths(folder='') -> Dict[str, str]: base_url = "http://yann.lecun.com/exdb/mnist/" filenames = [ ["train_images", "train-images-idx3-ubyte.gz"], ["test_images", "t10k-images-idx3-ubyte.gz"], ["train_labels", "train-labels-idx1-ubyte.gz"], ["test_labels", "t10k-labels-idx1-ubyte.gz"] ] sub_folder = '/mnist' local_files = real_download(base_url, filenames, sub_folder, output_dir=folder) return local_files def download_fashion_mnist_and_get_file_paths(folder='') -> Dict[str, str]: base_url = "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/" filenames = [ ["train_images", "train-images-idx3-ubyte.gz"], ["test_images", "t10k-images-idx3-ubyte.gz"], ["train_labels", "train-labels-idx1-ubyte.gz"], ["test_labels", "t10k-labels-idx1-ubyte.gz"] ] sub_folder = '/fashion_mnist' local_files = real_download(base_url, filenames, sub_folder, output_dir=folder) return local_files def mnist_shape(): return (10, 1, 28, 28) def fashion_mnist_shape(): return mnist_shape() def mnist_loss(): return SoftmaxCrossEntropy def fashion_mnist_loss(): return mnist_loss()
BSD 3-Clause New or Revised License
secondmind-labs/gpflux
gpflux/layers/bayesian_dense_layer.py
BayesianDenseLayer.call
python
def call(
    self, inputs: TensorType, training: Optional[bool] = False
) -> Union[tf.Tensor, MeanAndVariance]:
    sample = self.predict_samples(
        inputs,
        num_samples=None,
    )

    if training:
        loss = self.temperature * self.prior_kl()
    else:
        loss = tf.constant(0.0, dtype=default_float())
    loss_per_datapoint = loss / self.num_data
    self.add_loss(loss_per_datapoint)

    return sample
The default behaviour upon calling this layer.
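Usage sketch; layer sizes, batch shape and activation are placeholders, and the import path assumes the layer is exported from gpflux.layers.

# Hedged example: one stochastic forward pass through the layer.
import numpy as np
import tensorflow as tf
from gpflow import default_float
from gpflux.layers import BayesianDenseLayer

layer = BayesianDenseLayer(input_dim=5, output_dim=3, num_data=100,
                           activation=tf.nn.relu)
X = tf.constant(np.random.randn(32, 5), dtype=default_float())
sample = layer(X, training=True)  # shape [32, 3]
# With training=True the temperature-scaled KL / num_data term is added to layer.losses.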
https://github.com/secondmind-labs/gpflux/blob/e05d7ba86aa3739a34dc6615de8f9ff810642605/gpflux/layers/bayesian_dense_layer.py#L181-L201
from typing import Callable, Optional, Union import numpy as np import tensorflow as tf from gpflow import Parameter, default_float from gpflow.base import TensorType from gpflow.kullback_leiblers import gauss_kl from gpflow.models.model import MeanAndVariance from gpflow.utilities.bijectors import positive, triangular from gpflux.helpers import xavier_initialization_numpy from gpflux.layers.trackable_layer import TrackableLayer from gpflux.types import ShapeType class BayesianDenseLayer(TrackableLayer): def __init__( self, input_dim: int, output_dim: int, num_data: int, w_mu: Optional[np.ndarray] = None, w_sqrt: Optional[np.ndarray] = None, activation: Optional[Callable] = None, is_mean_field: bool = True, temperature: float = 1e-4, ): super().__init__(dtype=default_float()) assert input_dim >= 1 assert output_dim >= 1 assert num_data >= 1 if w_mu is not None: assert w_mu.shape == ((input_dim + 1) * output_dim,) if w_sqrt is not None: if not is_mean_field: assert w_sqrt.shape == ( (input_dim + 1) * output_dim, (input_dim + 1) * output_dim, ) else: assert w_sqrt.shape == ((input_dim + 1) * output_dim,) assert temperature > 0.0 self.input_dim = input_dim self.output_dim = output_dim self.num_data = num_data self.w_mu_ini = w_mu self.w_sqrt_ini = w_sqrt self.activation = activation self.is_mean_field = is_mean_field self.temperature = temperature self.dim = (input_dim + 1) * output_dim self.full_output_cov = False self.full_cov = False self.w_mu = Parameter(np.zeros((self.dim,)), dtype=default_float(), name="w_mu") self.w_sqrt = Parameter( np.zeros((self.dim, self.dim)) if not self.is_mean_field else np.ones((self.dim,)), transform=triangular() if not self.is_mean_field else positive(), dtype=default_float(), name="w_sqrt", ) def initialize_variational_distribution(self) -> None: if self.w_mu_ini is None: w = xavier_initialization_numpy(self.input_dim, self.output_dim) b = np.zeros((1, self.output_dim)) self.w_mu_ini = np.concatenate((w, b), axis=0).reshape((self.dim,)) self.w_mu.assign(self.w_mu_ini) if self.w_sqrt_ini is None: if not self.is_mean_field: self.w_sqrt_ini = 1e-5 * np.eye(self.dim) else: self.w_sqrt_ini = 1e-5 * np.ones((self.dim,)) self.w_sqrt.assign(self.w_sqrt_ini) def build(self, input_shape: ShapeType) -> None: super().build(input_shape) self.initialize_variational_distribution() def predict_samples( self, inputs: TensorType, *, num_samples: Optional[int] = None, ) -> tf.Tensor: _num_samples = num_samples or 1 z = tf.random.normal((self.dim, _num_samples), dtype=default_float()) if not self.is_mean_field: w = self.w_mu[:, None] + tf.matmul(self.w_sqrt, z) else: w = self.w_mu[:, None] + self.w_sqrt[:, None] * z N = tf.shape(inputs)[0] inputs_concat_1 = tf.concat( (inputs, tf.ones((N, 1), dtype=default_float())), axis=-1 ) samples = tf.tensordot( inputs_concat_1, tf.reshape(tf.transpose(w), (_num_samples, self.input_dim + 1, self.output_dim)), [[-1], [1]], ) if num_samples is None: samples = tf.squeeze(samples, axis=-2) else: samples = tf.transpose(samples, perm=[1, 0, 2]) if self.activation is not None: samples = self.activation(samples) return samples
Apache License 2.0
nintorac/neuraldx7
neuralDX7/models/dx7_vae.py
DX7VAE.__init__
python
def __init__(self, features, latent_dim, encoder, decoder, num_flows=3):
    super().__init__()

    self.embedder = nn.Embedding(MAX_VALUE, features)
    self.encoder = ResidualAttentionEncoder(**encoder)
    self._latent_encoder = nn.ModuleList([
        ResidualAttentionEncoder(**encoder),
        TriangularSylvesterFlow(features, latent_dim, num_flows)]
    )
    self.z_to_c = nn.Linear(latent_dim, latent_dim*155)
    self.decoder = CondtionalResidualAttentionEncoder(**decoder)
    self.logits = FeedForwardGELU(features, MAX_VALUE)

    self.n_features = features
features - number of features in the model
latent_dim - the latent dimension of the model
encoder - dictionary containing instantiation parameters for ResidualAttentionEncoder module
decoder - dictionary containing instantiation parameters for CondtionalResidualAttentionEncoder module
num_flows - the number of flows for the TriangularSylvesterFlow module
https://github.com/nintorac/neuraldx7/blob/327844cea18a6dfe35e0dc8f5de0832343487366/neuralDX7/models/dx7_vae.py#L20-L41
import torch
from torch import nn
from torch.nn import functional as F

from agoge import AbstractModel

from neuralDX7.models.attention import ResidualAttentionEncoder, CondtionalResidualAttentionEncoder
from neuralDX7.models.general import FeedForwardGELU
from neuralDX7.models.stochastic_nodes import TriangularSylvesterFlow
from neuralDX7.constants import MAX_VALUE, N_PARAMS
from neuralDX7.utils import mask_parameters


class DX7VAE(AbstractModel):
MIT License
tlc-pack/tenset
python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py
intrin_gemm_MxKxN
python
def intrin_gemm_MxKxN(M, K, N, in_dtype, out_dtype):
    UNIQ_ID_LEN = 8
    uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))

    if isinstance(M, tvm.tir.IntImm):
        M = M.value
    if isinstance(K, tvm.tir.IntImm):
        K = K.value
    if isinstance(N, tvm.tir.IntImm):
        N = N.value
    assert K % 4 == 0
    assert in_dtype == "int8"
    assert out_dtype == "int32"
    A = te.placeholder((M, K), name="a", dtype=in_dtype)
    B = te.placeholder((N, K), name="b", dtype=in_dtype)
    k = te.reduce_axis((0, K), name="k")
    C = te.compute(
        (M, N),
        lambda i, j: te.sum(A[i, k].astype(out_dtype) * B[j, k].astype(out_dtype), axis=k),
        name="c",
    )
    A_buf = tvm.tir.decl_buffer(
        A.shape, A.dtype, name="A", offset_factor=1, strides=[te.var("A_s"), 1]
    )
    B_buf = tvm.tir.decl_buffer(
        B.shape, B.dtype, name="B", offset_factor=1, strides=[te.var("B_s"), 1]
    )
    C_buf = tvm.tir.decl_buffer(
        C.shape, C.dtype, name="C", offset_factor=1, strides=[te.var("C_s"), 1]
    )

    def intrin_func(ins, outs):
        aa, bb = ins
        cc = outs[0]

        def _reduce_update():
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    f"gemm_{M}x{K}x{N}_update_{uniq_id}",
                    aa.access_ptr("r"),
                    bb.access_ptr("r"),
                    cc.access_ptr("w"),
                    aa.strides[0],
                    bb.strides[0],
                    cc.strides[0],
                )
            )
            return ib.get()

        def _reduce_reset():
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32", f"gemm_{M}x{K}x{N}_reset_{uniq_id}", cc.access_ptr("w"), cc.strides[0]
                )
            )
            return ib.get()

        def _body():
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    f"gemm_{M}x{K}x{N}_body_{uniq_id}",
                    aa.access_ptr("r"),
                    bb.access_ptr("r"),
                    cc.access_ptr("w"),
                    aa.strides[0],
                    bb.strides[0],
                    cc.strides[0],
                )
            )
            return ib.get()

        return _body(), _reduce_reset(), _reduce_update()

    intrin_decl = te.decl_tensor_intrin(C.op, intrin_func, binds={A: A_buf, B: B_buf, C: C_buf})
    return intrin_decl, uniq_id
Defines a SIMD-accelerated transposed matmul.
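Usage sketch of how the returned intrinsic is typically consumed in a TVM schedule; the schedule and axis names are illustrative only.

# Hedged example: declare a 2x4x2 int8 -> int32 intrinsic for tensorization.
intrin, uniq_id = intrin_gemm_MxKxN(2, 4, 2, "int8", "int32")
# Inside a schedule function one would then tensorize an inner tile, e.g.:
#   sched[output].tensorize(inner_axis, intrin)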
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py#L31-L119
import random
import string

import tvm
from tvm import te
Apache License 2.0
sfanous/pyecobee
pyecobee/objects/sensor.py
Sensor.temp_correction
python
def temp_correction(self):
    return self._temp_correction
Gets the temp_correction attribute of this Sensor instance.

:return: The value of the temp_correction attribute of this Sensor instance.
:rtype: int
https://github.com/sfanous/pyecobee/blob/3d6b4aec3c6bc9b796aa3d3fd6626909ffdbac13/pyecobee/objects/sensor.py#L242-L251
from pyecobee.ecobee_object import EcobeeObject class Sensor(EcobeeObject): __slots__ = [ '_name', '_manufacturer', '_model', '_zone', '_sensor_id', '_type', '_usage', '_number_of_bits', '_bconstant', '_thermistor_size', '_temp_correction', '_gain', '_max_voltage', '_multiplier', '_states', ] attribute_name_map = { 'name': 'name', 'manufacturer': 'manufacturer', 'model': 'model', 'zone': 'zone', 'sensor_id': 'sensorId', 'sensorId': 'sensor_id', 'type': 'type', 'usage': 'usage', 'number_of_bits': 'numberOfBits', 'numberOfBits': 'number_of_bits', 'bconstant': 'bconstant', 'thermistor_size': 'thermistorSize', 'thermistorSize': 'thermistor_size', 'temp_correction': 'tempCorrection', 'tempCorrection': 'temp_correction', 'gain': 'gain', 'max_voltage': 'maxVoltage', 'maxVoltage': 'max_voltage', 'multiplier': 'multiplier', 'states': 'states', } attribute_type_map = { 'name': 'six.text_type', 'manufacturer': 'six.text_type', 'model': 'six.text_type', 'zone': 'int', 'sensor_id': 'int', 'type': 'six.text_type', 'usage': 'six.text_type', 'number_of_bits': 'int', 'bconstant': 'int', 'thermistor_size': 'int', 'temp_correction': 'int', 'gain': 'int', 'max_voltage': 'int', 'multiplier': 'int', 'states': 'List[State]', } def __init__( self, name=None, manufacturer=None, model=None, zone=None, sensor_id=None, type_=None, usage=None, number_of_bits=None, bconstant=None, thermistor_size=None, temp_correction=None, gain=None, max_voltage=None, multiplier=None, states=None, ): self._name = name self._manufacturer = manufacturer self._model = model self._zone = zone self._sensor_id = sensor_id self._type = type_ self._usage = usage self._number_of_bits = number_of_bits self._bconstant = bconstant self._thermistor_size = thermistor_size self._temp_correction = temp_correction self._gain = gain self._max_voltage = max_voltage self._multiplier = multiplier self._states = states @property def name(self): return self._name @property def manufacturer(self): return self._manufacturer @property def model(self): return self._model @property def zone(self): return self._zone @property def sensor_id(self): return self._sensor_id @property def type(self): return self._type @property def usage(self): return self._usage @property def number_of_bits(self): return self._number_of_bits @property def bconstant(self): return self._bconstant @property def thermistor_size(self): return self._thermistor_size @property
MIT License
idlesign/django-sitemessage
sitemessage/models.py
Dispatch.is_read
python
def is_read(self) -> bool:
    return self.read_status == self.READ_STATUS_READ
Returns message read flag.
https://github.com/idlesign/django-sitemessage/blob/a2db821f9365194cef5e120251f8efa476a6b2af/sitemessage/models.py#L243-L245
import json from typing import Type, List, Optional, Union, Tuple, Dict, Iterable from django.conf import settings from django.contrib.auth.base_user import AbstractBaseUser from django.core import exceptions from django.db import models, transaction, DatabaseError, NotSupportedError from django.db.models import QuerySet from django.utils import timezone from django.utils.translation import gettext_lazy as _ from .utils import get_registered_message_type, Recipient if False: from .messages.base import MessageBase from .messengers.base import MessengerBase USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User') def _get_dispatches(filter_kwargs: dict) -> List['Dispatch']: dispatches = Dispatch.objects.prefetch_related('message').filter( **filter_kwargs ).order_by('-message__time_created') return list(dispatches) def _get_dispatches_for_update(filter_kwargs: dict) -> Optional[List['Dispatch']]: dispatches = Dispatch.objects.prefetch_related('message').filter( **filter_kwargs ).select_for_update( **GET_DISPATCHES_ARGS[1] ).order_by('-message__time_created') try: dispatches = list(dispatches) except NotSupportedError: return None except DatabaseError: return [] return dispatches GET_DISPATCHES_ARGS = [ _get_dispatches_for_update, {'skip_locked': True} ] class ContextField(models.TextField): @classmethod def parse_value(cls, value: str): try: return json.loads(value) except ValueError: raise exceptions.ValidationError( _('Value `%r` is not a valid context.') % value, code='invalid_context', params={'value': value}) def from_db_value(self, *args): value, expression, connection = args[:3] if value is None: return {} return self.parse_value(value) def to_python(self, value: Union[dict, str]): if not value: return {} if isinstance(value, dict): return value return self.parse_value(value) def get_prep_value(self, value: dict): return json.dumps(value, ensure_ascii=False) class Message(models.Model): time_created = models.DateTimeField(_('Time created'), auto_now_add=True, editable=False) sender = models.ForeignKey( USER_MODEL, verbose_name=_('Sender'), null=True, blank=True, on_delete=models.CASCADE) cls = models.CharField( _('Message class'), max_length=250, db_index=True, help_text=_('Message logic class identifier.')) context = ContextField(_('Message context')) priority = models.PositiveIntegerField( _('Priority'), default=0, db_index=True, help_text=_('Number describing message sending priority. 
' 'Messages with different priorities can be sent with different periodicity.')) dispatches_ready = models.BooleanField( _('Dispatches ready'), db_index=True, default=False, help_text=_('Indicates whether dispatches for this message are already formed and ready to delivery.')) class Meta: verbose_name = _('Message') verbose_name_plural = _('Messages') def __str__(self) -> str: return self.cls def get_type(self) -> Type['MessageBase']: return get_registered_message_type(self.cls) @classmethod def get_without_dispatches(cls) -> Union[List['Message'], QuerySet]: return cls.objects.filter(dispatches_ready=False).all() @classmethod def create( cls, message_class: str, context: dict, recipients: Optional[Union[Iterable[Recipient], Recipient]] = None, sender: Optional[AbstractBaseUser] = None, priority: Optional[int] = None ) -> Tuple['Message', List['Dispatch']]: dispatches_ready = False if recipients is not None: dispatches_ready = True msg_kwargs = { 'cls': message_class, 'context': context, 'sender': sender, 'dispatches_ready': dispatches_ready } if priority is not None: msg_kwargs['priority'] = priority message_model = cls(**msg_kwargs) message_model.save() dispatch_models = Dispatch.create(message_model, recipients) return message_model, dispatch_models class Dispatch(models.Model): DISPATCH_STATUS_PENDING = 1 DISPATCH_STATUS_SENT = 2 DISPATCH_STATUS_ERROR = 3 DISPATCH_STATUS_FAILED = 4 DISPATCH_STATUS_PROCESSING = 5 DISPATCH_STATUSES = ( (DISPATCH_STATUS_PENDING, _('Pending')), (DISPATCH_STATUS_PROCESSING, _('Processing')), (DISPATCH_STATUS_SENT, _('Sent')), (DISPATCH_STATUS_ERROR, _('Error')), (DISPATCH_STATUS_FAILED, _('Failed')), ) READ_STATUS_UNREAD = 0 READ_STATUS_READ = 1 READ_STATUSES = ( (READ_STATUS_UNREAD, _('Unread')), (READ_STATUS_READ, _('Read')), ) error_log = None time_created = models.DateTimeField( _('Time created'), auto_now_add=True, editable=False) time_dispatched = models.DateTimeField( _('Time dispatched'), editable=False, null=True, blank=True, help_text=_('Time of the last delivery attempt.')) message = models.ForeignKey(Message, verbose_name=_('Message'), on_delete=models.CASCADE) messenger = models.CharField( _('Messenger'), max_length=250, db_index=True, help_text=_('Messenger class identifier.')) recipient = models.ForeignKey( USER_MODEL, verbose_name=_('Recipient'), null=True, blank=True, on_delete=models.CASCADE) address = models.CharField(_('Address'), max_length=250, help_text=_('Recipient address.')) retry_count = models.PositiveIntegerField( _('Retry count'), default=0, help_text=_('A number of delivery retries has already been made.')) message_cache = models.TextField(_('Message cache'), null=True, editable=False) dispatch_status = models.PositiveIntegerField( _('Dispatch status'), choices=DISPATCH_STATUSES, default=DISPATCH_STATUS_PENDING) read_status = models.PositiveIntegerField(_('Read status'), choices=READ_STATUSES, default=READ_STATUS_UNREAD) class Meta: verbose_name = _('Dispatch') verbose_name_plural = _('Dispatches') def __str__(self) -> str: return f'{self.address} [{self.messenger}]'
BSD 3-Clause New or Revised License
aminyazdanpanah/python-ffmpeg-video-streaming
ffmpeg_streaming/_input.py
Capture.__init__
python
def __init__(self, video, options):
    self.options = options
    self.video = video
@TODO: add documentation
https://github.com/aminyazdanpanah/python-ffmpeg-video-streaming/blob/731530fd9e569362f9ba30d723b395bf0d011eb3/ffmpeg_streaming/_input.py#L21-L26
from ffmpeg_streaming._media import Media from ffmpeg_streaming._utiles import get_os, cnv_options_to_args from ffmpeg_streaming._clouds import Clouds cloud = None class Capture(object):
MIT License
pabigot/pyxb
pyxb/binding/facets.py
Facet.ClassForFacet
python
def ClassForFacet (cls, name):
    assert cls != Facet
    if 0 <= name.find(':'):
        name = name.split(':', 1)[1]
    facet_class = globals().get('%s_%s' % (cls._FacetPrefix, name))
    if facet_class is None:
        raise pyxb.LogicError('Unrecognized facet name %s: expect %s' % (name, ','.join([_f._Name for _f in cls.Facets])))
    assert facet_class is not None
    return facet_class
Given the name of a facet, return the Facet subclass that represents it.
https://github.com/pabigot/pyxb/blob/14737c23a125fd12c954823ad64fc4497816fae3/pyxb/binding/facets.py#L137-L146
import logging import re import decimal import pyxb from . import datatypes from . import basis from pyxb.utils import utility, six _log = logging.getLogger(__name__) class Facet (pyxb.cscRoot): _Name = None @classmethod def Name (self): return self._Name __baseTypeDefinition = None def baseTypeDefinition (self): return self.__baseTypeDefinition __ownerTypeDefinition = None def ownerTypeDefinition (self): return self.__ownerTypeDefinition _ValueDatatype = None __valueDatatype = None def valueDatatype (self): if self.__valueDatatype is None: assert self.baseTypeDefinition() is not None return self.baseTypeDefinition().pythonSupport() return self.__valueDatatype __value = None def _value (self, v): self.__value = v def value (self): return self.__value __annotation = None def annotation (self): return self.__annotation def __init__ (self, **kw): super(Facet, self).__init__(**kw) assert Facet != self.__class__ self.setFromKeywords(_reset=True, _constructor=True, **kw) def _setFromKeywords_vb (self, **kw): if not kw.get('_reset', False): kw.setdefault('base_type_definition', self.__baseTypeDefinition) kw.setdefault('owner_type_definition', self.__ownerTypeDefinition) kw.setdefault('value_datatype', self.__valueDatatype) self.__baseTypeDefinition = kw.get('base_type_definition') self.__ownerTypeDefinition = kw.get('owner_type_definition') self.__valueDatatype = kw.get('value_datatype', self._ValueDatatype) assert (self.__valueDatatype is not None) or (self.__baseTypeDefinition is not None) super_fn = getattr(super(Facet, self), '_setFromKeywords_vb', lambda *a,**kw: self) return super_fn(**kw) def setFromKeywords (self, **kw): return self._setFromKeywords_vb(**kw) @classmethod
Apache License 2.0
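A minimal, self-contained sketch of the lookup convention that ClassForFacet relies on: strip an optional namespace prefix from the facet name, then resolve a class named '<FacetPrefix>_<name>' from the module globals. The CF_* classes and _FACET_PREFIX below are illustrative stand-ins, not part of pyxb.

class CF_length(object):
    _Name = 'length'

class CF_pattern(object):
    _Name = 'pattern'

_FACET_PREFIX = 'CF'

def class_for_facet(name):
    # Accept qualified names such as 'xs:length' by dropping the namespace prefix.
    if 0 <= name.find(':'):
        name = name.split(':', 1)[1]
    facet_class = globals().get('%s_%s' % (_FACET_PREFIX, name))
    if facet_class is None:
        raise ValueError('Unrecognized facet name %s' % (name,))
    return facet_class

assert class_for_facet('xs:length') is CF_length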
jobovy/galpy
galpy/potential/Potential.py
Potential.z2deriv
python
def z2deriv(self,R,Z,phi=0.,t=0.):
    try:
        return self._amp*self._z2deriv(R,Z,phi=phi,t=t)
    except AttributeError:
        raise PotentialError("'_z2deriv' function not implemented for this potential")
NAME:
   z2deriv
PURPOSE:
   evaluate the second vertical derivative
INPUT:
   R - Galactocentric radius (can be Quantity)
   Z - vertical height (can be Quantity)
   phi - Galactocentric azimuth (can be Quantity)
   t - time (can be Quantity)
OUTPUT:
   d2phi/dz2
HISTORY:
   2012-07-25 - Written - Bovy (IAS@MPIA)
https://github.com/jobovy/galpy/blob/0470fa3e990f44319e9340497f669699d1bf1008/galpy/potential/Potential.py#L572-L604
from __future__ import division, print_function import os, os.path import pickle from functools import wraps import warnings import numpy from scipy import optimize, integrate from ..util import plot, coords, conversion from ..util.conversion import velocity_in_kpcGyr, physical_conversion, potential_physical_input, freq_in_Gyr, get_physical from ..util import galpyWarning from .plotRotcurve import plotRotcurve, vcirc from .plotEscapecurve import _INF, plotEscapecurve from .DissipativeForce import DissipativeForce, _isDissipative from .Force import Force, _APY_LOADED if _APY_LOADED: from astropy import units def check_potential_inputs_not_arrays(func): @wraps(func) def func_wrapper(self,R,z,phi,t): if (hasattr(R,'shape') and R.shape != () and len(R) > 1) or (hasattr(z,'shape') and z.shape != () and len(z) > 1) or (hasattr(phi,'shape') and phi.shape != () and len(phi) > 1) or (hasattr(t,'shape') and t.shape != () and len(t) > 1): raise TypeError('Methods in {} do not accept array inputs. Please input scalars'.format(self.__class__.__name__)) return func(self,R,z,phi,t) return func_wrapper class Potential(Force): def __init__(self,amp=1.,ro=None,vo=None,amp_units=None): Force.__init__(self,amp=amp,ro=ro,vo=vo,amp_units=amp_units) self.dim= 3 self.isRZ= True self.isNonAxi= False self.hasC= False self.hasC_dxdv= False self.hasC_dens= False return None @potential_physical_input @physical_conversion('energy',pop=True) def __call__(self,R,z,phi=0.,t=0.,dR=0,dphi=0): return self._call_nodecorator(R,z,phi=phi,t=t,dR=dR,dphi=dphi) def _call_nodecorator(self,R,z,phi=0.,t=0.,dR=0.,dphi=0): if dR == 0 and dphi == 0: try: rawOut= self._evaluate(R,z,phi=phi,t=t) except AttributeError: raise PotentialError("'_evaluate' function not implemented for this potential") if rawOut is None: return rawOut else: return self._amp*rawOut elif dR == 1 and dphi == 0: return -self.Rforce(R,z,phi=phi,t=t,use_physical=False) elif dR == 0 and dphi == 1: return -self.phiforce(R,z,phi=phi,t=t,use_physical=False) elif dR == 2 and dphi == 0: return self.R2deriv(R,z,phi=phi,t=t,use_physical=False) elif dR == 0 and dphi == 2: return self.phi2deriv(R,z,phi=phi,t=t,use_physical=False) elif dR == 1 and dphi == 1: return self.Rphideriv(R,z,phi=phi,t=t,use_physical=False) elif dR != 0 or dphi != 0: raise NotImplementedError('Higher-order derivatives not implemented for this potential') @potential_physical_input @physical_conversion('force',pop=True) def Rforce(self,R,z,phi=0.,t=0.): return self._Rforce_nodecorator(R,z,phi=phi,t=t) def _Rforce_nodecorator(self,R,z,phi=0.,t=0.): try: return self._amp*self._Rforce(R,z,phi=phi,t=t) except AttributeError: raise PotentialError("'_Rforce' function not implemented for this potential") @potential_physical_input @physical_conversion('force',pop=True) def zforce(self,R,z,phi=0.,t=0.): return self._zforce_nodecorator(R,z,phi=phi,t=t) def _zforce_nodecorator(self,R,z,phi=0.,t=0.): try: return self._amp*self._zforce(R,z,phi=phi,t=t) except AttributeError: raise PotentialError("'_zforce' function not implemented for this potential") @potential_physical_input @physical_conversion('forcederivative',pop=True) def r2deriv(self,R,z,phi=0.,t=0.): r= numpy.sqrt(R**2.+z**2.) 
return (self.R2deriv(R,z,phi=phi,t=t,use_physical=False)*R/r +self.Rzderiv(R,z,phi=phi,t=t,use_physical=False)*z/r)*R/r +(self.Rzderiv(R,z,phi=phi,t=t,use_physical=False)*R/r +self.z2deriv(R,z,phi=phi,t=t,use_physical=False)*z/r)*z/r @potential_physical_input @physical_conversion('density',pop=True) def dens(self,R,z,phi=0.,t=0.,forcepoisson=False): try: if forcepoisson: raise AttributeError return self._amp*self._dens(R,z,phi=phi,t=t) except AttributeError: return (-self.Rforce(R,z,phi=phi,t=t,use_physical=False)/R +self.R2deriv(R,z,phi=phi,t=t,use_physical=False) +self.phi2deriv(R,z,phi=phi,t=t,use_physical=False)/R**2. +self.z2deriv(R,z,phi=phi,t=t,use_physical=False))/4./numpy.pi @potential_physical_input @physical_conversion('surfacedensity',pop=True) def surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False): try: if forcepoisson: raise AttributeError return self._amp*self._surfdens(R,z,phi=phi,t=t) except AttributeError: return (-self.zforce(R,numpy.fabs(z),phi=phi,t=t,use_physical=False) +self.zforce(R,-numpy.fabs(z),phi=phi,t=t,use_physical=False) +integrate.quad( lambda x: -self.Rforce(R,x,phi=phi,t=t,use_physical=False)/R +self.R2deriv(R,x,phi=phi,t=t,use_physical=False) +self.phi2deriv(R,x,phi=phi,t=t,use_physical=False)/R**2., -numpy.fabs(z),numpy.fabs(z))[0])/4./numpy.pi def _surfdens(self,R,z,phi=0.,t=0.): return integrate.quad(lambda x: self._dens(R,x,phi=phi,t=t), -numpy.fabs(z),numpy.fabs(z))[0] @potential_physical_input @physical_conversion('mass',pop=True) def mass(self,R,z=None,t=0.,forceint=False): from .EllipsoidalPotential import EllipsoidalPotential if self.isNonAxi and not isinstance(self,EllipsoidalPotential): raise NotImplementedError('mass for non-axisymmetric potentials that are not EllipsoidalPotentials is not currently supported') try: if forceint: raise AttributeError return self._amp*self._mass(R,z=z,t=t) except AttributeError: if z is None: def _integrand(theta): tz= R*numpy.cos(theta) tR= R*numpy.sin(theta) return self.rforce(tR,tz,t=t,use_physical=False) *numpy.sin(theta) return -R**2.*integrate.quad(_integrand,0.,numpy.pi)[0]/2. else: return -R*integrate.quad(lambda x: self.Rforce(R,x,t=t, use_physical=False), -z,z)[0]/2. -integrate.quad(lambda x: x*self.zforce(x,z,t=t, use_physical=False), 0.,R)[0] @physical_conversion('position',pop=True) def rhalf(self,t=0.,INF=numpy.inf): return rhalf(self,t=t,INF=INF,use_physical=False) @potential_physical_input @physical_conversion('time',pop=True) def tdyn(self,R,t=0.): return 2.*numpy.pi*R*numpy.sqrt(R/self.mass(R,use_physical=False)) @physical_conversion('mass',pop=False) def mvir(self,H=70.,Om=0.3,t=0.,overdens=200.,wrtcrit=False, forceint=False,ro=None,vo=None, use_physical=False): if ro is None: ro= self._ro if vo is None: vo= self._vo try: rvir= self.rvir(H=H,Om=Om,t=t,overdens=overdens,wrtcrit=wrtcrit, use_physical=False,ro=ro,vo=vo) except AttributeError: raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius") return self.mass(rvir,t=t,forceint=forceint,use_physical=False,ro=ro,vo=vo) @potential_physical_input @physical_conversion('forcederivative',pop=True) def R2deriv(self,R,Z,phi=0.,t=0.): try: return self._amp*self._R2deriv(R,Z,phi=phi,t=t) except AttributeError: raise PotentialError("'_R2deriv' function not implemented for this potential") @potential_physical_input @physical_conversion('forcederivative',pop=True)
BSD 3-Clause New or Revised License
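A short usage sketch for z2deriv, assuming galpy is installed; the MiyamotoNagaiPotential parameters below are illustrative values only.

from galpy.potential import MiyamotoNagaiPotential

# Normalized Miyamoto-Nagai disc potential (illustrative parameters).
mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)

# Second vertical derivative d2Phi/dz2 at R=1., z=0.1 (galpy internal units).
print(mp.z2deriv(1., 0.1))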
ntoll/p4p2p
p4p2p/dht/bucket.py
Bucket.key_in_range
python
def key_in_range(self, key):
    if isinstance(key, str):
        key = int(key, 0)
    return self.range_min <= key < self.range_max
Checks if a key is within the range covered by this bucket. Returns a boolean to indicate if a certain key should be placed within this bucket.
https://github.com/ntoll/p4p2p/blob/189a35ae964bef7e6db094283f3ead79c6356a6c/p4p2p/dht/bucket.py#L107-L115
from .constants import K class BucketFull(Exception): pass class Bucket(object): def __init__(self, range_min, range_max): self.range_min = range_min self.range_max = range_max self._contacts = [] self.last_accessed = 0 def add_contact(self, contact): if contact in self._contacts: self._contacts.remove(contact) self._contacts.append(contact) elif len(self._contacts) < K: self._contacts.append(contact) else: raise BucketFull("No space in bucket to insert contact.") def get_contact(self, network_id): index = self._contacts.index(network_id) return self._contacts[index] def get_contacts(self, count=0, exclude_contact=None): current_len = len(self._contacts) if count <= 0: count = current_len if not self._contacts: contact_list = [] elif current_len < count: contact_list = self._contacts[:current_len] else: contact_list = self._contacts[:count] if exclude_contact in contact_list: contact_list.remove(exclude_contact) return contact_list def remove_contact(self, network_id): self._contacts.remove(network_id)
MIT License
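A self-contained sketch of the half-open range test used by key_in_range, including the int(key, 0) conversion that lets hexadecimal string keys (e.g. '0x1f') be compared against integer bucket bounds; it mirrors the logic above without importing p4p2p.

def key_in_range(range_min, range_max, key):
    # String keys such as '0xf' are parsed with base auto-detection.
    if isinstance(key, str):
        key = int(key, 0)
    # Half-open interval: range_min is included, range_max is excluded.
    return range_min <= key < range_max

assert key_in_range(0, 16, '0xf')
assert not key_in_range(0, 16, 16)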
tnbar/tednet
tednet/tnn/tensor_train/base.py
TTLinear.__init__
python
def __init__(self, in_shape: Union[list, np.ndarray], out_shape: Union[list, np.ndarray],
             ranks: Union[list, np.ndarray], bias: bool = True):
    super(TTLinear, self).__init__(in_shape=in_shape, out_shape=out_shape, ranks=ranks, bias=bias)
    self.reset_parameters()
Tensor Train Decomposition Linear.

Parameters
----------
in_shape : Union[list, numpy.ndarray]
    1-D param :math:`\in \mathbb{R}^m`. The decomposition shape of feature in
out_shape : Union[list, numpy.ndarray]
    1-D param :math:`\in \mathbb{R}^m`. The decomposition shape of feature out
ranks : Union[list, numpy.ndarray]
    1-D param :math:`\in \mathbb{R}^{m-1}`. The rank of the decomposition
bias : bool
    use bias of convolution or not. ``True`` to use, and ``False`` to not use
https://github.com/tnbar/tednet/blob/fe2c531ab9c388284b4ebc22edcc53df147a3a1f/tednet/tnn/tensor_train/base.py#L153-L170
import math from typing import Union import torch import torch.nn as nn from torch.nn import functional as F import numpy as np from ..tn_cnn import _TNConvNd from ..tn_linear import _TNLinear class TTConv2D(_TNConvNd): def __init__(self, in_shape: Union[list, np.ndarray], out_shape: Union[list, np.ndarray], ranks: Union[list, np.ndarray], kernel_size: Union[int, tuple], stride=1, padding=0, bias=True): super(TTConv2D, self).__init__(in_shape=in_shape, out_shape=out_shape, ranks=ranks, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) self.reset_parameters() def set_tn_type(self): self.tn_info["type"] = "tt" def set_nodes(self): self.in_num = len(self.in_shape) self.out_num = len(self.out_shape) self.core_num = self.in_num assert self.in_num == self.out_num == len(self.ranks), "Input and output number should be equal to rank number." nodes_info = [] for i in range(0, self.core_num): if i < self.core_num - 1: node_info = dict( name="node%d" % i, shape=(self.in_shape[i], self.out_shape[i], self.ranks[i], self.ranks[i + 1]) ) else: node_info = dict( name="node%d" % i, shape=(self.in_shape[i], self.out_shape[i], self.ranks[i]) ) tmp = nn.Parameter(torch.Tensor(*node_info["shape"])) self.register_parameter(node_info["name"], tmp) nodes_info.append(node_info) self.kernel = nn.Conv2d(1, self.ranks[0], self.kernel_size, self.stride, self.padding, bias=False) self.tn_info["nodes"] = nodes_info def set_params_info(self): params_ori = self.in_size * self.out_size * np.prod(self.kernel_size) tt_ranks_1 = np.append(self.ranks, 1) params_tt = np.sum(tt_ranks_1[:self.in_num] * self.in_shape * self.out_shape * tt_ranks_1[1:(self.in_num + 1)]) param_kernel = np.prod(self.kernel_size) * self.ranks[0] params_tt = params_tt + param_kernel compression_ration = params_ori / params_tt self.tn_info["t_params"] = params_tt self.tn_info["ori_params"] = params_ori self.tn_info["cr"] = compression_ration print("compression_ration is: ", compression_ration) def reset_parameters(self): node_vars = [] for i in range(self.core_num): node_vars.append(1. / (self.in_shape[i] * self.ranks[i])) conv_node_var = 2. / (self.kernel_size[0] * self.kernel_size[1]) std = math.pow(math.sqrt(np.prod(node_vars) * conv_node_var), 1. / (self.core_num + 1)) for i in range(self.core_num): nn.init.normal_(getattr(self, "node%d" % i), std=std) nn.init.normal_(self.kernel.weight.data, std=std) if self.bias is not None: nn.init.zeros_(self.bias) def tn_contract(self, inputs: torch.Tensor)->torch.Tensor: batch_size = inputs.shape[0] image_hw = inputs.shape[-2:] res = inputs.view(-1, 1, *image_hw) res = self.kernel(res) new_hw = res.shape[-2:] res = res.reshape(batch_size, *self.in_shape, self.ranks[0], -1) weight_tmp = getattr(self, "node0") res = torch.tensordot(res, weight_tmp, dims=([1, -2], [0, 2])) for i in range(1, self.core_num): weight_tmp = getattr(self, "node%d" % i) res = torch.tensordot(res, weight_tmp, dims=([1, -1], [0, 2])) res = res.reshape(batch_size, *new_hw, -1) return res def recover(self): pass class TTLinear(_TNLinear):
MIT License
deepset-ai/haystack
haystack/modeling/model/language_model.py
LanguageModel.unfreeze
python
def unfreeze(self):
    raise NotImplementedError()
To be implemented
https://github.com/deepset-ai/haystack/blob/38652dd4dd3e5f4b66bbf70def84220f723add2c/haystack/modeling/model/language_model.py#L259-L261
from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import os from pathlib import Path import numpy as np import torch from torch import nn logger = logging.getLogger(__name__) from transformers import ( BertModel, BertConfig, RobertaModel, RobertaConfig, XLNetModel, XLNetConfig, AlbertModel, AlbertConfig, XLMRobertaModel, XLMRobertaConfig, DistilBertModel, DistilBertConfig, ElectraModel, ElectraConfig, CamembertModel, CamembertConfig, BigBirdModel, BigBirdConfig ) from transformers import AutoModel, AutoConfig from transformers.modeling_utils import SequenceSummary import transformers OUTPUT_DIM_NAMES = ["dim", "hidden_size", "d_model"] class LanguageModel(nn.Module): subclasses: dict = {} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) cls.subclasses[cls.__name__] = cls def forward(self, input_ids, padding_mask, **kwargs): raise NotImplementedError @classmethod def load(cls, pretrained_model_name_or_path, revision=None, n_added_tokens=0, language_model_class=None, **kwargs): kwargs["revision"] = revision logger.info("") logger.info("LOADING MODEL") logger.info("=============") config_file = Path(pretrained_model_name_or_path) / "language_model_config.json" if os.path.exists(config_file): logger.info(f"Model found locally at {pretrained_model_name_or_path}") config = json.load(open(config_file)) language_model = cls.subclasses[config["name"]].load(pretrained_model_name_or_path) else: logger.info(f"Could not find {pretrained_model_name_or_path} locally.") logger.info(f"Looking on Transformers Model Hub (in local cache and online)...") if language_model_class is None: language_model_class = cls.get_language_model_class(pretrained_model_name_or_path) if language_model_class: language_model = cls.subclasses[language_model_class].load(pretrained_model_name_or_path, **kwargs) else: language_model = None if not language_model: raise Exception( f"Model not found for {pretrained_model_name_or_path}. Either supply the local path for a saved " f"model or one of bert/roberta/xlnet/albert/distilbert models that can be downloaded from remote. " f"Ensure that the model class name can be inferred from the directory name when loading a " f"Transformers' model." 
) else: logger.info(f"Loaded {pretrained_model_name_or_path}") if n_added_tokens != 0: model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings vocab_size = model_emb_size + n_added_tokens logger.info( f"Resizing embedding layer of LM from {model_emb_size} to {vocab_size} to cope with custom vocab.") language_model.model.resize_token_embeddings(vocab_size) model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings assert vocab_size == model_emb_size return language_model @staticmethod def get_language_model_class(model_name_or_path, **kwargs): model_name_or_path = str(model_name_or_path) config = AutoConfig.from_pretrained(model_name_or_path, **kwargs) model_type = config.model_type if model_type == "xlm-roberta": language_model_class = "XLMRoberta" elif model_type == "roberta": if "mlm" in model_name_or_path.lower(): raise NotImplementedError("MLM part of codebert is currently not supported in Haystack") language_model_class = "Roberta" elif model_type == "camembert": language_model_class = "Camembert" elif model_type == "albert": language_model_class = "Albert" elif model_type == "distilbert": language_model_class = "DistilBert" elif model_type == "bert": language_model_class = "Bert" elif model_type == "xlnet": language_model_class = "XLNet" elif model_type == "electra": language_model_class = "Electra" elif model_type == "dpr": if config.architectures[0] == "DPRQuestionEncoder": language_model_class = "DPRQuestionEncoder" elif config.architectures[0] == "DPRContextEncoder": language_model_class = "DPRContextEncoder" elif config.archictectures[0] == "DPRReader": raise NotImplementedError("DPRReader models are currently not supported.") elif model_type == "big_bird": language_model_class = "BigBird" else: logger.warning("Could not infer LanguageModel class from config. 
Trying to infer " "LanguageModel class from model name.") language_model_class = LanguageModel._infer_language_model_class_from_string(model_name_or_path) return language_model_class @staticmethod def _infer_language_model_class_from_string(model_name_or_path): if "xlm" in model_name_or_path.lower() and "roberta" in model_name_or_path.lower(): language_model_class = "XLMRoberta" elif "bigbird" in model_name_or_path.lower(): language_model_class = "BigBird" elif "roberta" in model_name_or_path.lower(): language_model_class = "Roberta" elif "codebert" in model_name_or_path.lower(): if "mlm" in model_name_or_path.lower(): raise NotImplementedError("MLM part of codebert is currently not supported in Haystack") else: language_model_class = "Roberta" elif "camembert" in model_name_or_path.lower() or "umberto" in model_name_or_path.lower(): language_model_class = "Camembert" elif "albert" in model_name_or_path.lower(): language_model_class = 'Albert' elif "distilbert" in model_name_or_path.lower(): language_model_class = 'DistilBert' elif "bert" in model_name_or_path.lower(): language_model_class = 'Bert' elif "xlnet" in model_name_or_path.lower(): language_model_class = 'XLNet' elif "electra" in model_name_or_path.lower(): language_model_class = 'Electra' elif "word2vec" in model_name_or_path.lower() or "glove" in model_name_or_path.lower(): language_model_class = 'WordEmbedding_LM' elif "minilm" in model_name_or_path.lower(): language_model_class = "Bert" elif "dpr-question_encoder" in model_name_or_path.lower(): language_model_class = "DPRQuestionEncoder" elif "dpr-ctx_encoder" in model_name_or_path.lower(): language_model_class = "DPRContextEncoder" else: language_model_class = None return language_model_class def get_output_dims(self): config = self.model.config for odn in OUTPUT_DIM_NAMES: if odn in dir(config): return getattr(config, odn) else: raise Exception("Could not infer the output dimensions of the language model") def freeze(self, layers): raise NotImplementedError()
Apache License 2.0
pyro-ppl/pyro
pyro/contrib/epidemiology/compartmental.py
CompartmentalModel.fit_svi
python
def fit_svi(
    self,
    *,
    num_samples=100,
    num_steps=2000,
    num_particles=32,
    learning_rate=0.1,
    learning_rate_decay=0.01,
    betas=(0.8, 0.99),
    haar=True,
    init_scale=0.01,
    guide_rank=0,
    jit=False,
    log_every=200,
    **options,
):
    self.relaxed = True
    self.num_quant_bins = 1

    if haar:
        time_dim = -2 if self.is_regional else -1
        dims = {"auxiliary": time_dim}
        supports = {"auxiliary": constraints.interval(-0.5, self.population + 0.5)}
        for name, (fn, is_regional) in self._non_compartmental.items():
            dims[name] = time_dim - fn.event_dim
            supports[name] = fn.support
        haar = _HaarSplitReparam(0, self.duration, dims, supports)

    heuristic_options = {
        k.replace("heuristic_", ""): options.pop(k)
        for k in list(options)
        if k.startswith("heuristic_")
    }
    assert not options, "unrecognized options: {}".format(", ".join(options))
    init_strategy = self._heuristic(haar, **heuristic_options)

    logger.info("Running inference...")
    model = self._relaxed_model
    if haar:
        model = haar.reparam(model)
    if guide_rank == 0:
        guide = AutoNormal(model, init_loc_fn=init_strategy, init_scale=init_scale)
    elif guide_rank == "full":
        guide = AutoMultivariateNormal(
            model, init_loc_fn=init_strategy, init_scale=init_scale
        )
    elif guide_rank is None or isinstance(guide_rank, int):
        guide = AutoLowRankMultivariateNormal(
            model, init_loc_fn=init_strategy, init_scale=init_scale, rank=guide_rank
        )
    else:
        raise ValueError("Invalid guide_rank: {}".format(guide_rank))
    Elbo = JitTrace_ELBO if jit else Trace_ELBO
    elbo = Elbo(
        max_plate_nesting=self.max_plate_nesting,
        num_particles=num_particles,
        vectorize_particles=True,
        ignore_jit_warnings=True,
    )
    optim = ClippedAdam(
        {
            "lr": learning_rate,
            "betas": betas,
            "lrd": learning_rate_decay ** (1 / num_steps),
        }
    )
    svi = SVI(model, guide, optim, elbo)

    start_time = default_timer()
    losses = []
    for step in range(1 + num_steps):
        loss = svi.step() / self.duration
        if step % log_every == 0:
            logger.info("step {} loss = {:0.4g}".format(step, loss))
        losses.append(loss)
    elapsed = default_timer() - start_time
    logger.info(
        "SVI took {:0.1f} seconds, {:0.1f} step/sec".format(
            elapsed, (1 + num_steps) / elapsed
        )
    )

    with torch.no_grad():
        particle_plate = pyro.plate(
            "particles", num_samples, dim=-1 - self.max_plate_nesting
        )
        guide_trace = poutine.trace(particle_plate(guide)).get_trace()
        model_trace = poutine.trace(
            poutine.replay(particle_plate(model), guide_trace)
        ).get_trace()
        self.samples = {
            name: site["value"]
            for name, site in model_trace.nodes.items()
            if site["type"] == "sample"
            if not site["is_observed"]
            if not site_is_subsample(site)
        }
    if haar:
        haar.aux_to_user(self.samples)
    assert all(v.size(0) == num_samples for v in self.samples.values()), {
        k: tuple(v.shape) for k, v in self.samples.items()
    }
    return losses
Runs stochastic variational inference to generate posterior samples.

This runs :class:`~pyro.infer.svi.SVI`, setting the ``.samples`` attribute on completion.

This approximate inference method is useful for quickly iterating on probabilistic models.

:param int num_samples: Number of posterior samples to draw from the trained guide. Defaults to 100.
:param int num_steps: Number of :class:`~pyro.infer.svi.SVI` steps.
:param int num_particles: Number of :class:`~pyro.infer.svi.SVI` particles per step.
:param int learning_rate: Learning rate for the :class:`~pyro.optim.clipped_adam.ClippedAdam` optimizer.
:param int learning_rate_decay: Learning rate for the :class:`~pyro.optim.clipped_adam.ClippedAdam` optimizer. Note this is decay over the entire schedule, not per-step decay.
:param tuple betas: Momentum parameters for the :class:`~pyro.optim.clipped_adam.ClippedAdam` optimizer.
:param bool haar: Whether to use a Haar wavelet reparameterizer.
:param int guide_rank: Rank of the auto normal guide. If zero (default) use an :class:`~pyro.infer.autoguide.AutoNormal` guide. If a positive integer or None, use an :class:`~pyro.infer.autoguide.AutoLowRankMultivariateNormal` guide. If the string "full", use an :class:`~pyro.infer.autoguide.AutoMultivariateNormal` guide. These latter two require more ``num_steps`` to fit.
:param float init_scale: Initial scale of the :class:`~pyro.infer.autoguide.AutoLowRankMultivariateNormal` guide.
:param bool jit: Whether to use a jit compiled ELBO.
:param int log_every: How often to log svi losses.
:param int heuristic_num_particles: Passed to :meth:`heuristic` as ``num_particles``. Defaults to 1024.
:returns: Time series of SVI losses (useful to diagnose convergence).
:rtype: list
https://github.com/pyro-ppl/pyro/blob/751843a16ffca0fec0ec722aa4d57cad246db648/pyro/contrib/epidemiology/compartmental.py#L384-L531
import functools import logging import operator import re import warnings from abc import ABC, abstractmethod from collections import OrderedDict from contextlib import ExitStack, contextmanager from functools import reduce from timeit import default_timer import torch from torch.distributions import biject_to, constraints from torch.distributions.utils import lazy_property import pyro.distributions as dist import pyro.distributions.hmm import pyro.poutine as poutine from pyro.distributions.transforms import HaarTransform from pyro.infer import ( MCMC, NUTS, SVI, JitTrace_ELBO, SMCFilter, Trace_ELBO, infer_discrete, ) from pyro.infer.autoguide import ( AutoLowRankMultivariateNormal, AutoMultivariateNormal, AutoNormal, init_to_generated, init_to_value, ) from pyro.infer.mcmc import ArrowheadMassMatrix from pyro.infer.reparam import HaarReparam, SplitReparam from pyro.infer.smcfilter import SMCFailed from pyro.infer.util import is_validation_enabled from pyro.optim import ClippedAdam from pyro.poutine.util import site_is_factor, site_is_subsample from pyro.util import warn_if_nan from .distributions import ( set_approx_log_prob_tol, set_approx_sample_thresh, set_relaxed_distributions, ) from .util import align_samples, cat2, clamp, quantize, quantize_enumerate logger = logging.getLogger(__name__) def _require_double_precision(): if torch.get_default_dtype() != torch.float64: warnings.warn( "CompartmentalModel is unstable for dtypes less than torch.float64; " "try torch.set_default_dtype(torch.float64)", RuntimeWarning, ) @contextmanager def _disallow_latent_variables(section_name): if not is_validation_enabled(): yield return with poutine.trace() as tr: yield for name, site in tr.trace.nodes.items(): if site["type"] == "sample" and not site["is_observed"]: raise NotImplementedError( "{} contained latent variable {}".format(section_name, name) ) class CompartmentalModel(ABC): def __init__(self, compartments, duration, population, *, approximate=()): super().__init__() assert isinstance(duration, int) assert duration >= 1 self.duration = duration if isinstance(population, torch.Tensor): assert population.dim() == 1 assert (population >= 1).all() self.is_regional = True self.max_plate_nesting = 2 else: assert isinstance(population, int) assert population >= 2 self.is_regional = False self.max_plate_nesting = 1 self.population = population compartments = tuple(compartments) assert all(isinstance(name, str) for name in compartments) assert len(compartments) == len(set(compartments)) self.compartments = compartments assert isinstance(approximate, tuple) assert all(name in compartments for name in approximate) self.approximate = approximate self.samples = {} self._clear_plates() @property def time_plate(self): if self._time_plate is None: self._time_plate = pyro.plate( "time", self.duration, dim=-2 if self.is_regional else -1 ) return self._time_plate @property def region_plate(self): if self._region_plate is None: if self.is_regional: self._region_plate = pyro.plate("region", len(self.population), dim=-1) else: self._region_plate = ExitStack() return self._region_plate def _clear_plates(self): self._time_plate = None self._region_plate = None @lazy_property def full_mass(self): with torch.no_grad(), poutine.block(), poutine.trace() as tr: self.global_model() return [ tuple( name for name, site in tr.trace.iter_stochastic_nodes() if not site_is_subsample(site) ) ] @lazy_property def series(self): with torch.no_grad(), poutine.block(): params = self.global_model() prev = self.initialize(params) for 
name in self.approximate: prev[name + "_approx"] = prev[name] curr = prev.copy() with poutine.trace() as tr: self.transition(params, curr, 0) return frozenset( re.match("(.*)_0", name).group(1) for name, site in tr.trace.nodes.items() if site["type"] == "sample" if not site_is_subsample(site) ) def global_model(self): return None @abstractmethod def initialize(self, params): raise NotImplementedError @abstractmethod def transition(self, params, state, t): raise NotImplementedError def finalize(self, params, prev, curr): pass def compute_flows(self, prev, curr, t): flows = {} flow = 0 for source, destin in zip(self.compartments, self.compartments[1:] + ("R",)): flow = prev[source] - curr[source] + flow flows["{}2{}_{}".format(source, destin, t)] = flow return flows @torch.no_grad() @set_approx_sample_thresh(1000) def generate(self, fixed={}): fixed = {k: torch.as_tensor(v) for k, v in fixed.items()} model = self._generative_model model = poutine.condition(model, fixed) trace = poutine.trace(model).get_trace() samples = OrderedDict( (name, site["value"]) for name, site in trace.nodes.items() if site["type"] == "sample" ) self._concat_series(samples, trace) return samples
Apache License 2.0
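A hedged usage sketch for fit_svi, assuming pyro is installed and that pyro.contrib.epidemiology.models provides SimpleSIRModel(population, recovery_time, data) as in pyro's epidemiology examples; the observation tensor and all numbers are illustrative only.

import torch
from pyro.contrib.epidemiology.models import SimpleSIRModel

# Observed new infections per time step (illustrative data, duration = 20).
data = torch.tensor([1., 2., 2., 3., 5., 8., 10., 14., 18., 20.,
                     22., 20., 18., 15., 12., 10., 8., 6., 5., 4.])

model = SimpleSIRModel(1000, 7.0, data)
losses = model.fit_svi(num_steps=2000, num_particles=16, haar=True, log_every=500)
# losses is a list of per-step ELBO losses; a flat tail suggests convergence,
# and posterior draws are then available in model.samples.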
bomquote/transistor
transistor/browsers/splash_browser.py
SplashBrowser._add_soup
python
def _add_soup(self, response, soup_config):
    if self.resp_headers:
        if ("text/html" in self.resp_content_type_header or
                SplashBrowser.__looks_like_html(self.html)):
            response.soup = bs4.BeautifulSoup(self.html, **soup_config)
    elif SplashBrowser.__looks_like_html(self.html):
        response.soup = bs4.BeautifulSoup(self.html, **soup_config)
    else:
        response.soup = None
    return response
Attaches a soup object to a requests response.
https://github.com/bomquote/transistor/blob/4bc5eaa1beac334cd05f2149a1dd584e0d803921/transistor/browsers/splash_browser.py#L290-L300
import bs4 import sys import random import gevent from requests import Response from requests.exceptions import Timeout from mechanicalsoup.stateful_browser import _BrowserState, StatefulBrowser from mechanicalsoup.utils import LinkNotFoundError from mechanicalsoup.form import Form from transistor.utility.utils import obsolete_setter from transistor.browsers.mixin import SplashBrowserMixin class SplashBrowser(StatefulBrowser, SplashBrowserMixin): retry = 0 def __init__(self, *args, **kwargs): self._set_raw_content(content=b'') self._set_status(status='') self.__state = _BrowserState() self._test_true = False self.timeout_exception = False self.flags = kwargs.pop('flags', None) self.priority = kwargs.pop('priority', 0) if kwargs.pop('meta', None): self._meta = dict(kwargs.pop('meta')) else: self._meta = None self.callback = None self.errback = None super().__init__(*args, **kwargs) @property def meta(self): if self._meta is None: self._meta = {} return self._meta def _get_raw_content(self): return self._content def _set_raw_content(self, content): if content is None: self._content = b'' elif not isinstance(content, bytes): raise TypeError( "Response content must be bytes.") else: self._content = content raw_content = property(_get_raw_content, obsolete_setter(_set_raw_content, 'raw_content')) def _get_status(self): return self._status def _set_status(self, status): if status is None: self._status = '' else: self._status = status status = property(_get_status, obsolete_setter(_set_status, 'status')) def get_current_form(self): return self.__state.form def get_current_page(self): return self.__state.page def get_current_url(self): return self.__state.url def get_current_request(self): return self.__state.request def _update_state(self, response): self._set_raw_content(response.content) self._set_status(response.status_code) self._add_soup(response, self.soup_config) self.__state = _BrowserState(page=response.soup, url=response.url, request=response.request) def open(self, url, *args, **kwargs): if self.get_verbose() == 1: sys.stdout.write('.') sys.stdout.flush() elif self.get_verbose() >= 2: print(url) resp = self.stateful_post(url, *args, **kwargs) return resp def open_fake_page(self, page_text, status_code=None, url=None, soup_config=None): soup_config = soup_config or self.soup_config self._test_true = True self._set_raw_content(page_text.encode()) self._set_status(status_code) self.__state = _BrowserState( page=bs4.BeautifulSoup(page_text, **soup_config), url=url) def refresh(self): old_request = self.__state.request if old_request is None: raise ValueError('The current page is not refreshable. 
Either no ' 'page is opened or low-level browser methods ' 'were used to do so.') resp = self.session.send(old_request) self._update_state(resp) return resp def select_form(self, selector="form", nr=0): if isinstance(selector, bs4.element.Tag): if selector.name != "form": raise LinkNotFoundError self.__state.form = Form(selector) else: found_forms = self.get_current_page().select(selector, limit=nr + 1) if len(found_forms) != nr + 1: if self.__debug: print('select_form failed for', selector) self.launch_browser() raise LinkNotFoundError() self.__state.form = Form(found_forms[-1]) return self.get_current_form() def submit(self, form, url=None, **kwargs): if isinstance(form, Form): form = form.form response = self._request(form, url, **kwargs) self._add_soup(response, self.soup_config) return response def submit_selected(self, btnName=None, *args, **kwargs): self.get_current_form().choose_submit(btnName) referer = self.get_current_url() if referer: if 'headers' in kwargs: kwargs['headers']['Referer'] = referer else: kwargs['headers'] = {'Referer': referer} resp = self.submit(self.__state.form, url=self.__state.url, **kwargs) self._update_state(resp) return resp @staticmethod def __looks_like_html(blob): text = blob.lstrip().lower() return text.startswith('<html') or text.startswith('<!doctype')
MIT License
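A small self-contained sketch of the sniff-then-parse pattern _add_soup uses: decide whether a payload looks like HTML (via the Content-Type header or by peeking at the body) and only then build a BeautifulSoup tree. It requires bs4; the helper below mirrors the private __looks_like_html check and is not the transistor API.

import bs4

def looks_like_html(blob):
    # Mirrors the heuristic above: tolerate leading whitespace and any case.
    text = blob.lstrip().lower()
    return text.startswith('<html') or text.startswith('<!doctype')

def soup_or_none(body, content_type=''):
    if 'text/html' in content_type or looks_like_html(body):
        return bs4.BeautifulSoup(body, features='html.parser')
    return None

print(soup_or_none('  <!DOCTYPE html><html><title>x</title></html>').title.string)  # x
print(soup_or_none('{"json": true}', content_type='application/json'))              # None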
ntoll/p4p2p
p4p2p/dht/contact.py
PeerNode.__init__
python
def __init__(self, public_key, ip_address, port, version, last_seen=0):
    hex_digest = sha512(public_key.encode('ascii')).hexdigest()
    self.network_id = '0x' + hex_digest
    self.public_key = public_key
    self.ip_address = ip_address
    self.port = port
    self.version = version
    self.last_seen = last_seen
    self.failed_RPCs = 0
Initialise the peer node with a unique id within the network (derived from its public key), an IP address, a port, the p4p2p version the contact is running, and a timestamp indicating when the last connection was made with the contact (defaults to 0). The network id is created as the hexdigest of the SHA512 of the public key.
https://github.com/ntoll/p4p2p/blob/189a35ae964bef7e6db094283f3ead79c6356a6c/p4p2p/dht/contact.py#L13-L33
from hashlib import sha512 class PeerNode(object):
MIT License
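A stdlib-only sketch of how the network id is derived from a public key, mirroring the constructor above: the ASCII bytes of the key are hashed with SHA-512 and the hex digest is prefixed with '0x'. The key text here is a made-up placeholder.

from hashlib import sha512

public_key = '-----BEGIN PUBLIC KEY----- ...placeholder... -----END PUBLIC KEY-----'
network_id = '0x' + sha512(public_key.encode('ascii')).hexdigest()

print(len(network_id))   # '0x' plus 128 hex characters for SHA-512 -> 130
print(network_id[:18])   # stable prefix for the same public key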
libyal/winreg-kb
winregrc/services.py
WindowsServicesCollector.Compare
python
def Compare(self, registry, output_writer):
    system_key = registry.GetKeyByPath('HKEY_LOCAL_MACHINE\\System\\')
    if not system_key:
        return False

    result = False
    control_sets = []
    service_names = set()
    for control_set_key in system_key.GetSubkeys():
        if control_set_key.name.startswith('ControlSet'):
            services_key = control_set_key.GetSubkeyByName('Services')
            if not services_key:
                continue

            result = True

            services = {}
            for windows_service in self._CollectWindowsServicesFromKey(
                    services_key):
                if windows_service.name in services:
                    continue

                windows_service_name = windows_service.name.lower()
                service_names.add(windows_service_name)
                services[windows_service_name] = windows_service

            control_sets.append(services)

    number_of_control_sets = len(control_sets)

    for name in service_names:
        services_diff = set()

        windows_service = control_sets[0].get(name, None)
        for control_set_index in range(1, number_of_control_sets):
            control_set = control_sets[control_set_index]

            compare_windows_service = control_set.get(name, None)
            if windows_service != compare_windows_service:
                services_diff.add(windows_service)
                services_diff.add(compare_windows_service)

        for windows_service in services_diff:
            if not windows_service:
                if self._debug:
                    print('Not defined')
            else:
                output_writer.WriteWindowsService(windows_service)

    return result
Compares services in the different control sets.

Args:
  registry (dfwinreg.WinRegistry): Windows Registry.
  output_writer (OutputWriter): output writer.

Returns:
  bool: True if the services key was found, False if not.
https://github.com/libyal/winreg-kb/blob/ec90fa47511c45bd31c876f8f2702af605dd6229/winregrc/services.py#L228-L286
from winregrc import interface class WindowsService(object): _OBJECT_NAME_DESCRIPTIONS = { 0x00000010: 'Account name', 0x00000020: 'Account name', 0x00000110: 'Account name', } _SERVICE_TYPE_DESCRIPTIONS = { 0x00000001: 'Kernel device driver', 0x00000002: 'File system driver', 0x00000004: 'Adapter arguments', 0x00000010: 'Stand-alone service', 0x00000020: 'Shared service', } _START_VALUE_DESCRIPTIONS = { 0x00000000: 'Boot', 0x00000001: 'System', 0x00000002: 'Automatic', 0x00000003: 'On demand', 0x00000004: 'Disabled', } def __init__( self, name, service_type, display_name, description, image_path, object_name, start_value): super(WindowsService, self).__init__() self.description = description self.display_name = display_name self.image_path = image_path self.name = name self.object_name = object_name self.service_type = service_type self.start_value = start_value def __eq__(self, other): return ( other is not None and self.description == other.description and self.display_name == other.display_name and self.image_path == other.image_path and self.name == other.name and self.object_name == other.object_name and self.service_type == other.service_type and self.start_value == other.start_value) def __ne__(self, other): return ( other is None or self.description != other.description or self.display_name != other.display_name or self.image_path != other.image_path or self.name != other.name or self.object_name != other.object_name or self.service_type != other.service_type or self.start_value != other.start_value) def GetObjectNameDescription(self): return(self._OBJECT_NAME_DESCRIPTIONS.get( self.service_type, 'Object name')) def GetServiceTypeDescription(self): return(self._SERVICE_TYPE_DESCRIPTIONS.get( self.service_type, 'Unknown 0x{0:08x}'.format(self.service_type))) def GetStartValueDescription(self): return(self._START_VALUE_DESCRIPTIONS.get( self.start_value, 'Unknown 0x{0:08x}'.format(self.start_value))) class WindowsServicesCollector(interface.WindowsRegistryKeyCollector): def _CollectWindowsServicesFromKey(self, services_key): for service_key in services_key.GetSubkeys(): type_value = service_key.GetValueByName('Type') if type_value: type_value = type_value.GetDataAsObject() display_name_value = service_key.GetValueByName('DisplayName') if display_name_value: if display_name_value.DataIsString(): display_name_value = display_name_value.GetDataAsObject() else: display_name_value = None description_value = service_key.GetValueByName('Description') if description_value: description_value = description_value.GetDataAsObject() image_path_value = service_key.GetValueByName('ImagePath') if image_path_value: image_path_value = image_path_value.GetDataAsObject() object_name_value = service_key.GetValueByName('ObjectName') if object_name_value: object_name_value = object_name_value.GetDataAsObject() start_value = service_key.GetValueByName('Start') if start_value: start_value = start_value.GetDataAsObject() yield WindowsService( service_key.name, type_value, display_name_value, description_value, image_path_value, object_name_value, start_value) def Collect(self, registry, output_writer, all_control_sets=False): result = False if all_control_sets: system_key = registry.GetKeyByPath('HKEY_LOCAL_MACHINE\\System\\') if not system_key: return result for control_set_key in system_key.GetSubkeys(): if control_set_key.name.startswith('ControlSet'): services_key = control_set_key.GetSubkeyByName('Services') if services_key: result = True if self._debug: print('Control set: 
{0:s}'.format(control_set_key.name)) print('\tNumber of entries\t: {0:d}'.format( services_key.number_of_subkeys)) print('') for windows_service in self._CollectWindowsServicesFromKey( services_key): output_writer.WriteWindowsService(windows_service) else: try: services_key = registry.GetKeyByPath( 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services') except RuntimeError: services_key = None if services_key: result = True if self._debug: print('Current control set') print('\tNumber of entries\t: {0:d}'.format( services_key.number_of_subkeys)) print('') for windows_service in self._CollectWindowsServicesFromKey( services_key): output_writer.WriteWindowsService(windows_service) return result
Apache License 2.0
cgatoxford/cgatpipelines
obsolete/pipeline_timeseries.py
buildCombinedExpression
python
def buildCombinedExpression(infiles, outfile):
    infiles = " ".join(infiles)
    statement = ''' cgat combine_tables --columns=1 --log=%(outfile)s.log %(infiles)s | sed 's/Geneid/gene_id/' | sed 's/\-/\./g' | tee %(outfile)s.table.tsv | gzip > %(outfile)s '''
    P.run()
aggregate together all of the datasets for a combined all-vs-all analysis
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/obsolete/pipeline_timeseries.py#L344-L361
from ruffus import * import sys import os import itertools import re import sqlite3 import glob import pandas as pd import rpy2.robjects as ro import CGAT.Experiment as E import CGAT.Timeseries as Timeseries import CGATPipelines.PipelineTracks as PipelineTracks import CGATPipelines.Pipeline as P P.getParameters( ["%s/pipeline.ini" % os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"]) PARAMS = P.PARAMS GENESETS = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory( glob.glob("*.gtf.gz"), "(\S+).gtf.gz") TRACKS3 = PipelineTracks.Tracks(PipelineTracks.Sample3) TRACKS = TRACKS3.loadFromDirectory(glob.glob("*.bam"), "(\S+).bam") REPLICATE = PipelineTracks.Aggregate(TRACKS, labels=("replicate", )) TIME = PipelineTracks.Aggregate(TRACKS, labels=("condition", "tissue")) def connect(): dbh = sqlite3.connect(PARAMS["database_name"]) statement = '''ATTACH DATABASE '%s' as annotations''' % ( PARAMS["annotations_database"]) cc = dbh.cursor() cc.execute(statement) cc.close() return dbh @follows(connect) @transform("reference.gtf.gz", suffix("reference.gtf.gz"), "refcoding.gtf.gz") def buildCodingGeneSet(infile, outfile): statement = ''' zcat %(infile)s | awk '$2 == "protein_coding"' | gzip > %(outfile)s ''' P.run() @follows(mkdir("feature_counts.dir")) @files([(("%s.bam" % x.asFile(), "%s.gtf.gz" % y.asFile()), ("feature_counts.dir/%s_vs_%s.tsv.gz" % (x.asFile(), y.asFile()))) for x, y in itertools.product(TRACKS, GENESETS)]) def buildFeatureCounts(infiles, outfile): infile, annotations = infiles outfile = P.snip(outfile, ".gz") annotations_tmp = P.getTempFilename() if PARAMS['featurecounts_paired'] == "1": paired = "-p -B" else: paired = "" job_options = "-pe dedicated %i" % PARAMS['featurecounts_threads'] statement = ''' zcat %(annotations)s > %(annotations_tmp)s; checkpoint; featureCounts %(featurecounts_options)s -T %(featurecounts_threads)s -s %(featurecounts_strand)s -b -a %(annotations_tmp)s -o %(outfile)s %(infile)s > %(outfile)s.log; checkpoint; gzip %(outfile)s; checkpoint; rm %(annotations_tmp)s ''' P.run() @collate(buildFeatureCounts, regex("feature_counts.dir/(.+)-(.+)-(.+)_vs_(.+).tsv.gz"), r"feature_counts.dir/\1-\4-feature_counts.tsv.gz") def aggregateFeatureCounts(infiles, outfile): infiles = " ".join(infiles) statement = ''' cgat combine_tables --columns=1 --take=7 --use-file-prefix --regex-filename='(.+)_vs.+.tsv.gz' --log=%(outfile)s.log %(infiles)s | sed 's/Geneid/gene_id/' | sed 's/\-/\./g' | tee %(outfile)s.table.tsv | gzip > %(outfile)s ''' P.run() @transform(aggregateFeatureCounts, suffix(".tsv.gz"), ".load") def loadFeatureCounts(infile, outfile): P.load(infile, outfile, "--add-index=gene_id") @follows(mkdir("combined_analysis.dir"), aggregateFeatureCounts) @collate(aggregateFeatureCounts, regex("feature_counts.dir/(.+)-(.+)-feature_counts.tsv.gz"), r"combined_analysis.dir/\2-combined.tsv.gz")
MIT License
viniciuschiele/flask-apscheduler
examples/application_factory/events.py
job_executed
python
def job_executed(event):
    with scheduler.app.app_context():
        print(event)
Job executed event.
https://github.com/viniciuschiele/flask-apscheduler/blob/af6f32f35998478c16b0a876e917966c52de1fe4/examples/application_factory/events.py#L28-L31
from apscheduler.events import ( EVENT_JOB_ADDED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED, EVENT_JOB_MISSED, EVENT_JOB_REMOVED, EVENT_JOB_SUBMITTED, ) from .extensions import scheduler def job_missed(event): with scheduler.app.app_context(): print(event) def job_error(event): with scheduler.app.app_context(): print(event)
Apache License 2.0
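A hedged sketch of wiring these callbacks up, assuming the flask-apscheduler APScheduler object (imported as scheduler from extensions.py in the context above) exposes APScheduler's add_listener(callback, mask) API; event masks are bit flags and can be OR-ed so that one callback receives several event types.

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED, EVENT_JOB_MISSED

# Register each handler for the event type(s) it should receive.
scheduler.add_listener(job_missed, EVENT_JOB_MISSED)
scheduler.add_listener(job_error, EVENT_JOB_ERROR)
scheduler.add_listener(job_executed, EVENT_JOB_EXECUTED)

# A single listener can also watch several event types at once:
scheduler.add_listener(job_executed, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)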
rebiocoder/bioforum
venv/Lib/site-packages/django/db/backends/sqlite3/base.py
DatabaseWrapper._start_transaction_under_autocommit
python
def _start_transaction_under_autocommit(self):
    self.cursor().execute("BEGIN")
Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/db/backends/sqlite3/base.py#L277-L284
import decimal import math import re import warnings from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from .client import DatabaseClient from .creation import DatabaseCreation from .features import DatabaseFeatures from .introspection import DatabaseIntrospection from .operations import DatabaseOperations from .schema import DatabaseSchemaEditor def decoder(conv_func): return lambda s: conv_func(s.decode()) Database.register_converter("bool", lambda s: s == b'1') Database.register_converter("time", decoder(parse_time)) Database.register_converter("date", decoder(parse_date)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_converter("TIMESTAMP", decoder(parse_datetime)) Database.register_converter("decimal", decoder(decimal.Decimal)) Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', } operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. 
" "Please supply the NAME value.") kwargs = { 'database': settings_dict['NAME'], 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, } kwargs.update(settings_dict['OPTIONS']) if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False}) if self.features.can_share_in_memory_db: kwargs.update({'uri': True}) return kwargs def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_date_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date) conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time) conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract) conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc) conn.create_function("django_time_extract", 2, _sqlite_time_extract) conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) conn.create_function("django_time_diff", 2, _sqlite_time_diff) conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) conn.create_function("regexp", 2, _sqlite_regexp) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function("django_power", 2, _sqlite_power) conn.execute('PRAGMA foreign_keys = ON') return conn def init_connection_state(self): pass def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) def close(self): self.validate_thread_sharing() if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): return self.features.uses_savepoints and self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: level = '' with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): if self.in_atomic_block: return False self.cursor().execute('PRAGMA foreign_keys = OFF') return True def enable_constraint_checking(self): self.cursor().execute('PRAGMA foreign_keys = ON') def check_constraints(self, table_names=None): cursor = self.cursor() if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True
MIT License
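A stdlib-only sketch of the workaround this method implements: with sqlite3 left in autocommit mode (isolation_level=None), a transaction is opened by issuing BEGIN explicitly, after which savepoints and rollback behave as expected.

import sqlite3

conn = sqlite3.connect(':memory:', isolation_level=None)  # autocommit mode
cur = conn.cursor()
cur.execute('CREATE TABLE t (x INTEGER)')

cur.execute('BEGIN')                       # start a transaction explicitly
cur.execute('INSERT INTO t VALUES (1)')
cur.execute('SAVEPOINT sp1')
cur.execute('INSERT INTO t VALUES (2)')
cur.execute('ROLLBACK TO SAVEPOINT sp1')   # undoes only the second insert
cur.execute('COMMIT')

print(cur.execute('SELECT x FROM t').fetchall())  # [(1,)]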
devopshq/teamcity
dohq_teamcity/api/build_api.py
BuildApi.get_pinned
python
def get_pinned(self, build_locator, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.__get_pinned_with_http_info(build_locator, **kwargs)
    else:
        (data) = self.__get_pinned_with_http_info(build_locator, **kwargs)
        return data
get_pinned  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_pinned(build_locator, async_req=True)
>>> result = thread.get()

:param async_req: bool
:param str build_locator: (required)
:return: str
         If the method is called asynchronously,
         returns the request thread.
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/api/build_api.py#L552-L571
from __future__ import absolute_import from dohq_teamcity.custom.base_model import TeamCityObject import re import six from dohq_teamcity.models.build import Build from dohq_teamcity.models.build_cancel_request import BuildCancelRequest from dohq_teamcity.models.build_changes import BuildChanges from dohq_teamcity.models.builds import Builds from dohq_teamcity.models.comment import Comment from dohq_teamcity.models.file import File from dohq_teamcity.models.files import Files from dohq_teamcity.models.issues_usages import IssuesUsages from dohq_teamcity.models.model_property import ModelProperty from dohq_teamcity.models.problem_occurrences import ProblemOccurrences from dohq_teamcity.models.properties import Properties from dohq_teamcity.models.tags import Tags from dohq_teamcity.models.test_occurrences import TestOccurrences from dohq_teamcity.models.file import file class BuildApi(object): base_name = 'Build' def __init__(self, api_client=None): self.api_client = api_client def add_tags(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__add_tags_with_http_info(build_locator, **kwargs) else: (data) = self.__add_tags_with_http_info(build_locator, **kwargs) return data def cancel_build(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__cancel_build_with_http_info(build_locator, **kwargs) else: (data) = self.__cancel_build_with_http_info(build_locator, **kwargs) return data def cancel_build_0(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__cancel_build_0_with_http_info(build_locator, **kwargs) else: (data) = self.__cancel_build_0_with_http_info(build_locator, **kwargs) return data def delete_all_parameters(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_all_parameters_with_http_info(build_locator, **kwargs) else: (data) = self.__delete_all_parameters_with_http_info(build_locator, **kwargs) return data def delete_build(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_build_with_http_info(build_locator, **kwargs) else: (data) = self.__delete_build_with_http_info(build_locator, **kwargs) return data def delete_builds(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_builds_with_http_info(**kwargs) else: (data) = self.__delete_builds_with_http_info(**kwargs) return data def delete_comment(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_comment_with_http_info(build_locator, **kwargs) else: (data) = self.__delete_comment_with_http_info(build_locator, **kwargs) return data def delete_parameter(self, name, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_parameter_with_http_info(name, build_locator, **kwargs) else: (data) = self.__delete_parameter_with_http_info(name, build_locator, **kwargs) return data def get_artifact_dependency_changes(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_artifact_dependency_changes_with_http_info(build_locator, **kwargs) else: (data) = self.__get_artifact_dependency_changes_with_http_info(build_locator, **kwargs) return data def get_artifacts_directory(self, build_locator, 
**kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_artifacts_directory_with_http_info(build_locator, **kwargs) else: (data) = self.__get_artifacts_directory_with_http_info(build_locator, **kwargs) return data def get_build_number(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_build_number_with_http_info(build_locator, **kwargs) else: (data) = self.__get_build_number_with_http_info(build_locator, **kwargs) return data def get_build_status_text(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_build_status_text_with_http_info(build_locator, **kwargs) else: (data) = self.__get_build_status_text_with_http_info(build_locator, **kwargs) return data def get_canceled_info(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_canceled_info_with_http_info(build_locator, **kwargs) else: (data) = self.__get_canceled_info_with_http_info(build_locator, **kwargs) return data def get_children(self, path, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_children_with_http_info(path, build_locator, **kwargs) else: (data) = self.__get_children_with_http_info(path, build_locator, **kwargs) return data def get_children_alias(self, path, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_children_alias_with_http_info(path, build_locator, **kwargs) else: (data) = self.__get_children_alias_with_http_info(path, build_locator, **kwargs) return data def get_content(self, path, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_content_with_http_info(path, build_locator, **kwargs) else: (data) = self.__get_content_with_http_info(path, build_locator, **kwargs) return data def get_content_alias(self, path, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_content_alias_with_http_info(path, build_locator, **kwargs) else: (data) = self.__get_content_alias_with_http_info(path, build_locator, **kwargs) return data def get_metadata(self, path, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_metadata_with_http_info(path, build_locator, **kwargs) else: (data) = self.__get_metadata_with_http_info(path, build_locator, **kwargs) return data def get_parameter(self, name, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_with_http_info(name, build_locator, **kwargs) else: (data) = self.__get_parameter_with_http_info(name, build_locator, **kwargs) return data def get_parameter_0(self, build_locator, property_name, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_0_with_http_info(build_locator, property_name, **kwargs) else: (data) = self.__get_parameter_0_with_http_info(build_locator, property_name, **kwargs) return data def get_parameter_value_long(self, name, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_value_long_with_http_info(name, build_locator, **kwargs) else: (data) = self.__get_parameter_value_long_with_http_info(name, build_locator, **kwargs) return data def 
get_parameters(self, build_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameters_with_http_info(build_locator, **kwargs) else: (data) = self.__get_parameters_with_http_info(build_locator, **kwargs) return data
MIT License
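A minimal usage sketch for the BuildApi shown in this record. The import path follows the record's function_path; the api_client argument and the "id:..." build locator syntax are assumptions, since client configuration is not part of the record.

from dohq_teamcity.api.build_api import BuildApi

def fetch_build_parameters(api_client, build_id):
    # api_client is assumed to be an already-configured dohq_teamcity client;
    # BuildApi(api_client=...) and get_parameters(build_locator) are taken
    # directly from the record's context.
    build_api = BuildApi(api_client=api_client)
    return build_api.get_parameters("id:{}".format(build_id))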
googlecloudplatform/solutions-cloud-orchestrate
api/orchestrateapi/commands/projects/deregister.py
remove_project_iam_binding
python
def remove_project_iam_binding(member, role): policy = resource_manager.projects().getIamPolicy( resource=environ.ORCHESTRATE_PROJECT, body=dict()).execute() if remove_iam_binding(policy, member, role): resource_manager.projects().setIamPolicy( resource=environ.ORCHESTRATE_PROJECT, body=dict(policy=policy)).execute()
Remove an IAM policy binding from the Orchestrate project. Args: member: Account, e.g. user:joe@doe.com, serviceAccount:..., etc. role: Role.
https://github.com/googlecloudplatform/solutions-cloud-orchestrate/blob/abb7ca3128f99f10e78caa06c1ed9802656bef40/api/orchestrateapi/commands/projects/deregister.py#L77-L93
import uuid from googleapiclient import discovery from oauth2client.client import GoogleCredentials from google.cloud import error_reporting from orchestrateapi import environ from orchestrateapi import orchestrate_pb2 error_client = error_reporting.Client() credentials = GoogleCredentials.get_application_default() resource_manager = discovery.build('cloudresourcemanager', 'v1', credentials=credentials, cache_discovery=False) storage = discovery.build('storage', 'v1', credentials=credentials, cache_discovery=False) def run(request, context): print('Orchestrate.DeregisterProject project={project}'.format( project=request.project, )) request_id = uuid.uuid4().hex account = ( 'serviceAccount:orchestrate@{project}.iam.gserviceaccount.com').format( project=request.project) role = 'projects/{project}/roles/orchestrate.project'.format( project=environ.ORCHESTRATE_PROJECT) remove_project_iam_binding(account, role) remove_storage_iam_binding(account, 'roles/storage.objectViewer') return orchestrate_pb2.RegisterProjectResponse( status='DEREGISTERED', request_id=str(request_id), )
Apache License 2.0
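The function above delegates to a remove_iam_binding helper that is defined elsewhere in the module and not shown in this record. The sketch below is not the repository's code; it only illustrates the usual pattern for dropping a member from a Cloud Resource Manager policy dict, which is what the shown function relies on.

def remove_iam_binding(policy, member, role):
    # Drop `member` from every binding matching `role`; report whether the
    # policy actually changed so the caller can skip a redundant setIamPolicy.
    changed = False
    for binding in policy.get('bindings', []):
        if binding.get('role') == role and member in binding.get('members', []):
            binding['members'].remove(member)
            changed = True
    # Bindings left with no members are removed so the updated policy stays valid.
    policy['bindings'] = [b for b in policy.get('bindings', []) if b.get('members')]
    return changed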
albanie/pytorch-mcn
python/importer.py
Network.add_mod
python
def add_mod(self, name, inputs, outputs, params, mod, state_dict): if not isinstance(mod, pmu.PlaceHolder): mod_str = ensure_compatible_repr(mod) self.attr_str += ['self.{} = nn.{}'.format(name, mod_str)] outs = ','.join(outputs) ins = ','.join(inputs) if not self.input_vars: self.input_vars = ins self.output_vars = [v for v in self.output_vars if v not in inputs] self.output_vars.extend(outputs) if isinstance(mod, pmu.PlaceHolder): if isinstance(mod, pmu.Concat): func = str(mod).format(ins) else: func = str(mod).format(*inputs) forward_str = '{} = {}'.format(outs, func) else: forward_str = '{} = self.{}({})'.format(outs, name, ins) self.forward_str += [forward_str] if self.debug_mode: self.forward_debug_str += [forward_str] template = "self.debug_feats['{0}'] = {0}.clone()" forward_debug_str = template.format(outs) self.forward_debug_str += [forward_debug_str] if not params: return for idx, param_name in enumerate(params): if idx == 0: key = '{}.weight'.format(name) elif idx == 1: key = '{}.bias'.format(name) elif idx == 2: std_name = 'moments' in params[idx] std_suffixes = ['x', '_m'] std_sfx = any([params[idx].endswith(x) for x in std_suffixes]) msg = 'The third parameter should correspond to bn moments' assert std_name or std_sfx, msg state_dict = pmu.update_bnorm_moments( name=name, param_name=param_name, mcn_net=self.mcn_net, eps=mod.eps, state_dict=state_dict ) continue else: raise ValueError('unexpected number of params') val_idx = self.mcn_net['params']['name'].index(param_name) weights = self.mcn_net['params']['value'][val_idx] if 'Linear' in mod_str: if weights.ndim > 1 and weights.shape[1] > 1: kwargs = { "squeeze": True, "in_features": mod.in_features, "out_features": mod.out_features, } else: kwargs = {"squeeze": True} else: kwargs = {"squeeze": False} state_dict[key] = pmu.weights2tensor(weights, **kwargs)
Add a computational module to the pytorch network. This function adds a representation of a computational module (which is then used to generate the corresponding pytorch network source code) to the Network data structure. It is also responsible for tracking the outputs of the network as each new module is added, so that the correct variables are returned by the generated pytorch network's "forward()" function. Args: name (str): the name of the module inputs (List[str]): the names of the input variables to the module outputs (List[str]): the names of the output variables from the module params (List[str]): the names of the parameters of the module mod (torch.nn.Module): the corresponding pytorch module (if one is available for the given operation, otherwise a Placeholder object is used instead). state_dict (dict): the dictionary of network weights to be updated with the parameters of the given module.
https://github.com/albanie/pytorch-mcn/blob/c4c9bcfa727f41d62a65091b2601488c8bf04c7d/python/importer.py#L310-L401
import os from collections import OrderedDict, defaultdict import argparse import torch import torch.nn as nn import scipy.io as sio import numpy as np import ptmcn_utils as pmu import source_gen as sg def load_mcn_net(path): mcn = sio.loadmat(path, squeeze_me=False) for key in ['meta', 'params', 'layers']: assert key in mcn.keys() mcn_net = { 'meta': pmu.parse_struct(mcn['meta']), 'params': pmu.parse_struct(mcn['params']), 'layers': pmu.parse_struct(mcn['layers']), } mcn_net = pmu.fix_spio_issues(mcn_net) mcn_net = pmu.align_interfaces(mcn_net) return mcn_net def flatten_if_needed(nodes, complete_dag, is_flattened, flatten_layer): if not is_flattened: prev = nodes[-1] flatten_condition = (flatten_layer == 'last' and complete_dag) or (flatten_layer == prev['name']) if flatten_condition: name = '{}_flatten'.format(prev['name']) outputs = prev['outputs'] prev['outputs'] = ['{}_preflatten'.format(x) for x in prev['outputs']] node = { 'name': name, 'inputs': prev['outputs'], 'outputs': outputs, 'params': [], } node['mod'] = pmu.Flatten() nodes.append(node) is_flattened = True return nodes, is_flattened def extract_dag(mcn_net, inplace, drop_prob_softmax=True, in_ch=3, flatten_layer='last', **kwargs): nodes = [] is_flattened = False uses_functional = False num_layers = len(mcn_net['layers']['name']) in_ch_store = defaultdict(lambda: in_ch) for ii in range(num_layers): params = mcn_net['layers']['params'][ii] if params == {'': []}: params = None node = { 'name': mcn_net['layers']['name'][ii], 'inputs': mcn_net['layers']['inputs'][ii], 'outputs': mcn_net['layers']['outputs'][ii], 'params': params, } bt = mcn_net['layers']['type'][ii] block = mcn_net['layers']['block'][ii] opts = {'block': block, 'block_type': bt} in_chs = [in_ch_store[x] for x in node['inputs']] out_chs = in_chs if bt == 'dagnn.Conv': msg = 'conv layers should only take a single_input' if len(in_chs) != 1: import ipdb ; ipdb.set_trace() assert len(in_chs) == 1, msg mod, out_ch = pmu.conv2d_mod(block, in_chs[0], is_flattened, **kwargs) out_chs = [out_ch] elif bt == 'dagnn.BatchNorm': mod = pmu.batchnorm2d_mod(block, mcn_net, params) elif bt == 'dagnn.GlobalPooling': mod = pmu.globalpool_mod(block) elif bt == 'dagnn.ReLU': mod = nn.ReLU(inplace=inplace) elif bt == 'dagnn.Sigmoid': mod = nn.Sigmoid() elif bt == 'dagnn.Pooling': pad, ceil_mode = pmu.convert_padding(block['pad']) pool_opts = {'kernel_size': pmu.int_list(block['poolSize']), 'stride': pmu.int_list(block['stride']), 'padding': pad, 'ceil_mode': ceil_mode} if block['method'] == 'avg': pool_opts['count_include_pad'] = False mod = nn.AvgPool2d(**pool_opts) elif block['method'] == 'max': mod = nn.MaxPool2d(**pool_opts) else: msg = 'unknown pooling type: {}'.format(block['method']) raise ValueError(msg) elif bt == 'dagnn.DropOut': mod = nn.Dropout(p=block['rate']) elif bt == 'dagnn.Permute': mod = pmu.Permute(**opts) elif bt == 'dagnn.Reshape': mod = pmu.Reshape(**opts) elif bt == 'dagnn.Axpy': mod = pmu.Axpy(**opts) elif bt == 'dagnn.Flatten': mod = pmu.Flatten(**opts) is_flattened = True out_chs = [1] elif bt == 'dagnn.Concat': mod = pmu.Concat(**opts) out_chs = [sum(in_chs)] elif bt == 'dagnn.Sum': mod = pmu.Sum(**opts) out_chs = [in_chs[0]] elif bt == 'dagnn.AffineGridGenerator': mod = pmu.AffineGridGen(height=block['Ho'], width=block['Wo'], **opts) uses_functional = True elif bt == 'dagnn.BilinearSampler': mod = pmu.BilinearSampler(**opts) uses_functional = True elif bt in ['dagnn.Loss', 'dagnn.SoftmaxCELoss']: if kwargs['verbose']: print('skipping loss layer: 
{}'.format(node['name'])) continue elif (bt == 'dagnn.SoftMax' and (ii == num_layers - 1) and drop_prob_softmax): continue else: import ipdb ; ipdb.set_trace() for output, out_ch in zip(node['outputs'], out_chs): in_ch_store[output] = out_ch node['mod'] = mod nodes += [node] complete_dag = (ii == num_layers - 1) nodes, is_flattened = flatten_if_needed(nodes, complete_dag, is_flattened, flatten_layer) return nodes, uses_functional def ensure_compatible_repr(mod): repr_str = str(mod) repr_str = repr_str.replace('Conv2d (', 'Conv2d(') if isinstance(mod, nn.MaxPool2d): if 'ceil_mode' not in repr_str: assert repr_str[-2:] == '))', 'unexpected repr format' repr_str = repr_str[:-1] + ', ceil_mode={})'.format(mod.ceil_mode) elif isinstance(mod, nn.Linear): if 'bias' not in repr_str: assert repr_str[-1:] == ')', 'unexpected repr format' bias = mod.bias is not None repr_str = repr_str[:-1] + ', bias={})'.format(bias) elif isinstance(mod, nn.ReLU): if mod.inplace: repr_str = "ReLU(inplace=True)" return repr_str class Network(nn.Module): def __init__(self, name, mcn_net, meta, uses_functional, flatten_layer, debug_mode=True): super(Network, self).__init__() self.name = pmu.capitalize_first_letter(name) self.attr_str = [] self.meta = meta self.forward_str = [] self.mcn_net = mcn_net self.uses_functional = uses_functional self.input_vars = None self.output_vars = [] self.forward_debug_str = [] self.debug_mode = debug_mode self.flatten_layer = flatten_layer def indenter(self, x, depth=2): num_spaces = 4 indent = ' ' * depth * num_spaces return indent + '{}\n'.format(x) def forward_return(self): return 'return {}'.format(', '.join(self.output_vars))
MIT License
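A minimal sketch of how add_mod is driven, based only on the constructor and signatures visible in this record; mcn_net and the nodes list are assumed to come from the load_mcn_net and extract_dag functions shown in the context, and the network name is arbitrary.

def build_state(mcn_net, nodes):
    state_dict = {}
    net = Network('Squeezenet', mcn_net, meta={}, uses_functional=False,
                  flatten_layer='last', debug_mode=False)
    for node in nodes:
        # Each node carries name/inputs/outputs/params plus the pytorch module (or
        # placeholder); add_mod records it and fills state_dict with converted weights.
        net.add_mod(node['name'], node['inputs'], node['outputs'],
                    node['params'], node['mod'], state_dict)
    return net, state_dict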
mitmproxy/mitmproxy
mitmproxy/net/encoding.py
decode
python
def decode( encoded: Union[None, str, bytes], encoding: str, errors: str = 'strict' ) -> Union[None, str, bytes]: if encoded is None: return None encoding = encoding.lower() global _cache cached = ( isinstance(encoded, bytes) and _cache.encoded == encoded and _cache.encoding == encoding and _cache.errors == errors ) if cached: return _cache.decoded try: try: decoded = custom_decode[encoding](encoded) except KeyError: decoded = codecs.decode(encoded, encoding, errors) if encoding in ("gzip", "deflate", "deflateraw", "br", "zstd"): _cache = CachedDecode(encoded, encoding, errors, decoded) return decoded except TypeError: raise except Exception as e: raise ValueError("{} when decoding {} with {}: {}".format( type(e).__name__, repr(encoded)[:10], repr(encoding), repr(e), ))
Decode the given input object. Returns: The decoded value. Raises: ValueError, if decoding fails.
https://github.com/mitmproxy/mitmproxy/blob/667d4e04749a4bc2212f58fa2b8c31cd1d91fc7b/mitmproxy/net/encoding.py#L41-L82
import codecs import collections from io import BytesIO import gzip import zlib import brotli import zstandard as zstd from typing import Union, Optional, AnyStr, overload CachedDecode = collections.namedtuple( "CachedDecode", "encoded encoding errors decoded" ) _cache = CachedDecode(None, None, None, None) @overload def decode(encoded: None, encoding: str, errors: str = 'strict') -> None: ... @overload def decode(encoded: str, encoding: str, errors: str = 'strict') -> str: ... @overload def decode(encoded: bytes, encoding: str, errors: str = 'strict') -> Union[str, bytes]: ...
MIT License
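A quick round-trip sketch for the decode shown above. It assumes the module's matching encode counterpart exists alongside decode; everything else follows directly from the signature and body in the record.

from mitmproxy.net import encoding

compressed = encoding.encode(b"hello world", "gzip")   # assumed counterpart to decode
assert encoding.decode(compressed, "gzip") == b"hello world"

# Names with no custom decoder fall through to codecs.decode:
assert encoding.decode(b"caf\xc3\xa9", "utf8") == "café"
assert encoding.decode(None, "gzip") is None           # None passes through untouched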
whbrewer/spc
src/gluino/contrib/hypermedia.py
Collection.row2data
python
def row2data(self,table,row,text=False): data = [] if self.compact: for fieldname in (self.table_policy.get('fields',table.fields)): field = table[fieldname] if not ((field.type=='text' and text==False) or field.type=='blob' or field.type.startswith('reference ') or field.type.startswith('list:reference ')) and field.name in row: data.append(row[field.name]) else: for fieldname in (self.table_policy.get('fields',table.fields)): field = table[fieldname] if not ((field.type=='text' and text==False) or field.type=='blob' or field.type.startswith('reference ') or field.type.startswith('list:reference ')) and field.name in row: data.append({'name':field.name,'value':row[field.name], 'prompt':field.label, 'type':field.type}) return data
Converts a DAL Row object into a collection.item.
https://github.com/whbrewer/spc/blob/859f15e0fcb3f5f7d84d420f4757ae0a42c5837a/src/gluino/contrib/hypermedia.py#L44-L64
import json from collections import OrderedDict from gluon import URL, IS_SLUG __all__ = ['Collection'] class Collection(object): VERSION = '1.0' MAXITEMS = 100 def __init__(self,db, extensions=True, compact=False): self.db = db self.extensions = extensions self.compact = compact
MIT License
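A usage sketch assuming a web2py/gluino DAL connection; the person table and its rows are hypothetical. Note that row2data consults self.table_policy, which the record's excerpt does not show being set, so the class normally calls it from its own request-processing code; an explicit policy is injected here only for the sketch.

from gluino.contrib.hypermedia import Collection

def person_items(db):
    collection = Collection(db, extensions=True, compact=False)
    collection.table_policy = {'fields': ['name', 'email']}  # shortcut for illustration only
    row = db(db.person).select().first()                     # db.person is a hypothetical DAL table
    # Non-compact mode yields one {'name', 'value', 'prompt', 'type'} dict per field.
    return collection.row2data(db.person, row, text=True)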
crs4/pyehr
pyehr/ehr/services/dbmanager/drivers/interface.py
DriverInterface.delete_record
python
def delete_record(self, record_id): pass
Delete a record from the backend server by giving the record ID
https://github.com/crs4/pyehr/blob/936df084861bfb075f013567d6181585b2754394/pyehr/ehr/services/dbmanager/drivers/interface.py#L152-L156
from abc import ABCMeta, abstractmethod from pyehr.ehr.services.dbmanager.errors import * import re, json from hashlib import md5 class DriverInterface(object): __metaclass__ = ABCMeta def __enter__(self): self.connect() return self def __exit__(self, exception_type, exception_value, traceback): self.disconnect() return None @abstractmethod def connect(self): pass @abstractmethod def disconnect(self): pass @abstractmethod def init_structure(self, structure_def): pass @abstractmethod def encode_record(self, record): pass @abstractmethod def decode_record(self, record): pass @abstractmethod def add_record(self, record): pass @abstractmethod def add_records(self, records, skip_existing_duplicated=False): errors = list() saved = list() for r in records: try: saved.append(self.add_record(r)) except DuplicatedKeyError, dke: if skip_existing_duplicated: errors.append(r) else: raise dke return saved, errors def _check_batch(self, records_batch, uid_field): from collections import Counter duplicated_counter = Counter() for r in records_batch: duplicated_counter[r[uid_field]] += 1 if len(duplicated_counter) < len(records_batch): raise DuplicatedKeyError('The following IDs have one or more duplicated in this batch: %s' % [k for k, v in duplicated_counter.iteritems() if v > 1]) @abstractmethod def get_record_by_id(self, record_id): pass @abstractmethod def get_record_by_version(self, record_id, version): pass @abstractmethod def get_revisions_by_ehr_id(self, record_id): pass @abstractmethod def get_all_records(self): pass @abstractmethod def get_records_by_value(self, field, value): pass @abstractmethod def get_records_by_query(self, selector, fields, limit): pass @abstractmethod def get_values_by_record_id(self, record_id, values_list): pass @abstractmethod def count_records_by_query(self, selector): pass @abstractmethod
MIT License
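The __enter__/__exit__ pair in this record means every concrete driver doubles as a context manager that connects on entry and disconnects on exit. A small sketch of the resulting call pattern, with the concrete driver class and its keyword arguments left as caller-supplied assumptions:

def drop_record(driver_class, record_id, **driver_kwargs):
    # driver_class is any concrete DriverInterface subclass, e.g. a hypothetical
    # MongoDriver; the with-block handles connect()/disconnect() automatically.
    with driver_class(**driver_kwargs) as driver:
        driver.delete_record(record_id)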
steemit/hivemind
hive/server/condenser_api/methods.py
get_post_discussions_by_payout
python
async def get_post_discussions_by_payout(context, start_author: str = '', start_permlink: str = '', limit: int = 20, tag: str = None, truncate_body: int = 0): ids = await cursor.pids_by_query( context['db'], 'payout', valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100), valid_tag(tag, allow_empty=True)) return await load_posts(context['db'], ids, truncate_body=truncate_body)
Query top-level posts, sorted by payout.
https://github.com/steemit/hivemind/blob/d99b852e1ad321aeb67eaec5fb03f7bfb32c75d6/hive/server/condenser_api/methods.py#L344-L355
from functools import wraps import hive.server.condenser_api.cursor as cursor from hive.server.condenser_api.objects import load_posts, load_posts_reblogs from hive.server.common.helpers import ( ApiError, return_error_info, valid_account, valid_permlink, valid_tag, valid_offset, valid_limit, valid_follow_type) @return_error_info async def get_account_votes(context, account): raise ApiError("get_account_votes is no longer supported, for details see " "https://steemit.com/steemit/@steemitdev/additional-public-api-change") def _legacy_follower(follower, following, follow_type): return dict(follower=follower, following=following, what=[follow_type]) def _legacy_follower_with_reputation (follower, reputation, following, follow_type): what = ['',''] if follow_type & 1 != 0: what[0] = 'blog' if follow_type & 2 != 0: what[1] = 'ignore' return dict(follower=follower, reputation=reputation, following=following, what=what) @return_error_info async def get_followers(context, account: str, start: str, follow_type: str = None, limit: int = None, **kwargs): if not follow_type and 'type' in kwargs: follow_type = kwargs['type'] if not follow_type: follow_type = 'blog' followers = await cursor.get_followers( context['db'], valid_account(account), valid_account(start, allow_empty=True), valid_follow_type(follow_type), valid_limit(limit, 1000)) return [_legacy_follower_with_reputation(row['name'], row['reputation'],account,row['state']) for row in followers] @return_error_info async def get_followers_by_page(context, account: str, page: int, page_size: int = None, follow_type: str = None, **kwargs): if not follow_type and 'type' in kwargs: follow_type = kwargs['type'] if not follow_type: follow_type = 'blog' followers = await cursor.get_followers_by_page( context['db'], valid_account(account), valid_offset(page), valid_limit(page_size, 100), valid_follow_type(follow_type)) return [_legacy_follower_with_reputation(row['name'], row['reputation'],account,row['state']) for row in followers] @return_error_info async def get_following(context, account: str, start: str, follow_type: str = None, limit: int = None, **kwargs): if not follow_type and 'type' in kwargs: follow_type = kwargs['type'] if not follow_type: follow_type = 'blog' following = await cursor.get_following( context['db'], valid_account(account), valid_account(start, allow_empty=True), valid_follow_type(follow_type), valid_limit(limit, 1000)) return [_legacy_follower_with_reputation(account,row['reputation'],row['name'],row['state']) for row in following] @return_error_info async def get_following_by_page(context, account: str, page: int, page_size: int = None, follow_type: str = None, **kwargs): if not follow_type and 'type' in kwargs: follow_type = kwargs['type'] if not follow_type: follow_type = 'blog' following = await cursor.get_following_by_page( context['db'], valid_account(account), valid_offset(page), valid_limit(page_size, 100), valid_follow_type(follow_type)) return [_legacy_follower_with_reputation(account,row['reputation'],row['name'],row['state']) for row in following] @return_error_info async def get_follow_count(context, account: str): count = await cursor.get_follow_counts( context['db'], valid_account(account)) return dict(account=account, following_count=count['following'], follower_count=count['followers']) @return_error_info async def get_reblogged_by(context, author: str, permlink: str): return await cursor.get_reblogged_by( context['db'], valid_account(author), valid_permlink(permlink)) @return_error_info async def 
get_account_reputations(context, account_lower_bound: str = None, limit: int = None): return {'reputations': await cursor.get_account_reputations( context['db'], account_lower_bound, valid_limit(limit, 1000))} @return_error_info async def get_content(context, author: str, permlink: str): db = context['db'] valid_account(author) valid_permlink(permlink) post_id = await cursor.get_post_id(db, author, permlink) if not post_id: return {'id': 0, 'author': '', 'permlink': ''} posts = await load_posts(db, [post_id]) assert posts, 'post was not found in cache' return posts[0] @return_error_info async def get_content_replies(context, author: str, permlink: str): db = context['db'] valid_account(author) valid_permlink(permlink) parent_id = await cursor.get_post_id(db, author, permlink) if parent_id: child_ids = await cursor.get_child_ids(db, parent_id) if child_ids: return await load_posts(db, child_ids) return [] def nested_query_compat(function): @wraps(function) def wrapper(*args, **kwargs): if args and not kwargs and len(args) == 2 and isinstance(args[1], dict): return function(args[0], **args[1]) return function(*args, **kwargs) return wrapper @return_error_info @nested_query_compat async def get_discussions_by_trending(context, start_author: str = '', start_permlink: str = '', limit: int = 20, tag: str = None, truncate_body: int = 0, filter_tags: list = None): assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_query( context['db'], 'trending', valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100), valid_tag(tag, allow_empty=True)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_hot(context, start_author: str = '', start_permlink: str = '', limit: int = 20, tag: str = None, truncate_body: int = 0, filter_tags: list = None): assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_query( context['db'], 'hot', valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100), valid_tag(tag, allow_empty=True)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_promoted(context, start_author: str = '', start_permlink: str = '', limit: int = 20, tag: str = None, truncate_body: int = 0, filter_tags: list = None): assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_query( context['db'], 'promoted', valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100), valid_tag(tag, allow_empty=True)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_created(context, start_author: str = '', start_permlink: str = '', limit: int = 20, tag: str = None, truncate_body: int = 0, filter_tags: list = None): assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_query( context['db'], 'created', valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100), valid_tag(tag, allow_empty=True)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_blog(context, tag: str = None, start_author: str = '', start_permlink: str = '', 
limit: int = 20, truncate_body: int = 0, filter_tags: list = None): assert tag, '`tag` cannot be blank' assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_blog( context['db'], valid_account(tag), valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_feed(context, tag: str = None, start_author: str = '', start_permlink: str = '', limit: int = 20, truncate_body: int = 0, filter_tags: list = None): assert tag, '`tag` cannot be blank' assert not filter_tags, 'filter_tags not supported' res = await cursor.pids_by_feed_with_reblog( context['db'], valid_account(tag), valid_account(start_author, allow_empty=True), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100)) return await load_posts_reblogs(context['db'], res, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_comments(context, start_author: str = None, start_permlink: str = '', limit: int = 20, truncate_body: int = 0, filter_tags: list = None): assert start_author, '`start_author` cannot be blank' assert not filter_tags, 'filter_tags not supported' ids = await cursor.pids_by_account_comments( context['db'], valid_account(start_author), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_replies_by_last_update(context, start_author: str = None, start_permlink: str = '', limit: int = 20, truncate_body: int = 0): assert start_author, '`start_author` cannot be blank' ids = await cursor.pids_by_replies_to_account( context['db'], valid_account(start_author), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100)) return await load_posts(context['db'], ids, truncate_body=truncate_body) @return_error_info @nested_query_compat async def get_discussions_by_author_before_date(context, author: str = None, start_permlink: str = '', before_date: str = '', limit: int = 10): assert author, '`author` cannot be blank' ids = await cursor.pids_by_blog_without_reblog( context['db'], valid_account(author), valid_permlink(start_permlink, allow_empty=True), valid_limit(limit, 100)) return await load_posts(context['db'], ids) @return_error_info @nested_query_compat
MIT License
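A call sketch for the coroutine above; db is assumed to be hivemind's already-initialised async database adapter, and the tag and limits are illustrative. Only the context-dict calling convention and keyword names come from the record.

import asyncio
from hive.server.condenser_api.methods import get_post_discussions_by_payout

async def top_payout_posts(db):
    # The condenser API methods receive their database handle via the context dict.
    return await get_post_discussions_by_payout(
        {'db': db}, tag='photography', limit=10, truncate_body=1024)

# posts = asyncio.get_event_loop().run_until_complete(top_payout_posts(db))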
hrnet/hrnet-maskrcnn-benchmark
maskrcnn_benchmark/config/yacs.py
CfgNode.merge_from_list
python
def merge_from_list(self, cfg_list): _assert_with_logging( len(cfg_list) % 2 == 0, "Override list has odd length: {}; it must be a list of pairs".format( cfg_list ), ) root = self for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]): if root.key_is_deprecated(full_key): continue if root.key_is_renamed(full_key): root.raise_key_rename_error(full_key) key_list = full_key.split(".") d = self for subkey in key_list[:-1]: _assert_with_logging( subkey in d, "Non-existent key: {}".format(full_key) ) d = d[subkey] subkey = key_list[-1] _assert_with_logging(subkey in d, "Non-existent key: {}".format(full_key)) value = _decode_cfg_value(v) value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key) d[subkey] = value
Merge config (keys, values) in a list (e.g., from command line) into this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
https://github.com/hrnet/hrnet-maskrcnn-benchmark/blob/a807747c65cbd64edd807100da3fc2ce291fda16/maskrcnn_benchmark/config/yacs.py#L177-L204
import copy import io import logging import os from ast import literal_eval import yaml _PY2 = False _YAML_EXTS = {"", ".yaml", ".yml"} _PY_EXTS = {".py"} try: _FILE_TYPES = (file, io.IOBase) _PY2 = True except NameError: _FILE_TYPES = (io.IOBase,) _VALID_TYPES = {tuple, list, str, int, float, bool} if _PY2: _VALID_TYPES = _VALID_TYPES.union({unicode}) if _PY2: import imp else: import importlib.util logger = logging.getLogger(__name__) class CfgNode(dict): IMMUTABLE = "__immutable__" DEPRECATED_KEYS = "__deprecated_keys__" RENAMED_KEYS = "__renamed_keys__" def __init__(self, init_dict=None, key_list=None): init_dict = {} if init_dict is None else init_dict key_list = [] if key_list is None else key_list for k, v in init_dict.items(): if type(v) is dict: init_dict[k] = CfgNode(v, key_list=key_list + [k]) else: _assert_with_logging( _valid_type(v, allow_cfg_node=True), "Key {} with value {} is not a valid type; valid types: {}".format( ".".join(key_list + [k]), type(v), _VALID_TYPES ), ) super(CfgNode, self).__init__(init_dict) self.__dict__[CfgNode.IMMUTABLE] = False self.__dict__[CfgNode.DEPRECATED_KEYS] = set() self.__dict__[CfgNode.RENAMED_KEYS] = { } def __getattr__(self, name): if name in self: return self[name] else: raise AttributeError(name) def __setattr__(self, name, value): if self.is_frozen(): raise AttributeError( "Attempted to set {} to {}, but CfgNode is immutable".format( name, value ) ) _assert_with_logging( name not in self.__dict__, "Invalid attempt to modify internal CfgNode state: {}".format(name), ) _assert_with_logging( _valid_type(value, allow_cfg_node=True), "Invalid type {} for key {}; valid types = {}".format( type(value), name, _VALID_TYPES ), ) self[name] = value def __str__(self): def _indent(s_, num_spaces): s = s_.split("\n") if len(s) == 1: return s_ first = s.pop(0) s = [(num_spaces * " ") + line for line in s] s = "\n".join(s) s = first + "\n" + s return s r = "" s = [] for k, v in sorted(self.items()): seperator = "\n" if isinstance(v, CfgNode) else " " attr_str = "{}:{}{}".format(str(k), seperator, str(v)) attr_str = _indent(attr_str, 2) s.append(attr_str) r += "\n".join(s) return r def __repr__(self): return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__()) def dump(self): self_as_dict = _to_dict(self) return yaml.safe_dump(self_as_dict) def merge_from_file(self, cfg_filename): with open(cfg_filename, "r") as f: cfg = load_cfg(f) self.merge_from_other_cfg(cfg) def merge_from_other_cfg(self, cfg_other): _merge_a_into_b(cfg_other, self, self, [])
MIT License
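A self-contained sketch of merge_from_list using only the CfgNode class shown in this record; the keys and values are arbitrary, and float overrides are sanctioned by the method's own docstring example.

from maskrcnn_benchmark.config.yacs import CfgNode as CN

cfg = CN({'FOO': {'BAR': 0.1}, 'NAME': 'baseline'})
# Overrides arrive as a flat list of (dotted key, value) pairs, e.g. from argparse.
cfg.merge_from_list(['FOO.BAR', 0.5, 'NAME', 'tuned'])
assert cfg.FOO.BAR == 0.5 and cfg.NAME == 'tuned'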
threatconnect-inc/tcex
tcex/threat_intelligence/mappings/security_label.py
SecurityLabel.name
python
def name(self, name): self._data['name'] = name data = {'name': name} return self._tc_requests.update( self.api_type, self.api_branch, self.unique_id, data, owner=self.owner )
Updates the security label's name. Args: name: The new name for the security label.
https://github.com/threatconnect-inc/tcex/blob/dae37b73d8b33cf26360f6d25c6b305a68f2f0e2/tcex/threat_intelligence/mappings/security_label.py#L63-L73
from .mappings import Mappings module = __import__(__name__) class SecurityLabel(Mappings): def __init__(self, ti: 'ThreatIntelligenc', name, **kwargs): super().__init__( ti, main_type='SecurityLabel', api_type='securitylabels', sub_type=None, api_entity='securityLabel', api_branch=None, owner=kwargs.pop('owner', None), ) self._data['type'] = 'securityLabels' self._data['sub_type'] = None self._data['name'] = name for arg, value in kwargs.items(): self.add_key_value(arg, value) @property def as_entity(self): return {} @staticmethod def is_security_label(): return True def can_create(self): if self._data.get('name'): return True return False def add_key_value(self, key, value): self._data[key] = value
Apache License 2.0
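A sketch built only from the __init__ and name() shown in this record; ti is assumed to be an already-configured ThreatIntelligence instance from a TcEx app, and the label values are illustrative.

from tcex.threat_intelligence.mappings.security_label import SecurityLabel

label = SecurityLabel(ti, 'TLP:AMBER', owner='Example Org')
response = label.name('TLP:AMBER:STRICT')   # issues the update request shown above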