repository_name: string (lengths 7 to 107)
function_path: string (lengths 4 to 190)
function_identifier: string (lengths 1 to 236)
language: string (1 distinct value)
function: string (lengths 9 to 647k)
docstring: string (lengths 5 to 488k)
function_url: string (lengths 71 to 285)
context: string (lengths 0 to 2.51M)
license: string (5 distinct values)
cisco-en-programmability/dnacentersdk
dnacentersdk/api/v2_2_1/tag.py
Tag.remove_tag_member
python
def remove_tag_member(self, id, member_id, headers=None, **request_parameters):
    check_type(headers, dict)
    check_type(id, basestring, may_be_none=False)
    check_type(member_id, basestring, may_be_none=False)
    if headers is not None:
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    _params = {
    }
    _params.update(request_parameters)
    _params = dict_from_items_with_values(_params)

    path_params = {
        'id': id,
        'memberId': member_id,
    }

    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True

    e_url = ('/dna/intent/api/v1/tag/{id}/member/{memberId}')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        json_data = self._session.delete(endpoint_full_url, params=_params,
                                         headers=_headers)
    else:
        json_data = self._session.delete(endpoint_full_url, params=_params)

    return self._object_factory('bpm_cc9883be5c1cad1959347babb342_v2_2_1', json_data)
Removes Tag member from the tag specified by id.

Args:
    id(basestring): id path parameter. Tag ID.
    member_id(basestring): memberId path parameter. TagMember id to be removed from tag.
    headers(dict): Dictionary of HTTP Headers to send with the Request.
    **request_parameters: Additional request parameters (provides
        support for parameters that may be added in the future).

Returns:
    MyDict: JSON response. Access the object's properties by using
    the dot notation or the bracket notation.

Raises:
    TypeError: If the parameter types are incorrect.
    MalformedRequest: If the request body created is invalid.
    ApiError: If the DNA Center cloud returns an error.
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/api/v2_2_1/tag.py#L919-L977
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from builtins import * from past.builtins import basestring from ...restsession import RestSession from ...utils import ( check_type, dict_from_items_with_values, apply_path_params, dict_of_str, ) class Tag(object): def __init__(self, session, object_factory, request_validator): check_type(session, RestSession) super(Tag, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator def get_tag_members_by_id(self, id, member_type, level=None, limit=None, member_association_type=None, offset=None, headers=None, **request_parameters): check_type(headers, dict) check_type(member_type, basestring, may_be_none=False) check_type(offset, basestring) check_type(limit, basestring) check_type(member_association_type, basestring) check_type(level, basestring) check_type(id, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'memberType': member_type, 'offset': offset, 'limit': limit, 'memberAssociationType': member_association_type, 'level': level, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/{id}/member') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_ff12c50ea3fb53c9a53f9c9e2c595d44_v2_2_1', json_data) def add_members_to_the_tag(self, id, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } _payload = { } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_dcc43be0514e50fea80cfa827f13ee5c_v2_2_1') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/{id}/member') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_dcc43be0514e50fea80cfa827f13ee5c_v2_2_1', json_data) def get_tag(self, additional_info_attributes=None, additional_info_name_space=None, field=None, level=None, limit=None, name=None, offset=None, order=None, size=None, sort_by=None, system_tag=None, headers=None, **request_parameters): check_type(headers, dict) check_type(name, basestring) check_type(additional_info_name_space, basestring) check_type(additional_info_attributes, basestring) 
check_type(level, basestring) check_type(offset, basestring) check_type(limit, basestring) check_type(size, basestring) check_type(field, basestring) check_type(sort_by, basestring) check_type(order, basestring) check_type(system_tag, basestring) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'name': name, 'additionalInfo.nameSpace': additional_info_name_space, 'additionalInfo.attributes': additional_info_attributes, 'level': level, 'offset': offset, 'limit': limit, 'size': size, 'field': field, 'sortBy': sort_by, 'order': order, 'systemTag': system_tag, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_a4185f5b40aabe991f8cdb2816_v2_2_1', json_data) def update_tag(self, description=None, dynamicRules=None, id=None, instanceTenantId=None, name=None, systemTag=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'systemTag': systemTag, 'description': description, 'dynamicRules': dynamicRules, 'name': name, 'id': id, 'instanceTenantId': instanceTenantId, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_c9f995abc21b54e7860f66aef2ffbc85_v2_2_1') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_c9f995abc21b54e7860f66aef2ffbc85_v2_2_1', json_data) def create_tag(self, description=None, dynamicRules=None, id=None, instanceTenantId=None, name=None, systemTag=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'systemTag': systemTag, 'description': description, 'dynamicRules': dynamicRules, 'name': name, 'id': id, 'instanceTenantId': instanceTenantId, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if 
active_validation: self._request_validator('jsd_e8271b05b62c54609f74b4f2f373ad5a_v2_2_1') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_e8271b05b62c54609f74b4f2f373ad5a_v2_2_1', json_data) def get_tag_member_count(self, id, member_type, level=None, member_association_type=None, headers=None, **request_parameters): check_type(headers, dict) check_type(member_type, basestring, may_be_none=False) check_type(member_association_type, basestring) check_type(level, basestring) check_type(id, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'memberType': member_type, 'memberAssociationType': member_association_type, 'level': level, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/{id}/member/count') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_ffacb52f745c15b40b9b352754e2e1_v2_2_1', json_data) def get_tag_by_id(self, id, headers=None, **request_parameters): check_type(headers, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/{id}') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_d65f9b9d8ad5426bdf7e55461fcf761_v2_2_1', json_data) def delete_tag(self, id, headers=None, **request_parameters): check_type(headers, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/{id}') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.delete(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.delete(endpoint_full_url, params=_params) return 
self._object_factory('bpm_ed48fc373506cb1688cff36c2cb0f_v2_2_1', json_data) def updates_tag_membership(self, memberToTags=None, memberType=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'memberToTags': memberToTags, 'memberType': memberType, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_e3934b0fb68a5ff787e65e9b7c8e6296_v2_2_1') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/member') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_e3934b0fb68a5ff787e65e9b7c8e6296_v2_2_1', json_data) def get_tag_resource_types(self, headers=None, **request_parameters): check_type(headers, dict) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/member/type') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_baf47897d525e5899f62e4d5bdd260b_v2_2_1', json_data) def get_tag_count(self, attribute_name=None, level=None, name=None, name_space=None, size=None, system_tag=None, headers=None, **request_parameters): check_type(headers, dict) check_type(name, basestring) check_type(name_space, basestring) check_type(attribute_name, basestring) check_type(level, basestring) check_type(size, basestring) check_type(system_tag, basestring) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'name': name, 'nameSpace': name_space, 'attributeName': attribute_name, 'level': level, 'size': size, 'systemTag': system_tag, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/count') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_afb52259f7c3501ca4d8ccd277828658_v2_2_1', json_data)
MIT License
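A minimal usage sketch for Tag.remove_tag_member, assuming a reachable DNA Center instance; the base URL, credentials, and both UUIDs below are placeholders rather than values taken from this row.

from dnacentersdk import DNACenterAPI

api = DNACenterAPI(base_url='https://sandboxdnac.example.com',
                   username='devnetuser', password='secret',
                   version='2.2.1', verify=False)

# Detach a member (e.g. a device) from a tag; both IDs are hypothetical UUIDs.
result = api.tag.remove_tag_member(id='tag-uuid', member_id='member-uuid')
print(result)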
mdabrowski1990/uds
uds/messages/uds_message.py
UdsMessage.addressing
python
def addressing(self) -> AddressingType:
    return self.__addressing
Addressing type for which this message is relevant.
https://github.com/mdabrowski1990/uds/blob/6fd4a994785565c099fca7cb7cc3f768f7311c92/uds/messages/uds_message.py#L51-L53
__all__ = ["UdsMessage", "UdsMessageRecord"] from typing import Any from uds.utilities import RawBytes, RawBytesTuple, validate_raw_bytes, ReassignmentError, TimeStamp from .transmission_attributes import AddressingType, AddressingMemberTyping, TransmissionDirection from .uds_packet import AbstractUdsPacketRecord, PacketsRecordsTuple, PacketsRecordsSequence class UdsMessage: def __init__(self, payload: RawBytes, addressing: AddressingMemberTyping) -> None: self.payload = payload self.addressing = addressing @property def payload(self) -> RawBytesTuple: return self.__payload @payload.setter def payload(self, value: RawBytes): validate_raw_bytes(value) self.__payload = tuple(value) @property
MIT License
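A short sketch of the addressing property in use; it assumes AddressingType.PHYSICAL is a valid member of the enum imported alongside UdsMessage, and the payload bytes are made up.

from uds.messages.uds_message import UdsMessage
from uds.messages.transmission_attributes import AddressingType

msg = UdsMessage(payload=[0x10, 0x03], addressing=AddressingType.PHYSICAL)
print(msg.payload)     # (16, 3) - payload is stored as a tuple
print(msg.addressing)  # the addressing type this message is relevant for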
signalfx/signalfx-python
signalfx/pyformance/registry.py
hist_calls
python
def hist_calls(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        _histogram = histogram(
            "%s_calls" % pyformance.registry.get_qualname(fn))
        rtn = fn(*args, **kwargs)
        if type(rtn) in (int, float):
            _histogram.add(rtn)
        return rtn
    return wrapper
Decorator to check the distribution of return values of a function.
https://github.com/signalfx/signalfx-python/blob/2d07b0f0ffb91ccba7071eafab306673c3d71cb7/signalfx/pyformance/registry.py#L161-L173
import functools from .metadata import MetricMetadata import pyformance.registry import re import time from pyformance.registry import (clear, count_calls, dump_metrics, global_registry, meter_calls, set_global_registry, time_calls) class MetricsRegistry(pyformance.registry.MetricsRegistry): def __init__(self, clock=time): self.metadata = MetricMetadata() super(MetricsRegistry, self).__init__(clock=clock) def add(self, key, metric, **dims): return super(MetricsRegistry, self).add( self.metadata.register(key, **dims), metric) def counter(self, key, **dims): return super(MetricsRegistry, self).counter( self.metadata.register(key, **dims)) def histogram(self, key, **dims): return super(MetricsRegistry, self).histogram( self.metadata.register(key, **dims)) def gauge(self, key, gauge=None, default=float("nan"), **dims): return super(MetricsRegistry, self).gauge( self.metadata.register(key, **dims), gauge=gauge, default=default) def meter(self, key, **dims): return super(MetricsRegistry, self).meter( self.metadata.register(key, **dims)) def timer(self, key, **dims): return super(MetricsRegistry, self).timer( self.metadata.register(key, **dims)) def clear(self): self.metadata.clear() super(MetricsRegistry, self).clear() set_global_registry(MetricsRegistry()) class RegexRegistry(MetricsRegistry): def __init__(self, pattern=None, clock=time): super(RegexRegistry, self).__init__(clock) if pattern is not None: self.pattern = re.compile(pattern) else: self.pattern = re.compile('^$') def _get_key(self, key): matches = self.pattern.finditer(key) key = '/'.join((v for match in matches for v in match.groups() if v)) return key def timer(self, key, **dims): return super(RegexRegistry, self).timer(self._get_key(key), **dims) def histogram(self, key, **dims): return super(RegexRegistry, self).histogram(self._get_key(key), **dims) def counter(self, key, **dims): return super(RegexRegistry, self).counter(self._get_key(key), **dims) def gauge(self, key, gauge=None, default=float("nan"), **dims): return super(RegexRegistry, self).gauge( self._get_key(key), gauge=gauge, default=default, **dims) def meter(self, key, **dims): return super(RegexRegistry, self).meter(self._get_key(key), **dims) def counter(key, **dims): return global_registry().counter(key, **dims) def histogram(key, **dims): return global_registry().histogram(key, **dims) def meter(key, **dims): return global_registry().meter(key, **dims) def timer(key, **dims): return global_registry().timer(key, **dims) def gauge(key, gauge=None, default=float("nan"), **dims): return global_registry().gauge(key, gauge=gauge, default=default, **dims) def count_calls_with_dims(**dims): def counter_wrapper(fn): @functools.wraps(fn) def fn_wrapper(*args, **kwargs): counter("%s_calls" % pyformance.registry.get_qualname(fn), **dims).inc() return fn(*args, **kwargs) return fn_wrapper return counter_wrapper def meter_calls_with_dims(**dims): def meter_wrapper(fn): @functools.wraps(fn) def fn_wrapper(*args, **kwargs): meter("%s_calls" % pyformance.registry.get_qualname(fn), **dims).mark() return fn(*args, **kwargs) return fn_wrapper return meter_wrapper
Apache License 2.0
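A brief sketch of the hist_calls decorator, using the dump_metrics helper re-exported by the same module; the decorated function and its values are invented for illustration.

from signalfx.pyformance.registry import hist_calls, dump_metrics

@hist_calls
def compute_score(x):
    return x * 0.5

for value in range(10):
    compute_score(value)

# A histogram named "<qualname of compute_score>_calls" now holds the
# distribution of the returned floats.
print(dump_metrics())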
byceps/byceps
byceps/services/site/service.py
get_all_sites
python
def get_all_sites() -> set[Site]:
    sites = db.session.query(DbSite).all()

    return {_db_entity_to_site(site) for site in sites}
Return all sites.
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/services/site/service.py#L138-L142
from __future__ import annotations import dataclasses from typing import Optional, Union from ...database import db from ...typing import BrandID, PartyID from ..board.transfer.models import BoardID from ..brand import service as brand_service from ..news import channel_service as news_channel_service from ..news.transfer.models import ChannelID as NewsChannelID from ..shop.storefront.transfer.models import StorefrontID from .dbmodels.site import Site as DbSite from .dbmodels.setting import Setting as DbSetting from .transfer.models import Site, SiteID, SiteWithBrand class UnknownSiteId(Exception): pass def create_site( site_id: SiteID, title: str, server_name: str, brand_id: BrandID, *, enabled: bool = False, user_account_creation_enabled: bool = False, login_enabled: bool = False, party_id: Optional[PartyID] = None, board_id: Optional[BoardID] = None, storefront_id: Optional[StorefrontID] = None, ) -> Site: site = DbSite( site_id, title, server_name, brand_id, enabled, user_account_creation_enabled, login_enabled, party_id=party_id, board_id=board_id, storefront_id=storefront_id, ) db.session.add(site) db.session.commit() return _db_entity_to_site(site) def update_site( site_id: SiteID, title: str, server_name: str, brand_id: BrandID, party_id: Optional[PartyID], enabled: bool, user_account_creation_enabled: bool, login_enabled: bool, board_id: Optional[BoardID], storefront_id: Optional[StorefrontID], archived: bool, ) -> Site: site = _get_db_site(site_id) site.title = title site.server_name = server_name site.brand_id = brand_id site.party_id = party_id site.enabled = enabled site.user_account_creation_enabled = user_account_creation_enabled site.login_enabled = login_enabled site.board_id = board_id site.storefront_id = storefront_id site.archived = archived db.session.commit() return _db_entity_to_site(site) def delete_site(site_id: SiteID) -> None: db.session.query(DbSetting) .filter_by(site_id=site_id) .delete() db.session.query(DbSite) .filter_by(id=site_id) .delete() db.session.commit() def _find_db_site(site_id: SiteID) -> Optional[DbSite]: return db.session.query(DbSite).get(site_id) def _get_db_site(site_id: SiteID) -> DbSite: site = _find_db_site(site_id) if site is None: raise UnknownSiteId(site_id) return site def find_site(site_id: SiteID) -> Optional[Site]: site = _find_db_site(site_id) if site is None: return None return _db_entity_to_site(site) def get_site(site_id: SiteID) -> Site: site = _get_db_site(site_id) return _db_entity_to_site(site)
BSD 3-Clause New or Revised License
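A sketch of calling get_all_sites, assuming a configured BYCEPS application context and database; the attribute names on the returned Site objects are assumed from the surrounding service code, and the sort is only for stable output.

from byceps.services.site import service as site_service

sites = site_service.get_all_sites()
for site in sorted(sites, key=lambda s: s.id):
    print(site.id, site.server_name, site.enabled)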
openshift/doozer
doozerlib/util.py
is_commit_in_public_upstream
python
def is_commit_in_public_upstream(revision: str, public_upstream_branch: str, source_dir: str):
    cmd = ["git", "merge-base", "--is-ancestor", "--", revision,
           "public_upstream/" + public_upstream_branch]
    rc, out, err = exectools.cmd_gather(cmd)
    if rc == 0:
        return True
    if rc == 1:
        return False
    raise IOError(
        f"Couldn't determine if the commit {revision} is in the public upstream source repo. `git merge-base` exited with {rc}, stdout={out}, stderr={err}")
Determine if the public upstream branch includes the specified commit.

:param revision: Git commit hash or reference
:param public_upstream_branch: Git branch of the public upstream source
:param source_dir: Path to the local Git repository
https://github.com/openshift/doozer/blob/9381af244e5d6533d45868bbab1a71888cc7d1d1/doozerlib/util.py#L185-L202
import copy import functools import json import os import pathlib import re import urllib.parse from collections import deque from contextlib import contextmanager from datetime import datetime from inspect import getframeinfo, stack from itertools import chain from os.path import abspath from pathlib import Path from sys import getsizeof, stderr from typing import Dict, Iterable, List, Optional, Tuple, Union import click import semver import yaml try: from reprlib import repr except ImportError: pass from doozerlib import constants, exectools def stringify(val): try: val = val.decode('utf-8') except (UnicodeDecodeError, AttributeError): pass return val def red_prefix(msg, file=None): click.secho(stringify(msg), nl=False, bold=True, fg='red', file=file) def red_print(msg, file=None): click.secho(stringify(msg), nl=True, bold=False, fg='red', file=file) def green_prefix(msg, file=None): click.secho(stringify(msg), nl=False, bold=True, fg='green', file=file) def green_print(msg, file=None): click.secho(stringify(msg), nl=True, bold=False, fg='green', file=file) def yellow_prefix(msg, file=None): click.secho(stringify(msg), nl=False, bold=True, fg='yellow', file=file) def yellow_print(msg, file=None): click.secho(stringify(msg), nl=True, bold=False, fg='yellow', file=file) def cprint(msg, file=None): click.echo(stringify(msg), file=file) def color_print(msg, color='white', nl=True, file=None): click.secho(stringify(msg), nl=nl, bold=False, fg=color, file=file) DICT_EMPTY = object() def dict_get(dct, path, default=DICT_EMPTY): dct = copy.deepcopy(dct) for key in path.split('.'): try: dct = dct[key] except KeyError: if default is DICT_EMPTY: raise Exception('Unable to follow key path {}'.format(path)) return default return dct def remove_prefix(s: str, prefix: str) -> str: if s.startswith(prefix): return s[len(prefix):] else: return s[:] def remove_prefixes(s: str, *args) -> str: for prefix in args: s = remove_prefix(s, prefix) return s def remove_suffix(s: str, suffix: str) -> str: if suffix and s.endswith(suffix): return s[:-len(suffix)] else: return s[:] def convert_remote_git_to_https(source_url: str): url = source_url.strip().rstrip('/') url = remove_prefixes(url, 'http://', 'https://', 'git://', 'git@', 'ssh://') url = remove_suffix(url, '.git') url = url.split('@', 1)[-1] if url.find(':') > -1: server, org_repo = url.rsplit(':', 1) elif url.rfind('/') > -1: server, org_repo = url.rsplit('/', 1) else: return f'https://{url}' return f'https://{server}/{org_repo}' def split_git_url(url) -> (str, str, str): https_normalized = convert_remote_git_to_https(url) url = https_normalized[8:] server, repo = url.split('/', 1) org, repo_name = repo.split('/', 1) return server, org, repo_name def convert_remote_git_to_ssh(url): server, org, repo_name = split_git_url(url) return f'git@{server}:{org}/{repo_name}.git' def setup_and_fetch_public_upstream_source(public_source_url: str, public_upstream_branch: str, source_dir: str): out, err = exectools.cmd_assert(["git", "-C", source_dir, "remote"]) if 'public_upstream' not in out.strip().split(): exectools.cmd_assert(["git", "-C", source_dir, "remote", "add", "--", "public_upstream", public_source_url]) else: exectools.cmd_assert(["git", "-C", source_dir, "remote", "set-url", "--", "public_upstream", public_source_url]) exectools.cmd_assert(["git", "-C", source_dir, "fetch", "--", "public_upstream", public_upstream_branch], retries=3, set_env=constants.GIT_NO_PROMPTS)
Apache License 2.0
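A sketch of the public-upstream check, paired with setup_and_fetch_public_upstream_source from the same module; the repository URL, branch, commit hash, and checkout path are hypothetical.

from doozerlib.util import (
    is_commit_in_public_upstream,
    setup_and_fetch_public_upstream_source,
)

source_dir = "/tmp/checkouts/private-fork"
setup_and_fetch_public_upstream_source(
    "https://github.com/openshift/example.git", "release-4.8", source_dir)

if is_commit_in_public_upstream("abc123d", "release-4.8", source_dir):
    print("commit already exists in the public upstream")
else:
    print("commit has not been published upstream yet")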
buzzlawless/ynab-live-import
lambda_functions/postToYnab.py
is_duplicate_invocation
python
def is_duplicate_invocation(message_id):
    response = ddbclient.get_item(TableName=TABLE_NAME,
                                  Key={'message_id': {'S': message_id}},
                                  ConsistentRead=True)
    return 'Item' not in response
Checks if this lambda function has already been invoked for this transaction
https://github.com/buzzlawless/ynab-live-import/blob/93c1e4e58b267ddb6c65fdb62b6b3ae7fe96cb30/lambda_functions/postToYnab.py#L37-L43
import os
import sys
import json

import boto3
import botocore
import requests

s3client = boto3.client('s3')
BUCKET_NAME = os.getenv('bucket_name')
ddbclient = boto3.client('dynamodb')
TABLE_NAME = os.getenv('table_name')
PERSONAL_ACCESS_TOKEN = os.getenv('personal_access_token')
BUDGET_ID = os.getenv('budget_id')
TIMEOUT = 5


def lambda_handler(event, context):
    for record in event['Records']:
        if record['eventName'] == 'INSERT':
            image = record['dynamodb']['NewImage']
            message_id = image['message_id']['S']
            if is_duplicate_invocation(message_id):
                continue
            cleanup(message_id)
            transaction_data = {'transaction': {
                'account_id': get_account_id(image['last_digits']['S']),
                'date': image['date']['S'],
                'amount': to_milliunits(image['amount']['S']),
                'payee_name': image['payee']['S'],
                'cleared': 'uncleared'}
            }
            post_transaction(transaction_data)
MIT License
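The same DynamoDB lookup pattern, sketched directly with boto3 so it can be tried outside the Lambda; the table name and message id stand in for the values the module reads from its environment.

import boto3

ddb = boto3.client('dynamodb')
response = ddb.get_item(TableName='ynab-import-messages',
                        Key={'message_id': {'S': 'msg-0001'}},
                        ConsistentRead=True)
# Mirrors is_duplicate_invocation: True when no item was stored for this id.
already_seen = 'Item' not in response
print(already_seen)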
czbiohub/microdl
micro_dl/plotting/plot_utils.py
save_mask_overlay
python
def save_mask_overlay(input_image, mask, op_fname, alpha=0.7):
    assert 0 <= alpha <= 1, 'alpha must be between 0 and 1'
    fig, ax = plt.subplots(1, 3)
    fig.set_size_inches((15, 5))
    ax[0].imshow(input_image, cmap='gray')
    ax[0].axis('off')
    ax[1].imshow(mask, cmap='gray')
    ax[1].axis('off')
    im_rgb = input_image / input_image.max() * 255
    im_rgb = im_rgb.astype(np.uint8)
    im_rgb = cv2.cvtColor(im_rgb, cv2.COLOR_GRAY2RGB)
    try:
        _, contours, _ = cv2.findContours(
            mask.astype(np.uint8),
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE,
        )
    except ValueError:
        contours, _ = cv2.findContours(
            mask.astype(np.uint8),
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE,
        )
    im_rgb = cv2.drawContours(im_rgb, contours, -1, (0, 255, 0), 2)
    ax[2].imshow(im_rgb)
    ax[2].axis('off')
    fig.savefig(op_fname, dpi=250)
    plt.close(fig)
Plot and save a collage of input, mask, overlay

:param np.array input_image: 2D input image
:param np.array mask: 2D mask image
:param str op_fname: fname with full path for saving the collage as a jpg
:param float alpha: opacity/transparency for the mask overlay
https://github.com/czbiohub/microdl/blob/d321807137f986a88f75258e44415206c7f6e481/micro_dl/plotting/plot_utils.py#L196-L235
import cv2 import glob import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import natsort import numpy as np import os from micro_dl.utils.normalize import hist_clipping def save_predicted_images(input_batch, target_batch, pred_batch, output_dir, batch_idx=None, output_fname=None, ext='jpg', clip_limits=1, font_size=15): if not os.path.exists(output_dir): os.makedirs(output_dir, exist_ok=True) batch_size = len(input_batch) if batch_size == 1: assert output_fname is not None, 'need fname for saving image' fname = os.path.join(output_dir, '{}.{}'.format(output_fname, ext)) if batch_size != 1: assert len(input_batch.shape) == 4, 'saves 2D images only' for img_idx in range(batch_size): cur_input = input_batch[img_idx] cur_target = target_batch[img_idx] cur_prediction = pred_batch[img_idx] n_channels = cur_input.shape[0] fig, ax = plt.subplots(n_channels, 3) fig.set_size_inches((15, 5 * n_channels)) axis_count = 0 for channel_idx in range(n_channels): cur_im = hist_clipping( cur_input[channel_idx], clip_limits, 100 - clip_limits, ) ax[axis_count].imshow(cur_im, cmap='gray') ax[axis_count].axis('off') if axis_count == 0: ax[axis_count].set_title('Input', fontsize=font_size) axis_count += 1 cur_im = cur_target[channel_idx] ax[axis_count].imshow(cur_im, cmap='gray') ax[axis_count].axis('off') if axis_count == 1: ax[axis_count].set_title('Target', fontsize=font_size) axis_count += 1 cur_im = hist_clipping( cur_prediction[channel_idx], clip_limits, 100 - clip_limits, ) ax[axis_count].imshow(cur_im, cmap='gray') ax[axis_count].axis('off') if axis_count == 2: ax[axis_count].set_title('Prediction', fontsize=font_size) axis_count += 1 if batch_size != 1: fname = os.path.join( output_dir, '{}.{}'.format(str(batch_idx * batch_size + img_idx), ext) ) fig.savefig(fname, dpi=300, bbox_inches='tight') plt.close(fig) def save_center_slices(image_dir, pos_idx, save_path, mean_std=None, clip_limits=1, margin=20, z_scale=5, z_range=None, channel_str=None, font_size=15, color_map='gray', fig_title=None): search_str = os.path.join(image_dir, "*p{:03d}*".format(pos_idx)) slice_names = natsort.natsorted(glob.glob(search_str)) if channel_str is not None: slice_names = [s for s in slice_names if channel_str in s] if z_range is not None: assert len(z_range) == 2, 'Z-range must consist of two values' slice_names = slice_names[z_range[0]:z_range[1]] assert len(slice_names) > 0, "Couldn't find images with given search criteria" im_stack = [] for im_z in slice_names: im_stack.append(cv2.imread(im_z, cv2.IMREAD_ANYDEPTH)) im_stack = np.stack(im_stack, axis=-1) im_norm = im_stack if isinstance(mean_std, tuple): im_norm = im_stack / im_stack.std() * mean_std[0] im_norm = im_norm - im_norm.mean() + mean_std[1] im_norm[im_norm < 0] = 0. 
im_norm = im_norm.astype(np.uint16) center_slice = hist_clipping( im_norm[..., int(len(slice_names) // 2)], clip_limits, 100 - clip_limits, ) im_shape = im_stack.shape canvas = center_slice.max() * np.ones( (im_shape[0] + im_shape[2] * z_scale + margin, im_shape[1] + im_shape[2] * z_scale + margin), dtype=np.uint16, ) canvas[0:im_shape[0], 0:im_shape[1]] = center_slice yz_slice = hist_clipping( np.squeeze(im_norm[:, int(im_shape[1] // 2), :]), clip_limits, 100 - clip_limits, ) yz_shape = yz_slice.shape yz_slice = cv2.resize(yz_slice, (yz_shape[1] * int(z_scale), yz_shape[0])) canvas[0:yz_shape[0], im_shape[1] + margin:] = yz_slice xy_slice = hist_clipping( np.squeeze(im_norm[int(im_shape[1] // 2), :, :]), clip_limits, 100 - clip_limits, ) xy_shape = xy_slice.shape xy_slice = cv2.resize(xy_slice, (xy_shape[1] * int(z_scale), xy_shape[0])) xy_slice = np.rot90(xy_slice) canvas[im_shape[0] + margin:, 0:xy_slice.shape[1]] = xy_slice plt.imshow(canvas, cmap=color_map) plt.axis('off') if fig_title is not None: plt.title(fig_title, fontsize=font_size) plt.savefig(save_path, dpi=300, bbox_inches='tight') plt.close()
MIT License
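A small sketch that feeds save_mask_overlay a synthetic image and binary mask; the output path is arbitrary, and the try/except inside the function handles both OpenCV 3.x and 4.x contour signatures.

import numpy as np
from micro_dl.plotting.plot_utils import save_mask_overlay

image = np.random.rand(128, 128).astype(np.float32)
mask = np.zeros((128, 128), dtype=np.uint8)
mask[32:96, 32:96] = 1  # a square foreground region

save_mask_overlay(image, mask, '/tmp/overlay.jpg')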
tianzerl/pyanime4k
pyanime4k/ac.py
AC.list_GPUs
python
def list_GPUs():
    c_length = ctypes.c_size_t()
    c_ac.acListGPUs(None, ctypes.pointer(c_length), None, None)
    length = c_length.value
    info = (ctypes.c_char * length)()
    c_ac.acListGPUs(info, None, None, None)
    print(ctypes.string_at(info).decode())
Print platforms and GPUs info.
https://github.com/tianzerl/pyanime4k/blob/dc379805819db0718d6b044e59f2e6497af48f1d/pyanime4k/ac.py#L523-L532
from pyanime4k.wrapper import * from pyanime4k.error import ACError import numpy as np import multiprocessing (AC_INPUT_BGR, AC_INPUT_RGB, AC_INPUT_YUV444, AC_INPUT_RGB32, AC_INPUT_BGR32) = ( 0, 1, 2, 3, 4, ) (AC_PROCESS_PAUSED, AC_PROCESS_STOP, AC_PROCESS_RUNNING) = (0, 1, 2) class Version(object): pyanime4k = "2.5.2" def __init__(self): ac_version = c_ac.acGetVersion() self.core = str(ac_version.coreVersion, "utf-8") self.wrapper = str(ac_version.wrapperVersion, "utf-8") def __str__(self): return ( f"Version information:\n" f" pyanime4k: {self.pyanime4k}\n" f" core: {self.core}\n" f" C wrapper: {self.wrapper}" ) class Parameters(object): def __init__(self): self.passes = 2 self.pushColorCount = 2 self.strengthColor = 0.3 self.strengthGradient = 1.0 self.zoomFactor = 2.0 self.fastMode = False self.videoMode = False self.preprocessing = False self.postprocessing = False self.preFilters = 4 self.postFilters = 40 self.maxThreads = multiprocessing.cpu_count() self.HDN = False self.HDNLevel = 1 self.alpha = False def reset(self): self.passes = 2 self.pushColorCount = 2 self.strengthColor = 0.3 self.strengthGradient = 1.0 self.zoomFactor = 2.0 self.fastMode = False self.videoMode = False self.preprocessing = False self.postprocessing = False self.preFilters = 4 self.postFilters = 40 self.maxThreads = multiprocessing.cpu_count() self.HDN = False self.HDNLevel = 1 self.alpha = False def print_info(self): print( "passes: %d\n" "pushColorCount: %d\n" "strengthColor: %.2f\n" "strengthGradient: %.2f\n" "zoomFactor: %.2f\n" "fastMode: %r\n" "videoMode: %r\n" "preprocessing: %r\n" "postprocessing: %r\n" "preFilters: %d\n" "postFilters: %d\n" "maxThreads: %d\n" "HDN: %r\n" "HDNLevel %r\n" "alpha %r" % ( self.passes, self.pushColorCount, self.strengthColor, self.strengthGradient, self.zoomFactor, self.fastMode, self.videoMode, self.preprocessing, self.postprocessing, self.preFilters, self.postFilters, self.maxThreads, self.HDN, self.HDNLevel, self.alpha, ) ) class ProcessorType(object): CPU_Anime4K09 = AC_CPU_Anime4K09 CPU_ACNet = AC_CPU_ACNet OpenCL_Anime4K09 = AC_OpenCL_Anime4K09 OpenCL_ACNet = AC_OpenCL_ACNet Cuda_Anime4K09 = AC_Cuda_Anime4K09 Cuda_ACNet = AC_Cuda_ACNet type_code_str = { CPU_Anime4K09: " CPU_Anime4K09", CPU_ACNet: "CPU_ACNet", OpenCL_Anime4K09: "OpenCL_Anime4K09", OpenCL_ACNet: "OpenCL_ACNet", Cuda_Anime4K09: "Cuda_Anime4K09", Cuda_ACNet: "Cuda_ACNet", } class Codec(object): OTHER = AC_OTHER MP4V = AC_MP4V DXVA = AC_DXVA AVC1 = AC_AVC1 VP09 = AC_VP09 HEVC = AC_HEVC AV01 = AC_AV01 class CNNType(object): AC_Default = 0 AC_ACNetHDNL0 = 1 AC_ACNetHDNL1 = 2 AC_ACNetHDNL2 = 3 AC_ACNetHDNL3 = 4 class GPGPUModel(object): AC_CUDA = 0 AC_OpenCL = 1 class OpenCLAnime4K09Manager(object): def __init__( self, pID: int = 0, dID: int = 0, OpenCLQueueNum: int = 4, OpenCLParallelIO: bool = False, ): self.pID = pID self.dID = dID self.OpenCLQueueNum = OpenCLQueueNum self.OpenCLParallelIO = OpenCLParallelIO def _get_manager_data(self): manager_data = ac_OpenCLAnime4K09Data() manager_data.pID = ctypes.c_uint(self.pID) manager_data.dID = ctypes.c_uint(self.dID) manager_data.OpenCLQueueNum = ctypes.c_int(self.OpenCLQueueNum) manager_data.OpenCLParallelIO = ctypes.c_int(self.OpenCLParallelIO) return manager_data class OpenCLACNetManager(object): def __init__( self, pID: int = 0, dID: int = 0, CNN: int = CNNType.AC_Default, OpenCLQueueNum: int = 4, OpenCLParallelIO: bool = False, ): self.pID = pID self.dID = dID self.CNNType = CNN self.OpenCLQueueNum = OpenCLQueueNum self.OpenCLParallelIO = OpenCLParallelIO 
def _get_manager_data(self): manager_data = ac_OpenCLACNetData() manager_data.pID = ctypes.c_uint(self.pID) manager_data.dID = ctypes.c_uint(self.dID) manager_data.OpenCLQueueNum = ctypes.c_int(self.OpenCLQueueNum) manager_data.OpenCLParallelIO = ctypes.c_int(self.OpenCLParallelIO) manager_data.CNNType = ctypes.c_int(self.CNNType) return manager_data class CUDAManager(object): def __init__(self, dID: int = 0): self.dID = dID def _get_manager_data(self): manager_data = ac_CUDAData() manager_data.dID = ctypes.c_uint(self.dID) return manager_data class ManagerList(object): def __init__(self, managerList: list): self._manager_data = ac_managerData() self._manager_mask = 0 for manager in managerList: if isinstance(manager, OpenCLAnime4K09Manager): self._manager_mask |= ac_manager.AC_Manager_OpenCL_Anime4K09 self._manager_data.OpenCLAnime4K09Data = ctypes.pointer( manager._get_manager_data() ) elif isinstance(manager, OpenCLACNetManager): self._manager_mask |= ac_manager.AC_Manager_OpenCL_ACNet self._manager_data.OpenCLACNetData = ctypes.pointer( manager._get_manager_data() ) elif isinstance(manager, CUDAManager): self._manager_mask |= ac_manager.AC_Manager_Cuda self._manager_data.CUDAData = ctypes.pointer( manager._get_manager_data() ) def get_managers(self): return self._manager_mask def get_manager_data(self): return self._manager_data class AC(object): def __get_c_parameters(self, parameters): c_struct = ac_parameters() c_struct.passes = ctypes.c_int(parameters.passes) c_struct.pushColorCount = ctypes.c_int(parameters.pushColorCount) c_struct.strengthColor = ctypes.c_double(parameters.strengthColor) c_struct.strengthGradient = ctypes.c_double(parameters.strengthGradient) c_struct.zoomFactor = ctypes.c_double(parameters.zoomFactor) c_struct.fastMode = ctypes.c_int(parameters.fastMode) c_struct.videoMode = ctypes.c_int(parameters.videoMode) c_struct.preprocessing = ctypes.c_int(parameters.preprocessing) c_struct.postprocessing = ctypes.c_int(parameters.postprocessing) c_struct.preFilters = ctypes.c_uint8(parameters.preFilters) c_struct.postFilters = ctypes.c_uint8(parameters.postFilters) c_struct.maxThreads = ctypes.c_uint(parameters.maxThreads) c_struct.HDN = ctypes.c_int(parameters.HDN) c_struct.HDNLevel = ctypes.c_int(parameters.HDNLevel) c_struct.alpha = ctypes.c_int(parameters.alpha) return c_struct def __init__( self, managerList: ManagerList = None, parameters: Parameters = Parameters(), type: ProcessorType = ProcessorType.CPU_ACNet, ): err = ctypes.c_int(AC_OK) self.ac_object = c_ac.acGetInstance2( ctypes.c_uint(managerList.get_managers() if managerList is not None else 0), ctypes.byref(managerList.get_manager_data()) if managerList is not None else None, ctypes.byref(self.__get_c_parameters(parameters)), ctypes.c_int(type), ctypes.pointer(err), ) if err.value != AC_OK: raise ACError(err.value) self.parameters = parameters self.ac_object = ctypes.c_void_p(self.ac_object) self.input_type = AC_INPUT_BGR self.processor_type = type self.process_status = AC_PROCESS_STOP def __del__(self): c_ac.acFreeInstance2(self.ac_object) @staticmethod def get_version() -> Version: return Version() def get_processor_type(self) -> str: return ProcessorType.type_code_str[self.processor_type] def set_video_mode(self, flag: bool = True): err = c_ac.acSetVideoMode(self.ac_object, ctypes.c_int(flag)) if err != AC_OK: raise ACError(err) self.parameters.videoMode = flag def set_arguments(self, parameters: Parameters): err = c_ac.acSetArguments( self.ac_object, ctypes.byref(self.__get_c_parameters(parameters)) ) 
if err != AC_OK: raise ACError(err) self.parameters = parameters def load_image(self, src_path: str): err = c_ac.acLoadImage(self.ac_object, ctypes.c_char_p(src_path.encode('utf-8'))) if err != AC_OK: raise ACError(err) def load_video(self, src_path: str): if self.parameters.videoMode is False: raise ACError(AC_ERROR_VIDEO_MODE_UNINIT) err = c_ac.acLoadVideo(self.ac_object, ctypes.c_char_p(src_path.encode('utf-8'))) if err != AC_OK: raise ACError(err) def set_save_video_info( self, dst_path: str, codec: Codec = Codec.MP4V, fps: float = 0 ): err = c_ac.acSetSaveVideoInfo( self.ac_object, ctypes.c_char_p(dst_path.encode('utf-8')), ctypes.c_int(codec), ctypes.c_double(fps), ) if err != AC_OK: raise ACError(err) def process(self): self.process_status = AC_PROCESS_RUNNING err = c_ac.acProcess(self.ac_object) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_STOP def process_with_progress(self): self.process_status = AC_PROCESS_RUNNING err = c_ac.acProcessWithPrintProgress(self.ac_object) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_STOP def process_with_progress_callback(self, func): self.process_status = AC_PROCESS_RUNNING c_callback = ctypes.CFUNCTYPE(None, ctypes.c_double) err = c_ac.acProcessWithProgress(self.ac_object, c_callback(func)) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_STOP def process_with_progress_time_callback(self, func): self.process_status = AC_PROCESS_RUNNING c_callback = ctypes.CFUNCTYPE(None, ctypes.c_double, ctypes.c_double) err = c_ac.acProcessWithProgressTime(self.ac_object, c_callback(func)) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_STOP def pause_video_process(self): err = c_ac.acPauseVideoProcess(self.ac_object) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_PAUSED def continue_video_process(self): err = c_ac.acContinueVideoProcess(self.ac_object) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_RUNNING def stop_video_process(self): err = c_ac.acStopVideoProcess(self.ac_object) if err != AC_OK: raise ACError(err) self.process_status = AC_PROCESS_STOP def get_process_status(self) -> int: return self.process_status def save_image(self, dst_path: str): err = c_ac.acSaveImage(self.ac_object, ctypes.c_char_p(dst_path.encode('utf-8'))) if err != AC_OK: raise ACError(err) def save_video(self): err = c_ac.acSaveVideo(self.ac_object) if err != AC_OK: raise ACError(err) def show_image(self): err = c_ac.acShowImage(self.ac_object, ctypes.c_int(self.input_type)) if err != AC_OK: raise ACError(err) def init_GPU(self): err = c_ac.acInitGPU() if err != AC_OK: raise ACError(err) def release_GPU(self): c_ac.acReleaseGPU() def is_initialized_GPU(self) -> bool: flag = c_ac.acIsInitializedGPU() return bool(flag) def init_GPUCNN(self): err = c_ac.acInitGPUCNN() if err != AC_OK: raise ACError(err) def release_GPUCNN(self): c_ac.acReleaseGPUCNN() def is_initialized_GPUCNN(self) -> bool: flag = c_ac.acIsInitializedGPUCNN() return bool(flag) @staticmethod
MIT License
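A one-line sketch of list_GPUs; it assumes the Anime4KCPP core shared library that pyanime4k wraps is installed, since the call goes straight to the C API.

from pyanime4k import ac

ac.AC.list_GPUs()  # prints available OpenCL platforms and GPU devices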
hackertogether/tiktok-crawler
tiktok/hot/trend.py
trend
python
def trend(hot_trend_url, count, **kwargs):
    offset = 0
    while True:
        common_parameter['cursor'] = str(offset)
        result = fetch(hot_trend_url, params=common_parameter, **kwargs)
        category_list = result.get('category_list')
        datetime = parse_datetime(result.get('extra', {}).get('now'))
        final = []
        for item in category_list:
            if item.get('desc') == '热门话题':
                final.append(data_to_topic(item.get('challenge_info', {})))
            if item.get('desc') == '热门音乐':
                final.append(data_to_music(item.get('music_info', {})))
        yield HotTrend(datetime=datetime, data=final, offset=offset, count=count)
        offset += 10
get trend result
:return:
https://github.com/hackertogether/tiktok-crawler/blob/eba5bb2b0ecf9e9d82084609d04ab53fc1747121/tiktok/hot/trend.py#L8-L27
from tiktok.utils import fetch
from tiktok.config import common_parameter
from tiktok.utils.tranform import data_to_music, data_to_topic
from tiktok.structures.hot import HotTrend
from tiktok.utils.common import parse_datetime
MIT License
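A sketch of consuming the trend generator; the endpoint URL is a placeholder for whatever the crawler's configuration supplies, the attribute names on HotTrend are assumed from the keyword arguments above, and only the first page is taken since the generator pages indefinitely.

from tiktok.hot.trend import trend

hot_trend_url = 'https://example.invalid/aweme/v1/hot/search/trend/'  # placeholder
for page in trend(hot_trend_url, count=10):
    print(page.datetime, page.offset, len(page.data))
    break  # stop after the first batch of hot topics and music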
semiversus/python-broqer
broqer/coro_queue.py
CoroQueue._start_task
python
def _start_task(self, args: Tuple, future: asyncio.Future):
    if self._mode is AsyncMode.LAST_DISTINCT and args == self._last_args:
        self._task = None
        future.set_result(NONE)
        return

    self._last_args = args
    self._task = asyncio.ensure_future(self._coro(*args))
    self._task.add_done_callback(partial(self._handle_done, future))
Start the coroutine as task
https://github.com/semiversus/python-broqer/blob/131a78b4e475c4134bc32e035b833c8b162cdff2/broqer/coro_queue.py#L95-L109
import asyncio from collections import deque from enum import Enum from typing import Any, Deque, Optional, Tuple from functools import partial from broqer import NONE def wrap_coro(coro, unpack, *args, **kwargs): if unpack: async def _coro(value): return await coro(*args, *value, **kwargs) else: async def _coro(value): return await coro(*args, value, **kwargs) return _coro class AsyncMode(Enum): CONCURRENT = 1 INTERRUPT = 2 QUEUE = 3 LAST = 4 LAST_DISTINCT = 5 SKIP = 6 class CoroQueue: def __init__(self, coro, mode=AsyncMode.CONCURRENT): self._coro = coro self._mode = mode self._last_args = None self._task = None self._queue = None if mode in (AsyncMode.QUEUE, AsyncMode.LAST, AsyncMode.LAST_DISTINCT): maxlen = (None if mode is AsyncMode.QUEUE else 1) self._queue = deque(maxlen=maxlen) def schedule(self, *args: Any) -> asyncio.Future: future = asyncio.Future() if self._task is not None: if self._queue is not None: if self._queue.maxlen == 1 and len(self._queue) == 1: _, queued_future = self._queue.popleft() queued_future.set_result(NONE) self._queue.append((args, future)) return future if self._mode is AsyncMode.SKIP: future.set_result(NONE) return future if self._mode is AsyncMode.INTERRUPT and not self._task.done(): self._task.cancel() self._start_task(args, future) return future
MIT License
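A sketch of CoroQueue scheduling in LAST_DISTINCT mode, where a repeated argument scheduled while a call is in flight resolves to broqer.NONE instead of re-running the coroutine; the coroutine is invented, and it assumes the queue's done-handler (not shown in this row) dequeues and starts the next pending call.

import asyncio
from broqer import NONE
from broqer.coro_queue import AsyncMode, CoroQueue

async def double(value):
    await asyncio.sleep(0.1)
    return value * 2

async def main():
    queue = CoroQueue(double, mode=AsyncMode.LAST_DISTINCT)
    futures = [queue.schedule(1), queue.schedule(1), queue.schedule(2)]
    results = await asyncio.gather(*futures)
    print([r is NONE for r in results], results)

asyncio.run(main())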
2ndwatch/cloudendure-python
cloudendure/cloudendure_api/models/cloud_endure_job.py
CloudEndureJob.end_date_time
python
def end_date_time(self):
    return self._end_date_time
Gets the end_date_time of this CloudEndureJob.  # noqa: E501

:return: The end_date_time of this CloudEndureJob.  # noqa: E501
:rtype: datetime
https://github.com/2ndwatch/cloudendure-python/blob/f81d1be1422b7c19adedb06c584803eaaa811919/cloudendure/cloudendure_api/models/cloud_endure_job.py#L205-L212
import pprint import re import six class CloudEndureJob: """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { "status": "str", "participating_machines": "list[str]", "log": "list[object]", "type": "str", "end_date_time": "datetime", "creation_date_time": "datetime", "id": "str", "initiated_by": "str", } attribute_map = { "status": "status", "participating_machines": "participatingMachines", "log": "log", "type": "type", "end_date_time": "endDateTime", "creation_date_time": "creationDateTime", "id": "id", "initiated_by": "initiatedBy", } def __init__( self, status=None, participating_machines=None, log=None, type=None, end_date_time=None, creation_date_time=None, id=None, initiated_by=None, ): self._status = None self._participating_machines = None self._log = None self._type = None self._end_date_time = None self._creation_date_time = None self._id = None self._initiated_by = None self.discriminator = None if status is not None: self.status = status if participating_machines is not None: self.participating_machines = participating_machines if log is not None: self.log = log if type is not None: self.type = type if end_date_time is not None: self.end_date_time = end_date_time if creation_date_time is not None: self.creation_date_time = creation_date_time if id is not None: self.id = id if initiated_by is not None: self.initiated_by = initiated_by @property def status(self): return self._status @status.setter def status(self, status): allowed_values = ["PENDING", "STARTED", "COMPLETED", "FAILED"] if status not in allowed_values: raise ValueError( "Invalid value for `status` ({0}), must be one of {1}".format( status, allowed_values ) ) self._status = status @property def participating_machines(self): return self._participating_machines @participating_machines.setter def participating_machines(self, participating_machines): self._participating_machines = participating_machines @property def log(self): return self._log @log.setter def log(self, log): self._log = log @property def type(self): return self._type @type.setter def type(self, type): allowed_values = [ "CLEANUP", "CUTOVER_LAUNCH", "RECOVERY_LAUNCH", "RESTORE_LAUNCH", "TEST_LAUNCH", "CONSOLIDATE_VMDKS", "FILE_RESTORE", "RECOVERY_PLAN_RECOVERY", "RECOVERY_PLAN_TEST", "RECOVERY_PLAN_CUTOVER", "RECOVERY_PLAN_CLEANUP", ] if type not in allowed_values: raise ValueError( "Invalid value for `type` ({0}), must be one of {1}".format( type, allowed_values ) ) self._type = type @property
MIT License
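A tiny sketch of the generated model's property in use; the job fields here are invented.

import datetime
from cloudendure.cloudendure_api.models.cloud_endure_job import CloudEndureJob

job = CloudEndureJob(status="COMPLETED",
                     end_date_time=datetime.datetime(2021, 5, 1, 12, 0))
print(job.end_date_time)  # value stored through the matching setter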
mit-pdos/secfs-skeleton
secfs/fs.py
read
python
def read(read_as, i, off, size):
    if not isinstance(i, I):
        raise TypeError("{} is not an I, is a {}".format(i, type(i)))
    if not isinstance(read_as, User):
        raise TypeError("{} is not a User, is a {}".format(read_as, type(read_as)))

    if not secfs.access.can_read(read_as, i):
        if i.p.is_group():
            raise PermissionError("cannot read from group-readable file {0} as {1}; user is not in group".format(i, read_as))
        else:
            raise PermissionError("cannot read from user-readable file {0} as {1}".format(i, read_as))

    return get_inode(i).read()[off:off+size]
Read reads [off:off+size] bytes from the file at i.
https://github.com/mit-pdos/secfs-skeleton/blob/835ae6afc0ebd3708b5c08d081d5b4bc7f4165a1/secfs/fs.py#L146-L161
import time import secfs.crypto import secfs.tables import secfs.access import secfs.store.tree import secfs.store.block from secfs.store.inode import Inode from secfs.store.tree import Directory from cryptography.fernet import Fernet from secfs.types import I, Principal, User, Group usermap = {} groupmap = {} owner = None root_i = None def get_inode(i): ihash = secfs.tables.resolve(i) if ihash == None: raise LookupError("asked to resolve i {}, but i does not exist".format(i)) return Inode.load(ihash) def init(owner, users, groups): if not isinstance(owner, User): raise TypeError("{} is not a User, is a {}".format(owner, type(owner))) node = Inode() node.kind = 0 node.ex = True node.ctime = time.time() node.mtime = node.ctime ihash = secfs.store.block.store(node.bytes()) root_i = secfs.tables.modmap(owner, I(owner), ihash) if root_i == None: raise RuntimeError new_ihash = secfs.store.tree.add(root_i, b'.', root_i) secfs.tables.modmap(owner, root_i, new_ihash) new_ihash = secfs.store.tree.add(root_i, b'..', root_i) secfs.tables.modmap(owner, root_i, new_ihash) print("CREATED ROOT AT", new_ihash) init = { b".users": users, b".groups": groups, } import pickle for fn, c in init.items(): bts = pickle.dumps(c) node = Inode() node.kind = 1 node.size = len(bts) node.mtime = node.ctime node.ctime = time.time() node.blocks = [secfs.store.block.store(bts)] ihash = secfs.store.block.store(node.bytes()) i = secfs.tables.modmap(owner, I(owner), ihash) link(owner, i, root_i, fn) return root_i def _create(parent_i, name, create_as, create_for, isdir): if not isinstance(parent_i, I): raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i))) if not isinstance(create_as, User): raise TypeError("{} is not a User, is a {}".format(create_as, type(create_as))) if not isinstance(create_for, Principal): raise TypeError("{} is not a Principal, is a {}".format(create_for, type(create_for))) assert create_as.is_user() assert create_as == create_for or create_for.is_group() if create_for.is_group() and create_for not in groupmap: raise PermissionError("cannot create for unknown group {}".format(create_for)) if not secfs.access.can_write(create_as, parent_i): if parent_i.p.is_group(): raise PermissionError("cannot create in group-writeable directory {0} as {1}; user is not in group".format(parent_i, create_as)) else: raise PermissionError("cannot create in user-writeable directory {0} as {1}".format(parent_i, create_as)) node = Inode() node.ctime = time.time() node.mtime = node.ctime node.kind = 0 if isdir else 1 node.ex = isdir return I(User(0), 0) def create(parent_i, name, create_as, create_for): return _create(parent_i, name, create_as, create_for, False) def mkdir(parent_i, name, create_as, create_for): return _create(parent_i, name, create_as, create_for, True)
MIT License
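A sketch of calling read with the expected argument types, assuming a SecFS instance that has already been initialized and populated; the user id and i-handle here are hypothetical.

import secfs.fs
from secfs.types import I, User

alice = User(1000)
file_i = I(User(0), 5)  # hypothetical i-handle owned by root

try:
    data = secfs.fs.read(alice, file_i, 0, 4096)
    print(data)
except PermissionError as err:
    print("read denied:", err)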
hyangwinter/flownet3d_pytorch
util.py
sample_and_group_all
python
def sample_and_group_all(xyz, points):
    device = xyz.device
    B, N, C = xyz.shape
    new_xyz = torch.zeros(B, 1, C).to(device)
    grouped_xyz = xyz.view(B, 1, N, C)
    if points is not None:
        new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points
Input:
    xyz: input points position data, [B, N, C]
    points: input points data, [B, N, D]
Return:
    new_xyz: sampled points position data, [B, 1, C]
    new_points: sampled points data, [B, 1, N, C+D]
https://github.com/hyangwinter/flownet3d_pytorch/blob/ae0847d242d3582b3f6f115e64f61e637ef80355/util.py#L191-L208
import torch import torch.nn as nn import torch.nn.functional as F from time import time import numpy as np from lib import pointnet2_utils as pointutils def quat2mat(quat): x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3] B = quat.size(0) w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) wx, wy, wz = w*x, w*y, w*z xy, xz, yz = x*y, x*z, y*z rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz, 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx, 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3) return rotMat def transform_point_cloud(point_cloud, rotation, translation): if len(rotation.size()) == 2: rot_mat = quat2mat(rotation) else: rot_mat = rotation return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2) def npmat2euler(mats, seq='zyx'): eulers = [] for i in range(mats.shape[0]): r = Rotation.from_dcm(mats[i]) eulers.append(r.as_euler(seq, degrees=True)) return np.asarray(eulers, dtype='float32') def timeit(tag, t): print("{}: {}s".format(tag, time() - t)) return time() def pc_normalize(pc): l = pc.shape[0] centroid = np.mean(pc, axis=0) pc = pc - centroid m = np.max(np.sqrt(np.sum(pc**2, axis=1))) pc = pc / m return pc def square_distance(src, dst): B, N, _ = src.shape _, M, _ = dst.shape dist = -2 * torch.matmul(src, dst.permute(0, 2, 1)) dist += torch.sum(src ** 2, -1).view(B, N, 1) dist += torch.sum(dst ** 2, -1).view(B, 1, M) return dist def index_points(points, idx): device = points.device B = points.shape[0] view_shape = list(idx.shape) view_shape[1:] = [1] * (len(view_shape) - 1) repeat_shape = list(idx.shape) repeat_shape[0] = 1 batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape) new_points = points[batch_indices, idx, :] return new_points def farthest_point_sample(xyz, npoint): device = xyz.device B, N, C = xyz.shape centroids = torch.zeros(B, npoint, dtype=torch.long).to(device) distance = torch.ones(B, N).to(device) * 1e10 farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) batch_indices = torch.arange(B, dtype=torch.long).to(device) for i in range(npoint): centroids[:, i] = farthest centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) dist = torch.sum((xyz - centroid) ** 2, -1) mask = dist < distance distance[mask] = dist[mask] farthest = torch.max(distance, -1)[1] return centroids def knn_point(k, pos1, pos2): B, N, C = pos1.shape M = pos2.shape[1] pos1 = pos1.view(B,1,N,-1).repeat(1,M,1,1) pos2 = pos2.view(B,M,1,-1).repeat(1,1,N,1) dist = torch.sum(-(pos1-pos2)**2,-1) val,idx = dist.topk(k=k,dim = -1) return torch.sqrt(-val), idx def query_ball_point(radius, nsample, xyz, new_xyz): device = xyz.device B, N, C = xyz.shape _, S, _ = new_xyz.shape group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1]) sqrdists = square_distance(new_xyz, xyz) group_idx[sqrdists > radius ** 2] = N mask = group_idx != N cnt = mask.sum(dim=-1) group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample] group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample]) mask = group_idx == N group_idx[mask] = group_first[mask] return group_idx, cnt def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False): B, N, C = xyz.shape S = npoint fps_idx = farthest_point_sample(xyz, npoint) new_xyz = index_points(xyz, fps_idx) idx, _ = query_ball_point(radius, nsample, xyz, new_xyz) grouped_xyz = index_points(xyz, idx) grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C) if points is not None: grouped_points = index_points(points, 
idx) new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) else: new_points = grouped_xyz_norm if returnfps: return new_xyz, new_points, grouped_xyz, fps_idx else: return new_xyz, new_points
MIT License
square/connect-python-sdk
squareconnect/models/v1_order.py
V1Order.subtotal_money
python
def subtotal_money(self, subtotal_money): self._subtotal_money = subtotal_money
Sets the subtotal_money of this V1Order. The amount of all items purchased in the order, before taxes and shipping. :param subtotal_money: The subtotal_money of this V1Order. :type: V1Money
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/v1_order.py#L294-L303
from pprint import pformat from six import iteritems import re class V1Order(object): def __init__(self, errors=None, id=None, buyer_email=None, recipient_name=None, recipient_phone_number=None, state=None, shipping_address=None, subtotal_money=None, total_shipping_money=None, total_tax_money=None, total_price_money=None, total_discount_money=None, created_at=None, updated_at=None, expires_at=None, payment_id=None, buyer_note=None, completed_note=None, refunded_note=None, canceled_note=None, tender=None, order_history=None, promo_code=None, btc_receive_address=None, btc_price_satoshi=None): self.swagger_types = { 'errors': 'list[Error]', 'id': 'str', 'buyer_email': 'str', 'recipient_name': 'str', 'recipient_phone_number': 'str', 'state': 'str', 'shipping_address': 'Address', 'subtotal_money': 'V1Money', 'total_shipping_money': 'V1Money', 'total_tax_money': 'V1Money', 'total_price_money': 'V1Money', 'total_discount_money': 'V1Money', 'created_at': 'str', 'updated_at': 'str', 'expires_at': 'str', 'payment_id': 'str', 'buyer_note': 'str', 'completed_note': 'str', 'refunded_note': 'str', 'canceled_note': 'str', 'tender': 'V1Tender', 'order_history': 'list[V1OrderHistoryEntry]', 'promo_code': 'str', 'btc_receive_address': 'str', 'btc_price_satoshi': 'float' } self.attribute_map = { 'errors': 'errors', 'id': 'id', 'buyer_email': 'buyer_email', 'recipient_name': 'recipient_name', 'recipient_phone_number': 'recipient_phone_number', 'state': 'state', 'shipping_address': 'shipping_address', 'subtotal_money': 'subtotal_money', 'total_shipping_money': 'total_shipping_money', 'total_tax_money': 'total_tax_money', 'total_price_money': 'total_price_money', 'total_discount_money': 'total_discount_money', 'created_at': 'created_at', 'updated_at': 'updated_at', 'expires_at': 'expires_at', 'payment_id': 'payment_id', 'buyer_note': 'buyer_note', 'completed_note': 'completed_note', 'refunded_note': 'refunded_note', 'canceled_note': 'canceled_note', 'tender': 'tender', 'order_history': 'order_history', 'promo_code': 'promo_code', 'btc_receive_address': 'btc_receive_address', 'btc_price_satoshi': 'btc_price_satoshi' } self._errors = errors self._id = id self._buyer_email = buyer_email self._recipient_name = recipient_name self._recipient_phone_number = recipient_phone_number self._state = state self._shipping_address = shipping_address self._subtotal_money = subtotal_money self._total_shipping_money = total_shipping_money self._total_tax_money = total_tax_money self._total_price_money = total_price_money self._total_discount_money = total_discount_money self._created_at = created_at self._updated_at = updated_at self._expires_at = expires_at self._payment_id = payment_id self._buyer_note = buyer_note self._completed_note = completed_note self._refunded_note = refunded_note self._canceled_note = canceled_note self._tender = tender self._order_history = order_history self._promo_code = promo_code self._btc_receive_address = btc_receive_address self._btc_price_satoshi = btc_price_satoshi @property def errors(self): return self._errors @errors.setter def errors(self, errors): self._errors = errors @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def buyer_email(self): return self._buyer_email @buyer_email.setter def buyer_email(self, buyer_email): self._buyer_email = buyer_email @property def recipient_name(self): return self._recipient_name @recipient_name.setter def recipient_name(self, recipient_name): self._recipient_name = recipient_name @property def 
recipient_phone_number(self): return self._recipient_phone_number @recipient_phone_number.setter def recipient_phone_number(self, recipient_phone_number): self._recipient_phone_number = recipient_phone_number @property def state(self): return self._state @state.setter def state(self, state): self._state = state @property def shipping_address(self): return self._shipping_address @shipping_address.setter def shipping_address(self, shipping_address): self._shipping_address = shipping_address @property def subtotal_money(self): return self._subtotal_money @subtotal_money.setter
Apache License 2.0
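A minimal usage sketch for the setter above (the V1Money import path and its amount/currency_code fields are assumptions based on the generated SDK layout):

from squareconnect.models.v1_money import V1Money
from squareconnect.models.v1_order import V1Order

order = V1Order()
# amount is assumed to be in the currency's smallest unit (1500 == 15.00 USD)
order.subtotal_money = V1Money(amount=1500, currency_code='USD')
print(order.subtotal_money.amount)  # 1500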
kineticadb/kinetica-api-python
examples/table_monitor_example.py
GPUdbTableMonitorExample.on_delete
python
def on_delete(self, count): self._logger.info("Delete count : %s " % count)
Callback method which is invoked with the number of records deleted, as received from the table monitor when records are deleted. Args: count (int): The actual number of records deleted.
https://github.com/kineticadb/kinetica-api-python/blob/530dd3fb73a10a035fe5ec25e4260be6f0514017/examples/table_monitor_example.py#L298-L305
from __future__ import print_function import argparse import datetime import logging import random import time import gpudb from gpudb import GPUdbColumnProperty as GCP, GPUdbRecordColumn as GRC, GPUdbTableMonitor def load_data(table_name): city_data = [ ["Washington", "DC", "USA", -77.016389, 38.904722, 58.5, "UTC-5"], ["Paris", "TX", "USA", -95.547778, 33.6625, 64.6, "UTC-6"], ["Memphis", "TN", "USA", -89.971111, 35.1175, 63, "UTC-6"], ["Sydney", "Nova Scotia", "Canada", -60.19551, 46.13631, 44.5, "UTC-4"], ["La Paz", "Baja California Sur", "Mexico", -110.310833, 24.142222, 77, "UTC-7"], ["St. Petersburg", "FL", "USA", -82.64, 27.773056, 74.5, "UTC-5"], ["Oslo", "--", "Norway", 10.75, 59.95, 45.5, "UTC+1"], ["Paris", "--", "France", 2.3508, 48.8567, 56.5, "UTC+1"], ["Memphis", "--", "Egypt", 31.250833, 29.844722, 73, "UTC+2"], ["St. Petersburg", "--", "Russia", 30.3, 59.95, 43.5, "UTC+3"], ["Lagos", "Lagos", "Nigeria", 3.384082, 6.455027, 83, "UTC+1"], ["La Paz", "Pedro Domingo Murillo", "Bolivia", -68.15, -16.5, 44, "UTC-4"], ["Sao Paulo", "Sao Paulo", "Brazil", -46.633333, -23.55, 69.5, "UTC-3"], ["Santiago", "Santiago Province", "Chile", -70.666667, -33.45, 62, "UTC-4"], ["Buenos Aires", "--", "Argentina", -58.381667, -34.603333, 65, "UTC-3"], ["Manaus", "Amazonas", "Brazil", -60.016667, -3.1, 83.5, "UTC-4"], ["Sydney", "New South Wales", "Australia", 151.209444, -33.865, 63.5, "UTC+10"], ["Auckland", "--", "New Zealand", 174.74, -36.840556, 60.5, "UTC+12"], ["Jakarta", "--", "Indonesia", 106.816667, -6.2, 83, "UTC+7"], ["Hobart", "--", "Tasmania", 147.325, -42.880556, 56, "UTC+10"], ["Perth", "Western Australia", "Australia", 115.858889, -31.952222, 68, "UTC+8"] ] history_table = gpudb.GPUdbTable(name=table_name, db=h_db) random.seed(0) for iter in range(5): city_updates = [] cities = random.sample(city_data, k=random.randint(1, int(len(city_data) / 2))) for city in cities: city_update = list(city) city_update[5] = city_update[5] + random.randrange(-10, 10) city_update.append(datetime.datetime.now()) city_updates.append(city_update) print print("[Main/Loader] Inserting <%s> new city temperatures..." 
% len(city_updates)) history_table.insert_records(city_updates) time.sleep(2) def create_table(table_name): schema_option = {"collection_name": "examples"} columns = [ ["city", GRC._ColumnType.STRING, GCP.CHAR16], ["state_province", GRC._ColumnType.STRING, GCP.CHAR32], ["country", GRC._ColumnType.STRING, GCP.CHAR16], ["x", GRC._ColumnType.DOUBLE], ["y", GRC._ColumnType.DOUBLE], ["temperature", GRC._ColumnType.DOUBLE], ["time_zone", GRC._ColumnType.STRING, GCP.CHAR8], ["ts", GRC._ColumnType.STRING, GCP.DATETIME] ] gpudb.GPUdbTable( columns, name="table_monitor_history", options=schema_option, db=h_db ) def clear_table(table_name): h_db.clear_table(table_name) def delete_records(h_db, table_name): print("In delete records ...") history_table = gpudb.GPUdbTable(name=table_name, db=h_db) pre_delete_records = history_table.size() print("Records before = %s" % pre_delete_records) delete_expr = ["state_province = 'Sao Paulo'"] history_table.delete_records(expressions=delete_expr) post_delete_records = history_table.size() print("Records after = %s" % post_delete_records) return pre_delete_records - post_delete_records class GPUdbTableMonitorExample(GPUdbTableMonitor.Client): def __init__(self, db, table_name, options=None): callbacks = [ GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_RAW, self.on_insert_raw, self.on_error), GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_DECODED, self.on_insert_decoded, self.on_error, GPUdbTableMonitor.Callback.InsertDecodedOptions( GPUdbTableMonitor.Callback.InsertDecodedOptions.DecodeFailureMode.ABORT )), GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.UPDATED, self.on_update, self.on_error), GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.DELETED, self.on_delete, self.on_error), GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.TABLE_DROPPED, self.on_table_dropped, self.on_error), GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.TABLE_ALTERED, self.on_table_altered, self.on_error) ] super(GPUdbTableMonitorExample, self).__init__(db, table_name, callback_list=callbacks, options=options) def on_insert_raw(self, record): self._logger.info("Raw payload received is : %s " % record) def on_insert_decoded(self, record): self._logger.info("Decoded payload received is : %s " % record) def on_update(self, count): self._logger.info("Update count : %s " % count)
MIT License
karlch/vimiv
vimiv/commandline.py
CommandLine._on_activate
python
def _on_activate(self, entry): if self._app["completions"].activate_tab_completion(): return command = entry.get_text() self.set_text("") if self._app.debug: self._app["log"].write_message("commandline", command) if command in self._history: self._history.remove(command) self._history.insert(0, command) self._pos = 0 if command.lstrip(":") in self.commands.aliases: command = self.commands.aliases[command.lstrip(":")] self._run(command)
Handle input from the entry when activated. Check for type of command (internal, external or path) and run the correct function. Args: entry: The Gtk.Entry from which the command comes.
https://github.com/karlch/vimiv/blob/acbb83e003805e5304131be1f73d7f66528606d6/vimiv/commandline.py#L82-L111
import os import re from subprocess import PIPE, Popen from threading import Thread from gi.repository import GLib, GObject, Gtk from vimiv.commands import Commands from vimiv.exceptions import ArgumentAmountError, NoSearchResultsError from vimiv.helpers import (error_message, expand_filenames, read_file, get_user_data_dir) from vimiv.settings import settings class CommandLine(Gtk.Entry): def __init__(self, app): super(CommandLine, self).__init__() self._app = app self.commands = None self.connect("activate", self._on_activate) self.connect("key_press_event", self._app["eventhandler"].on_key_press, "COMMAND") self.connect("changed", self._on_text_changed) self.set_hexpand(True) self.search = Search(self) datadir = os.path.join(get_user_data_dir(), "vimiv") os.makedirs(datadir, exist_ok=True) old_histfile = os.path.expanduser("~/.vimiv/history") new_histfile = os.path.join(datadir, "history") if os.path.isfile(old_histfile): if os.path.isfile(new_histfile): message = "There is a history file in the deprecated location" " %s and the new location %s. Using the new one. If you " "want the history from the old file you will have to " "merge it manually." % (old_histfile, new_histfile) error_message(message, running_tests=self._app.running_tests) else: os.rename(old_histfile, new_histfile) message = "Moved the old history file %s " "to the new location %s." % (old_histfile, new_histfile) error_message(message, running_tests=self._app.running_tests) self._history = read_file(os.path.join(datadir, "history")) self._matching_history = [] self._last_index = 0 self._pos = 0 self.running_processes = [] self._last_widget = ""
MIT License
khan/alertlib
timeout.py
setup_parser
python
def setup_parser(): alert.DEFAULT_SEVERITY = logging.ERROR parser = alert.setup_parser() parser.add_argument('-k', '--kill-after', type=int, help=('Also send a KILL signal if COMMAND is still ' 'running this long after the initial signal ' 'was sent.')) parser.add_argument('-s', '--signal', type=int, default=15, help=('The signal to be sent on timeout, as an int. ' 'See "kill -l" for a list of signals.')) parser.add_argument('--cwd', default=None, help=('The directory to change to before running cmd')) parser.add_argument('duration', type=int, help=('How many seconds to let the command run.')) parser.add_argument('command', help=('The command to run')) parser.add_argument('arg', nargs=argparse.REMAINDER, help=('Arguments to the command')) return parser
Create an ArgumentParser for timeout-alerting.
https://github.com/khan/alertlib/blob/975fbd4f322f615bcb3289df6014b7691f8249c8/timeout.py#L24-L50
import argparse import logging import os import signal import subprocess import sys import alert import alertlib
MIT License
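A minimal sketch of parsing arguments with this parser, assuming the base parser from the alert module only contributes optional alerting flags (any required options it adds would also be needed here):

parser = setup_parser()
# 30-second timeout around "sleep 60", sending SIGKILL (signal 9) on timeout
args = parser.parse_args(['-s', '9', '30', 'sleep', '60'])
print(args.signal, args.duration, args.command, args.arg)  # 9 30 sleep ['60']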
dcos/dcos-e2e
src/dcos_e2e_cli/dcos_aws/commands/_common.py
existing_cluster_ids
python
def existing_cluster_ids(aws_region: str) -> Set[str]: ec2 = boto3.resource('ec2', region_name=aws_region) ec2_filter = {'Name': 'tag:' + CLUSTER_ID_TAG_KEY, 'Values': ['*']} state_filter = {'Name': 'instance-state-name', 'Values': ['running']} ec2_instances = ec2.instances.filter(Filters=[ec2_filter, state_filter]) cluster_ids = set() for instance in ec2_instances: tag_dict = _tag_dict(instance=instance) cluster_ids.add(tag_dict[CLUSTER_ID_TAG_KEY]) return cluster_ids
Return the IDs of existing clusters. Args: aws_region: The region to get clusters from.
https://github.com/dcos/dcos-e2e/blob/ab7c4bfd58872f458e5766fff01ca74322441065/src/dcos_e2e_cli/dcos_aws/commands/_common.py#L48-L65
from pathlib import Path from typing import Any, Dict, Set import boto3 from boto3.resources.base import ServiceResource from dcos_e2e.backends import AWS from dcos_e2e.cluster import Cluster from dcos_e2e.distributions import Distribution from dcos_e2e.node import Node, Role from dcos_e2e_cli._vendor.dcos_launch import config, get_launcher from dcos_e2e_cli.common.base_classes import ClusterRepresentation CLUSTER_ID_TAG_KEY = 'dcos_e2e.cluster_id' KEY_NAME_TAG_KEY = 'dcos_e2e.key_name' LINUX_DISTRIBUTIONS = { 'centos-7': Distribution.CENTOS_7, } NODE_TYPE_TAG_KEY = 'dcos_e2e.node_type' NODE_TYPE_MASTER_TAG_VALUE = 'master' NODE_TYPE_AGENT_TAG_VALUE = 'agent' NODE_TYPE_PUBLIC_AGENT_TAG_VALUE = 'public_agent' SSH_USER_TAG_KEY = 'dcos_e2e.ssh_user' WORKSPACE_DIR_TAG_KEY = 'dcos_e2e.workspace_dir' def _tag_dict(instance: ServiceResource) -> Dict[str, str]: tag_dict = dict() if instance.tags is None: return tag_dict for tag in instance.tags: key = tag['Key'] value = tag['Value'] tag_dict[key] = value return tag_dict
Apache License 2.0
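A minimal usage sketch; it assumes boto3 credentials are configured in the environment and that us-west-2 is a region you are allowed to query:

for cluster_id in sorted(existing_cluster_ids(aws_region='us-west-2')):
    print(cluster_id)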
awslabs/dgl-ke
python/dglke/dist_train.py
is_local
python
def is_local(ip_addr): if ip_addr in local_ip4_addr_list(): return True else: return False
Return True if ip_addr is an IPv4 address of the local machine, False otherwise.
https://github.com/awslabs/dgl-ke/blob/30558e069c42038cded08bddd26ac75f153aae75/python/dglke/dist_train.py#L78-L84
import os import stat import sys import subprocess import argparse import socket if os.name != 'nt': import fcntl import struct from .utils import CommonArgParser SCRIPT_FILE = 'dglke_start_kvserver_kvclient.sh' class ArgParser(CommonArgParser): def __init__(self): super(ArgParser, self).__init__() self.add_argument('--path', type=str, help='path of distributed workspace.') self.add_argument('--ssh_key', type=str, help='ssh private key.') self.add_argument('--ip_config', type=str, help='IP configuration file of kvstore.') self.add_argument('--num_client_proc', type=int, default=1, help='Number of client process on each machine.') def get_machine_count(ip_config): with open(ip_config) as f: machine_count = len(f.readlines()) return machine_count def local_ip4_addr_list(): nic = set() for ix in socket.if_nameindex(): name = ix[1] s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: ip = socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, struct.pack('256s', name[:15].encode("UTF-8")))[20:24]) except OSError as e: if e.errno == 99: print("Warning!", "Interface: {}".format(name), "IP address not available for interface.", sep='\n') continue else: raise e nic.add(ip) return nic
Apache License 2.0
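A minimal usage sketch; local_ip4_addr_list relies on fcntl, so this only works on POSIX systems:

if is_local('127.0.0.1'):
    print('address belongs to this machine')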
vlsida/openram
compiler/base/geometry.py
geometry.ur
python
def ur(self): return self.boundary[1]
Return the upper right corner
https://github.com/vlsida/openram/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/base/geometry.py#L121-L123
import debug from vector import vector import tech import math import copy import numpy as np from globals import OPTS from utils import round_to_grid class geometry: def __init__(self, lpp=None): self.width = 0 self.height = 0 if lpp: self.lpp = lpp self.layerNumber = lpp[0] self.layerPurpose = lpp[1] def __str__(self): debug.error("__str__ must be overridden by all geometry types.", 1) def __repr__(self): debug.error("__repr__ must be overridden by all geometry types.", 1) def transform_coords(self, coords, offset, mirr, angle): coordinate = [] for item in coords: x = item[0] * math.cos(angle) - item[1] * mirr * math.sin(angle) + offset[0] y = item[0] * math.sin(angle) + item[1] * mirr * math.cos(angle) + offset[1] coordinate += [[x, y]] return coordinate def normalize(self): (first, second) = self.boundary ll = vector(min(first[0], second[0]), min(first[1], second[1])).snap_to_grid() ur = vector(max(first[0], second[0]), max(first[1], second[1])).snap_to_grid() self.boundary = [ll, ur] def update_boundary(self): self.compute_boundary(self.offset, self.mirror, self.rotate) def compute_boundary(self, offset=vector(0, 0), mirror="", rotate=0): if OPTS.netlist_only: self.boundary = [vector(0, 0), vector(0, 0)] return (ll, ur) = [vector(0, 0), vector(self.width, self.height)] if mirror == "MX": ll = ll.scale(1, -1) ur = ur.scale(1, -1) elif mirror == "MY": ll = ll.scale(-1, 1) ur = ur.scale(-1, 1) elif mirror == "XY": ll = ll.scale(-1, -1) ur = ur.scale(-1, -1) elif mirror == "" or mirror == "R0": pass else: debug.error("Invalid mirroring: {}".format(mirror), -1) if rotate == 0: pass elif rotate == 90: ll = ll.rotate_scale(-1, 1) ur = ur.rotate_scale(-1, 1) elif rotate == 180: ll = ll.scale(-1, -1) ur = ur.scale(-1, -1) elif rotate == 270: ll = ll.rotate_scale(1, -1) ur = ur.rotate_scale(1, -1) else: debug.error("Invalid rotation: {}".format(rotate), -1) self.boundary = [offset + ll, offset + ur] self.normalize() def ll(self): return self.boundary[0]
BSD 3-Clause New or Revised License
pathwaycommons/semantic-search
semantic_search/ncbi.py
_parse_medline
python
def _parse_medline(text: str) -> List[dict]: f = io.StringIO(text) medline_records = Medline.parse(f) return medline_records
Convert text returned with rettype=medline into dict records. See https://www.nlm.nih.gov/bsd/mms/medlineelements.html
https://github.com/pathwaycommons/semantic-search/blob/3581399f53a1efd460795098b432e3f7f36b88bd/semantic_search/ncbi.py#L66-L72
import io import os from pathlib import Path from typing import Any, Dict, List, Generator import logging import time import requests from Bio import Medline from dotenv import load_dotenv from pydantic import BaseSettings from fastapi import HTTPException log = logging.getLogger(__name__) def _compact(input: List) -> List: return [x for x in input if x is not None and x is not False and x != ""] MAX_EFETCH_RETMAX = 10000 dot_env_filepath = Path(__file__).absolute().parent.parent / ".env" load_dotenv(dot_env_filepath) class Settings(BaseSettings): app_name: str = os.getenv("APP_NAME", "") app_version: str = os.getenv("APP_VERSION", "") app_url: str = os.getenv("APP_URL", "") admin_email: str = os.getenv("ADMIN_EMAIL", "") ncbi_eutils_api_key: str = os.getenv("NCBI_EUTILS_API_KEY", "") eutils_base_url: str = os.getenv("EUTILS_BASE_URL", "") eutils_efetch_url: str = eutils_base_url + os.getenv("EUTILS_EFETCH_BASENAME", "") eutils_esummary_url: str = eutils_base_url + os.getenv("EUTILS_ESUMMARY_BASENAME", "") http_request_timeout: int = int(os.getenv("HTTP_REQUEST_TIMEOUT", -1)) settings = Settings() def _safe_request(url: str, method: str = "GET", headers={}, **opts): user_agent = f"{settings.app_name}/{settings.app_version} ({settings.app_url};mailto:{settings.admin_email})" request_headers = {"user-agent": user_agent} request_headers.update(headers) try: r = requests.request( method, url, headers=request_headers, timeout=settings.http_request_timeout, **opts ) r.raise_for_status() except requests.exceptions.Timeout as e: logging.error(f"Timeout error {e}") raise except requests.exceptions.HTTPError as e: logging.error(f"HTTP error {e}; status code: {r.status_code}") raise except requests.exceptions.RequestException as e: logging.error(f"Error in request {e}") raise else: return r
MIT License
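A minimal sketch parsing a tiny MEDLINE-format string; Bio.Medline maps tags such as PMID and TI onto dict keys:

medline_text = 'PMID- 123456\nTI  - An example title.\n\n'
for record in _parse_medline(medline_text):
    print(record.get('PMID'), record.get('TI'))  # 123456 An example title.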
cetic/python-msp430-tools
msp430/asm/mcu_definition_parser.py
MCUDefintitions.word_SYMBOL
python
def word_SYMBOL(self, stack): symbol_name = self.next_word() address = self.next_word() if ':' in address: self.memory_map[symbol_name] = {} for pair in address.split(','): key, value = pair.split(':') self.memory_map[symbol_name][key] = value else: self.memory_map[symbol_name] = {'address':int(address,16)} self.memory_map[symbol_name]['__name__'] = symbol_name self.memory_map[symbol_name]['__type__'] = 'symbol'
Example:: symbol <name> <address> Defines a symbol with the value specified. ``<address>`` can also be a computed value, e.g. ``in:RAM,location:end``. Supported are: ``in:<segment_name>`` and ``location:[start|end]``. These values are computed at load time, i.e. the segments still have the address range specified in the definition (as opposed to the values after the linker has "shrunk" the segments to the size of the actually present data). Note that ``location:end`` is the segment's last address plus one (end is exclusive in this case).
https://github.com/cetic/python-msp430-tools/blob/71a86dd82206aaeb46dcf2c2f4f01c3aeb46eeef/msp430/asm/mcu_definition_parser.py#L306-L334
import rpn import pkgutil class MCUDefintitionError(Exception): def filtered_words(words, mapping): for word in words: yield mapping.get(word, word) class MCUDefintitions(rpn.RPN): def __init__(self): rpn.RPN.__init__(self) self.flags = [] self.memory_maps = {} self.memory_map = None self.order = 0 def address_range(self, range_str): a, b = range_str.split('-') return int(a, 0), int(b, 0) @rpn.word('TEMPLATE-BEGIN') def word_TEMPLATE_BEGIN(self, stack): template = [] while True: word = self.next_word() if word.lower() == 'template-variables': break template.append(word) template_variables = [] while True: word = self.next_word() if word.lower() == 'template-values': break template_variables.append(word) template_row = [] while True: word = self.next_word() if word.lower() == 'template-end': if template_row: raise MCUDefintitionError('number of values in template not a multiple of number of variables') break template_row.append(word) if len(template_row) == len(template_variables): self.memory_maps.update(parse_words(filtered_words(template, dict(zip(template_variables, template_row))))) template_row = [] @rpn.word('PROGRAMMABLE') def word_PROGRAMMABLE(self, stack): if self.memory_map is None: raise MCUDefintitionError('flags outside memory map definition not allowed') self.flags.append('programmable') @rpn.word('READ-ONLY') def word_READ_ONLY(self, stack): if self.memory_map is None: raise MCUDefintitionError('flags outside memory map definition not allowed') self.flags.append('read-only') @rpn.word('MEMORY-MAP-BEGIN') def word_MEMORY_MAP_BEGIN(self, stack): if self.memory_map is not None: raise MCUDefintitionError('MEMORY-MAP-BEGIN without terminating the last map') self.memory_map = {} @rpn.word('MEMORY-MAP-END') def word_MEMORY_MAP_END(self, stack): if '__name__' not in self.memory_map: raise MCUDefintitionError('each memory map requires a NAME') self.memory_maps[self.memory_map['__name__']] = self.memory_map self.memory_map = None @rpn.word('SEGMENT') def word_SEGMENT(self, stack): if self.memory_map is None: raise MCUDefintitionError('SEGMENT outside memory map definition not allowed') segment_name = self.next_word() address = self.next_word() if ':' in address: self.memory_map[segment_name] = {} for pair in address.split(','): key, value = pair.split(':') self.memory_map[segment_name][key] = value else: start, end = self.address_range(address) self.memory_map[segment_name] = {'start':start, 'end':end} self.memory_map[segment_name]['order'] = self.order self.memory_map[segment_name]['flags'] = self.flags self.memory_map[segment_name]['__name__'] = segment_name self.memory_map[segment_name]['__type__'] = 'segment' self.flags = [] self.order += 1 @rpn.word('SYMBOL')
BSD 3-Clause New or Revised License
raelgc/scudcloud
scudcloud/notify2.py
Notification.connect
python
def connect(self, event, callback): if event != 'closed': raise ValueError("'closed' is the only valid value for event", event) self._closed_callback = callback
Set the callback for the notification closing; the only valid value for event is 'closed'. The API is compatible with pynotify.
https://github.com/raelgc/scudcloud/blob/c8b00f4a1fc712c7323a723c45d3f860fd5c60aa/scudcloud/notify2.py#L299-L305
import dbus EXPIRES_DEFAULT = -1 EXPIRES_NEVER = 0 URGENCY_LOW = 0 URGENCY_NORMAL = 1 URGENCY_CRITICAL = 2 urgency_levels = [URGENCY_LOW, URGENCY_NORMAL, URGENCY_CRITICAL] initted = False appname = "" _have_mainloop = False class UninittedError(RuntimeError): pass class UninittedDbusObj(object): def __getattr__(self, name): raise UninittedError("You must call notify2.init() before using the " "notification features.") dbus_iface = UninittedDbusObj() def init(app_name, mainloop=None): global appname, initted, dbus_iface, _have_mainloop if mainloop == 'glib': from dbus.mainloop.glib import DBusGMainLoop mainloop = DBusGMainLoop() elif mainloop == 'qt': from dbus.mainloop.qt import DBusQtMainLoop mainloop = DBusQtMainLoop(set_as_default=True) bus = dbus.SessionBus(mainloop=mainloop) dbus_obj = bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications') dbus_iface = dbus.Interface(dbus_obj, dbus_interface='org.freedesktop.Notifications') appname = app_name initted = True if mainloop or dbus.get_default_main_loop(): _have_mainloop = True dbus_iface.connect_to_signal('ActionInvoked', _action_callback) dbus_iface.connect_to_signal('NotificationClosed', _closed_callback) return True def is_initted(): return initted def get_app_name(): return appname def uninit(): global initted, dbus_iface, _have_mainloop initted = False _have_mainloop = False dbus_iface = UninittedDbusObj() def get_server_caps(): return [str(x) for x in dbus_iface.GetCapabilities()] def get_server_info(): res = dbus_iface.GetServerInformation() return {'name': str(res[0]), 'vendor': str(res[1]), 'version': str(res[2]), 'spec-version': str(res[3]), } notifications_registry = {} def _action_callback(nid, action): nid, action = int(nid), str(action) n = notifications_registry[nid] n._action_callback(action) def _closed_callback(nid, reason): nid, reason = int(nid), int(reason) n = notifications_registry[nid] n._closed_callback(n) del notifications_registry[nid] def no_op(*args): pass class Notification(object): id = 0 timeout = -1 _closed_callback = no_op def __init__(self, summary, message='', icon=''): self.summary = summary self.message = message self.icon = icon self.hints = {} self.actions = {} self.data = {} def show(self): nid = dbus_iface.Notify(appname, self.id, self.icon, self.summary, self.message, self._make_actions_array(), self.hints, self.timeout, ) self.id = int(nid) if _have_mainloop: notifications_registry[self.id] = self return True def update(self, summary, message="", icon=None): self.summary = summary self.message = message if icon is not None: self.icon = icon def close(self): if self.id != 0: dbus_iface.CloseNotification(self.id) def set_hint(self, key, value): self.hints[key] = value set_hint_string = set_hint_int32 = set_hint_double = set_hint def set_hint_byte(self, key, value): self.hints[key] = dbus.Byte(value) def set_urgency(self, level): if level not in urgency_levels: raise ValueError("Unknown urgency level specified", level) self.set_hint_byte("urgency", level) def set_category(self, category): self.hints['category'] = category def set_timeout(self, timeout): if not isinstance(timeout, int): raise TypeError("timeout value was not int", timeout) self.timeout = timeout def get_timeout(self): return self.timeout def add_action(self, action, label, callback, user_data=None): self.actions[action] = (label, callback, user_data) def _make_actions_array(self): arr = [] for action, (label, callback, user_data) in self.actions.items(): arr.append(action) arr.append(label) return arr def 
_action_callback(self, action): try: label, callback, user_data = self.actions[action] except KeyError: return if user_data is None: callback(self, action) else: callback(self, action, user_data)
MIT License
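A minimal sketch, assuming the module is importable as scudcloud.notify2 and that a D-Bus session bus plus a GLib main loop are available (as in a typical desktop session):

from scudcloud import notify2

notify2.init('example-app', mainloop='glib')
n = notify2.Notification('Build finished', 'All tests passed')
# the 'closed' callback receives the Notification object itself
n.connect('closed', lambda notification: print('notification closed'))
n.show()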
msiemens/tinydb
tinydb/queries.py
Query.matches
python
def matches(self, regex: str, flags: int = 0) -> QueryInstance: def test(value): if not isinstance(value, str): return False return re.match(regex, value, flags) is not None return self._generate_test(test, ('matches', self._path, regex))
Run a regex test against a dict value (whole string has to match). >>> Query().f1.matches(r'^\\w+$') :param regex: The regular expression to use for matching :param flags: regex flags to pass to ``re.match``
https://github.com/msiemens/tinydb/blob/187cd0dc24ef0df76f8f3d571bd348b1306cd2c2/tinydb/queries.py#L322-L337
import re import sys from typing import Mapping, Tuple, Callable, Any, Union, List from .utils import freeze if sys.version_info >= (3, 8): from typing import Protocol else: from typing_extensions import Protocol __all__ = ('Query', 'QueryLike', 'where') def is_sequence(obj): return hasattr(obj, '__iter__') class QueryLike(Protocol): def __call__(self, value: Mapping) -> bool: ... def __hash__(self): ... class QueryInstance: def __init__(self, test: Callable[[Mapping], bool], hashval: Tuple): self._test = test self._hash = hashval def __call__(self, value: Mapping) -> bool: return self._test(value) def __hash__(self): return hash(self._hash) def __repr__(self): return 'QueryImpl{}'.format(self._hash) def __eq__(self, other: object): if isinstance(other, QueryInstance): return self._hash == other._hash return False def __and__(self, other: 'QueryInstance') -> 'QueryInstance': return QueryInstance(lambda value: self(value) and other(value), ('and', frozenset([self._hash, other._hash]))) def __or__(self, other: 'QueryInstance') -> 'QueryInstance': return QueryInstance(lambda value: self(value) or other(value), ('or', frozenset([self._hash, other._hash]))) def __invert__(self) -> 'QueryInstance': return QueryInstance(lambda value: not self(value), ('not', self._hash)) class Query(QueryInstance): def __init__(self) -> None: self._path = () def notest(_): raise RuntimeError('Empty query was evaluated') super().__init__( test=notest, hashval=(None,) ) def __repr__(self): return '{}()'.format(type(self).__name__) def __hash__(self): return super().__hash__() def __getattr__(self, item: str): query = type(self)() query._path = self._path + (item,) query._hash = ('path', query._path) return query def __getitem__(self, item: str): return self.__getattr__(item) def _generate_test( self, test: Callable[[Any], bool], hashval: Tuple, allow_empty_path: bool = False ) -> QueryInstance: if not self._path and not allow_empty_path: raise ValueError('Query has no path') def runner(value): try: for part in self._path: value = value[part] except (KeyError, TypeError): return False else: return test(value) return QueryInstance( lambda value: runner(value), hashval ) def __eq__(self, rhs: Any): return self._generate_test( lambda value: value == rhs, ('==', self._path, freeze(rhs)) ) def __ne__(self, rhs: Any): return self._generate_test( lambda value: value != rhs, ('!=', self._path, freeze(rhs)) ) def __lt__(self, rhs: Any) -> QueryInstance: return self._generate_test( lambda value: value < rhs, ('<', self._path, rhs) ) def __le__(self, rhs: Any) -> QueryInstance: return self._generate_test( lambda value: value <= rhs, ('<=', self._path, rhs) ) def __gt__(self, rhs: Any) -> QueryInstance: return self._generate_test( lambda value: value > rhs, ('>', self._path, rhs) ) def __ge__(self, rhs: Any) -> QueryInstance: return self._generate_test( lambda value: value >= rhs, ('>=', self._path, rhs) ) def exists(self) -> QueryInstance: return self._generate_test( lambda _: True, ('exists', self._path) )
MIT License
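A minimal, self-contained sketch using TinyDB's in-memory storage:

from tinydb import TinyDB, Query
from tinydb.storages import MemoryStorage

db = TinyDB(storage=MemoryStorage)
db.insert({'f1': 'hello_world'})
db.insert({'f1': 'not-a-match!'})
print(db.search(Query().f1.matches(r'^\w+$')))  # [{'f1': 'hello_world'}]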
city-of-helsinki/wagtail-svgmap
wagtail_svgmap/svg.py
Link.__init__
python
def __init__(self, url, target=None): self.url = str(url) self.target = target
Construct a link. :param url: The URL to link to. :type url: str :param target: Optional link target (HTML semantics; `_blank` for a new window, etc.) :type target: str|None
https://github.com/city-of-helsinki/wagtail-svgmap/blob/a9883570029a23b555fd0ce2b8bdfc423c3032ed/wagtail_svgmap/svg.py#L60-L70
import re from six import BytesIO try: from xml.etree import cElementTree as ET except ImportError: from xml.etree import ElementTree as ET SVG_NAMESPACE = 'http://www.w3.org/2000/svg' XLINK_NAMESPACE = 'http://www.w3.org/1999/xlink' ET.register_namespace('svg', SVG_NAMESPACE) ET.register_namespace('xlink', XLINK_NAMESPACE) VISIBLE_SVG_TAGS = frozenset({ 'a', 'circle', 'ellipse', 'g', 'image', 'line', 'path', 'polygon', 'polyline', 'rect', 'switch', 'text', 'textPath', 'tref', 'tspan', 'use', }) def find_ids(svg_stream, in_elements=VISIBLE_SVG_TAGS): for event, elem in ET.iterparse(svg_stream, events=('end',)): tag_without_ns = elem.tag.split('}')[-1] if in_elements and tag_without_ns not in in_elements: continue id = elem.get('id') if id: yield id class Link(object):
BSD 3-Clause New or Revised License
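A minimal sketch constructing a link that opens in a new window:

link = Link('https://example.com/', target='_blank')  # example URL, not from the source
print(link.url, link.target)  # https://example.com/ _blank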
dmitriy-serdyuk/twinnet-asr
libs/Theano/theano/sandbox/cuda/opt.py
local_gpu_elemwise_0
python
def local_gpu_elemwise_0(node): if (isinstance(node.op, tensor.Elemwise) and dtype_in_elemwise_supported(node.op)): if any([i.owner and isinstance(i.owner.op, HostFromGpu) for i in node.inputs]): if all([o.type.dtype == 'float32' for o in node.outputs]): if isinstance(node.op.scalar_op, Erfinv): new_op = GpuElemwise(erfinv_gpu) elif isinstance(node.op.scalar_op, Erfcx): new_op = GpuElemwise(erfcx_gpu) else: try: new_op = GpuElemwise(node.op.scalar_op) except SupportCodeError: return False upcastable = set(['float32', 'int8', 'int16', 'uint8', 'uint16']) if all([i.type.dtype == 'float32' for i in node.inputs]): gpu_elemwise = new_op(*(gpu_from_host(i) for i in node.inputs), return_list=True) elif all([i.type.dtype in upcastable for i in node.inputs]): upcasted = node.op.make_node(*[tensor.cast(i, 'float32') for i in node.inputs]) if [o.type for o in upcasted.outputs] == [o.type for o in node.outputs]: new_inputs = [gpu_from_host(tensor.cast(i, 'float32')) for i in node.inputs] gpu_elemwise = new_op(*new_inputs, return_list=True) else: return False else: return False gpu_elemwise = split_huge_add_or_mul(gpu_elemwise[0].owner) if not gpu_elemwise: return False if (max_inputs_to_GpuElemwise(node) < len(gpu_elemwise.inputs)): return False return [host_from_gpu(out) for out in gpu_elemwise.outputs]
Elemwise(..., host_from_gpu, ...) -> host_from_gpu(elemwise(gpu_from_host, ..., gpu_from_host))
https://github.com/dmitriy-serdyuk/twinnet-asr/blob/799220d682306467a2b401e42e788f8c33382b00/libs/Theano/theano/sandbox/cuda/opt.py#L261-L321
from __future__ import print_function import copy import logging import pdb import sys import time import warnings import numpy from six.moves import reduce, xrange import theano from theano import scalar as scal from theano import config, tensor, gof import theano.ifelse from theano.compile import optdb from theano.gof import (local_optimizer, EquilibriumDB, ProxyDB, Optimizer, TopoOptimizer, toolbox) from theano.gof.opt import LocalMetaOptimizer from theano.sandbox.cuda import as_cuda_ndarray_variable from theano.sandbox.cuda.basic_ops import ( gpu_eye, gpu_contiguous, gpu_from_host, host_from_gpu, GpuFromHost, HostFromGpu, GpuContiguous, GpuElemwise, GpuDimShuffle, GpuReshape, GpuCAReduce, GpuFlatten, gpu_flatten, GpuSubtensor, GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20, GpuIncSubtensor, gpu_alloc, GpuAlloc, gpu_shape, GpuSplit, GpuAllocEmpty) from theano.sandbox.cuda.type import CudaNdarrayType from theano.sandbox.cuda.blas import ( gpu_dot22, gpu_dot22scalar, gpu_gemm_inplace, gpu_gemm_no_inplace, GpuConv, GpuBatchedDot, GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights, GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights) from theano.sandbox.cuda.blas import gpu_gemv_inplace from theano.sandbox.cuda.cula import gpu_solve from theano.sandbox.cuda.blas import gpu_gemv_no_inplace from theano.sandbox.cuda.blas import gpu_ger_inplace from theano.sandbox.cuda.blas import gpu_ger_no_inplace from theano.sandbox.cuda.blas import ( GpuDownsampleFactorMax, GpuDownsampleFactorMaxGrad, GpuDownsampleFactorMaxGradGrad) from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter from theano.sandbox.cuda.blocksparse import ( GpuSparseBlockGemv, GpuSparseBlockOuter, gpu_sparse_block_gemv_inplace, gpu_sparse_block_outer_inplace) from theano.sandbox.cuda.nnet import ( GpuCrossentropySoftmaxArgmax1HotWithBias, GpuCrossentropySoftmax1HotWithBiasDx, GpuSoftmax, GpuSoftmaxWithBias) from theano.sandbox.cuda.elemwise import SupportCodeError from theano.scalar.basic_scipy import Erfinv from theano.scalar.basic_scipy import Erfcx from theano.sandbox.cuda.elemwise import erfinv_gpu from theano.sandbox.cuda.elemwise import erfcx_gpu from theano.sandbox.cuda.var import CudaNdarrayConstant from theano.sandbox.cuda import gpu_optimizer, register_opt, gpu_seqopt, GpuOp import theano.sandbox.cuda.extra_ops from theano.scan_module import scan_utils, scan_op, scan_opt from theano.tensor.blas import _is_real_vector, _is_real_matrix from theano.tensor import nlinalg from theano.tensor import slinalg from theano.tensor.nnet.Conv3D import Conv3D from theano.tests.breakpoint import PdbBreakpoint from theano.tensor.nnet.abstract_conv import (BaseAbstractConv2d, AbstractConv2d, AbstractConv2d_gradWeights, AbstractConv2d_gradInputs) from theano.tensor.opt import register_specialize_device try: from theano.sandbox.cuda import device_properties except ImportError: pass _logger = logging.getLogger('theano.sandbox.cuda.opt') gpu_cut_copies = EquilibriumDB() gpu_seqopt.register('gpu_local_optimizations', gpu_optimizer, 1, 'fast_run', 'fast_compile', 'gpu') gpu_seqopt.register('gpu_cut_transfers', gpu_cut_copies, 2, 'fast_run', 'fast_compile', 'gpu') optdb.register('gpu_opt', gpu_seqopt, optdb.__position__.get('add_destroy_handler', 49.5) - 1, 'gpu') optdb.register('gpu_after_fusion', ProxyDB(gpu_seqopt), optdb.__position__.get('elemwise_fusion', 49) + .1, 'gpu') gpu_optimizer.register('gpu_merge', theano.gof.opt.MergeOptimizer(), 'fast_run', 'fast_compile', 
final_opt=True) register_opt()(theano.tensor.opt.local_track_shape_i) register_opt(final_opt=True, name='gpu_constant_folding')( tensor.opt.constant_folding) register_opt()(theano.tensor.opt.local_subtensor_make_vector) gpu_optimizer.register('local_remove_all_assert', theano.tensor.opt.local_remove_all_assert, 'unsafe') register_opt(name='local_gpu_reshape_chain')( theano.tensor.opt.local_reshape_chain(GpuReshape)) import theano.tensor.signal.pool import theano.tensor.nnet.neighbours cpu_ops_moved_to_gpu = [ tensor.blas.Dot22, tensor.blas.Dot22Scalar, tensor.blas.Gemm, tensor.blas.Gemv, tensor.blas.Ger, tensor.nnet.conv.ConvOp, tensor.signal.pool.Pool, tensor.signal.pool.MaxPoolGrad, tensor.signal.pool.AveragePoolGrad, theano.tensor.nnet.neighbours.Images2Neibs, tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias, tensor.nnet.CrossentropySoftmax1HotWithBiasDx, tensor.nnet.Softmax, tensor.nnet.SoftmaxWithBias, tensor.Elemwise, tensor.DimShuffle, tensor.CAReduce, tensor.elemwise.All, tensor.elemwise.Any, tensor.elemwise.CAReduceDtype, tensor.elemwise.Sum, tensor.elemwise.Prod, tensor.elemwise.ProdWithoutZeros, tensor.Reshape, tensor.flatten, tensor.Subtensor, tensor.AdvancedSubtensor1, tensor.AdvancedIncSubtensor1, tensor.IncSubtensor, tensor.Shape, tensor.Join, tensor.Alloc, tensor.Eye, tensor.blas.BatchedDot] class InputToGpuOptimizer(Optimizer): def __init__(self): Optimizer.__init__(self) def add_requirements(self, fgraph): fgraph.attach_feature(toolbox.ReplaceValidate()) def apply(self, fgraph): for input in fgraph.inputs: if isinstance(input.type, CudaNdarrayType): continue if (len(input.clients) == 1 and (input.clients[0][0] == 'output' or input.clients[0][0].op == gpu_from_host)): continue try: new_input = host_from_gpu(gpu_from_host(input)) if new_input.type == input.type: fgraph.replace_validate(input, new_input, "InputToGpuOptimizer") except TypeError: pass gpu_seqopt.register('InputToGpuOptimizer', InputToGpuOptimizer(), 0, 'fast_run', 'fast_compile', 'merge') @local_optimizer([gpu_from_host, host_from_gpu]) def local_cut_gpu_host_gpu(node): if tensor.opt.opt.check_chain(node, gpu_from_host, host_from_gpu): return [node.inputs[0].owner.inputs[0]] if tensor.opt.opt.check_chain(node, host_from_gpu, gpu_from_host): return [node.inputs[0].owner.inputs[0]] return False gpu_cut_copies.register('cut_gpu_host_transfers', local_cut_gpu_host_gpu, 'fast_run', 'fast_compile', 'gpu') gpu_cut_copies.register('cut_gpu_constant_transfers', tensor.opt.constant_folding, 'fast_run', 'fast_compile', 'gpu') optdb['canonicalize'].register('local_cut_gpu_host_gpu', local_cut_gpu_host_gpu, 'fast_run', 'fast_compile', 'gpu') elemwise_cuda_dtype_supported = ['float32', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64'] def dtype_in_elemwise_supported(op): def get_all_basic_scalar(composite_op): l = [] for i in composite_op.fgraph.toposort(): if isinstance(i, theano.scalar.Composite): l += get_all_basic_scalar(i) else: l.append(i) return l if isinstance(op, GpuElemwise) or isinstance(op, tensor.Elemwise): if isinstance(op.scalar_op, theano.scalar.Composite): scals = get_all_basic_scalar(op.scalar_op) for s in scals: if any([i.type.dtype not in elemwise_cuda_dtype_supported for i in s.inputs + s.outputs]): return False return True @register_opt() @local_optimizer([tensor.Elemwise])
MIT License
treverhines/rbf
rbf/pde/fd.py
weight_matrix
python
def weight_matrix(x, p, n, diffs, coeffs=None, phi='phs3', order=None, eps=1.0, chunk_size=1000): x = np.asarray(x, dtype=float) assert_shape(x, (None, None), 'x') nx, ndim = x.shape p = np.asarray(p, dtype=float) assert_shape(p, (None, ndim), 'p') diffs = np.asarray(diffs, dtype=int) diffs = np.atleast_2d(diffs) assert_shape(diffs, (None, ndim), 'diffs') if coeffs is None: coeffs = np.ones(len(diffs), dtype=float) else: coeffs = np.asarray(coeffs, dtype=float) assert_shape(coeffs, (len(diffs), ...), 'coeffs') coeffs = np.array([np.broadcast_to(c, (nx,)) for c in coeffs]) _, stencils = KDTree(p).query(x, n) if chunk_size is None: data = weights( x, p[stencils], diffs, coeffs=coeffs, phi=phi, order=order, eps=eps) else: data = np.empty((nx, n), dtype=float) for start in range(0, nx, chunk_size): stop = start + chunk_size data[start:stop] = weights( x[start:stop], p[stencils[start:stop]], diffs, coeffs=coeffs[:, start:stop], phi=phi, order=order, eps=eps) data = data.ravel() rows = np.repeat(range(nx), n) cols = stencils.ravel() out = sp.coo_matrix((data, (rows, cols)), (nx, len(p))) return out
Returns a weight matrix which maps a function's values at `p` to an approximation of that function's derivative at `x`. This is a convenience function which first creates stencils and then computes the RBF-FD weights for each stencil. Parameters ---------- x : (N, D) float array Target points where the derivative is being approximated p : (M, D) array Source points. The derivatives will be approximated with a weighted sum of values at these points. n : int The stencil size. Each target point will have a stencil made of the `n` nearest neighbors from `p` diffs : (D,) int array or (K, D) int array Derivative orders for each spatial dimension. For example `[2, 0]` indicates that the weights should approximate the second derivative with respect to the first spatial dimension in two-dimensional space. `diffs` can also be a (K, D) array, where each (D,) sub-array is a term in a differential operator. For example the two-dimensional Laplacian can be represented as `[[2, 0], [0, 2]]`. coeffs : (K,) or (K, N) float array, optional Coefficients for each term in the differential operator specified with `diffs`. The coefficients can vary between target points. Defaults to an array of ones. phi : rbf.basis.RBF instance or str, optional Type of RBF. Select from those available in `rbf.basis` or create your own. order : int, optional Order of the added polynomial. This defaults to the highest derivative order. For example, if `diffs` is `[[2, 0], [0, 1]]`, then this is set to 2. eps : float, optional Shape parameter for each RBF chunk_size : int, optional Break the target points into chunks with this size to reduce the memory requirements Returns ------- (N, M) coo sparse matrix Examples -------- Create a second order differentiation matrix in one-dimensional space >>> x = np.arange(4.0)[:, None] >>> W = weight_matrix(x, x, 3, (2,)) >>> W.toarray() array([[ 1., -2., 1., 0.], [ 1., -2., 1., 0.], [ 0., 1., -2., 1.], [ 0., 1., -2., 1.]])
https://github.com/treverhines/rbf/blob/49f378174f2921ff9976b846e6490f1d0c9ff83a/rbf/pde/fd.py#L168-L282
from __future__ import division from functools import lru_cache import logging import numpy as np import scipy.sparse as sp from rbf.basis import phs3, get_rbf from rbf.poly import monomial_count, monomial_powers, mvmonos from rbf.utils import assert_shape, KDTree from rbf.linalg import as_array logger = logging.getLogger(__name__) @lru_cache() def _max_poly_order(size, dim): order = -1 while monomial_count(order + 1, dim) <= size: order += 1 return order def weights(x, s, diffs, coeffs=None, phi=phs3, order=None, eps=1.0): x = np.asarray(x, dtype=float) assert_shape(x, (..., None), 'x') bcast = x.shape[:-1] ndim = x.shape[-1] s = np.asarray(s, dtype=float) assert_shape(s, (..., None, ndim), 's') s = np.broadcast_to(s, bcast + s.shape[-2:]) ssize = s.shape[-2] diffs = np.asarray(diffs, dtype=int) diffs = np.atleast_2d(diffs) assert_shape(diffs, (None, ndim), 'diffs') if coeffs is None: coeffs = np.ones(len(diffs), dtype=float) else: coeffs = np.asarray(coeffs, dtype=float) assert_shape(coeffs, (len(diffs), ...), 'coeffs') coeffs = [np.broadcast_to(c, bcast) for c in coeffs] phi = get_rbf(phi) max_order = _max_poly_order(ssize, ndim) if order is None: order = diffs.sum(axis=1).max() order = min(order, max_order) if order > max_order: raise ValueError('Polynomial order is too high for the stencil size') x = x[..., None, :] s = s - x x = np.zeros_like(x) pwr = monomial_powers(order, ndim) A = phi(s, s, eps=eps) P = mvmonos(s, pwr) Pt = np.einsum('...ij->...ji', P) Z = np.zeros(bcast + (len(pwr), len(pwr)), dtype=float) LHS = np.block([[A, P], [Pt, Z]]) a, p = 0.0, 0.0 for c, d in zip(coeffs, diffs): a += c[..., None, None]*phi(x, s, eps=eps, diff=d) p += c[..., None, None]*mvmonos(x, pwr, diff=d) a = as_array(a)[..., 0, :] p = p[..., 0, :] rhs = np.concatenate((a, p), axis=-1) w = np.linalg.solve(LHS, rhs)[..., :ssize] return w
MIT License
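A further sketch, assuming the function is importable from rbf.pde.fd: build an approximate 2-D Laplacian on scattered points.

import numpy as np
from rbf.pde.fd import weight_matrix

points = np.random.uniform(0.0, 1.0, (100, 2))
# diffs=[[2, 0], [0, 2]] sums d^2/dx^2 and d^2/dy^2, i.e. the Laplacian
L = weight_matrix(points, points, n=25, diffs=[[2, 0], [0, 2]])
print(L.shape)  # (100, 100), returned as a scipy coo sparse matrix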
karlgong/easyium-python
easyium/web_driver.py
WebDriver.quit
python
def quit(self): self._selenium_web_driver().quit()
Quits the driver and closes every associated window.
https://github.com/karlgong/easyium-python/blob/3683c9895e0e1164848df082d456b6801f29e68a/easyium/web_driver.py#L74-L78
from appium.webdriver.clipboard_content_type import ClipboardContentType from appium.webdriver.common.multi_action import MultiAction from appium.webdriver.common.touch_action import TouchAction from appium.webdriver.webdriver import WebDriver as _Appium from selenium.common.exceptions import WebDriverException from selenium.webdriver import ActionChains, Ie as _Ie, Firefox as _Firefox, Chrome as _Chrome, Opera as _Opera, Safari as _Safari, Edge as _Edge, PhantomJS as _PhantomJS, Remote as _Remote from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from .alert import Alert from .context import Context from .decorator import SupportedBy from .enumeration import WebDriverPlatform, WebDriverContext from .utils import StringTypes from .waiter import WebDriverWaitFor class WebDriverInfo: def __init__(self, platform, context): self.platform = platform self.context = context class WebDriver(Context): def __init__(self, selenium_web_driver, web_driver_info): Context.__init__(self) self.__selenium_web_driver = selenium_web_driver self.__web_driver_info = web_driver_info self.set_wait_interval(1000) self.set_wait_timeout(30000) def _selenium_context(self): return self.__selenium_web_driver def _selenium_web_driver(self): return self.__selenium_web_driver def get_web_driver(self): return self def get_web_driver_info(self): return self.__web_driver_info def get_desired_capabilities(self): return self._selenium_web_driver().desired_capabilities def get_application_cache(self): return self._selenium_web_driver().application_cache
Apache License 2.0
jytime/dicl-flow
loss_functions.py
warp
python
def warp(x, flo): B, C, H, W = x.size() xx = torch.arange(0, W).view(1,-1).repeat(H,1) yy = torch.arange(0, H).view(-1,1).repeat(1,W) xx = xx.view(1,1,H,W).repeat(B,1,1,1) yy = yy.view(1,1,H,W).repeat(B,1,1,1) grid = torch.cat((xx,yy),1).float() if x.is_cuda: grid = grid.cuda() vgrid = Variable(grid) + flo vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:]/max(W-1,1)-1.0 vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:]/max(H-1,1)-1.0 vgrid = vgrid.permute(0,2,3,1) output = nn.functional.grid_sample(x, vgrid) mask = torch.autograd.Variable(torch.ones(x.size())).cuda() mask = nn.functional.grid_sample(mask, vgrid) mask[mask<0.9999] = 0 mask[mask>0] = 1 return output*mask
Warp an image/tensor (im2) back to im1, according to the optical flow. x: [B, C, H, W] (im2) flo: [B, 2, H, W] flow
https://github.com/jytime/dicl-flow/blob/7ec4ad869f406db8cc2cf901cf71f7476557cc28/loss_functions.py#L185-L217
import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import numpy as np import cv2 import math from config import cfg, cfg_from_file, save_config_to_file def MultiScale_UP(output,target,loss_type='L1',weight=[1.,0.5,0.25],valid_range=None,removezero=False,extra_mask=None): loss = 0 loss_list = [] b, _, h, w = target.size() if (type(output) is not tuple) and (type(output) is not set): output = {output} for i, cur_output in enumerate(output): realflow = F.interpolate(cur_output, (h,w), mode='bilinear', align_corners=True) realflow[:,0,:,:] = realflow[:,0,:,:]*(w/cur_output.shape[3]) realflow[:,1,:,:] = realflow[:,1,:,:]*(h/cur_output.shape[2]) with torch.no_grad(): if i==0: epe = realEPE(realflow,target,extra_mask=extra_mask) if loss_type=='L2': lossvalue = torch.norm(realflow-target,p=2,dim=1) elif loss_type=='robust': lossvalue = ((realflow-target).abs().sum(dim=1)+1e-8) lossvalue = lossvalue**0.4 elif loss_type=='L1': lossvalue = (realflow-target).abs().sum(dim=1) else: raise NotImplementedError if cfg.USE_VALID_RANGE and valid_range is not None: with torch.no_grad(): mask = (target[:,0,:,:].abs()<=valid_range[i][1]) & (target[:,1,:,:].abs()<=valid_range[i][0]) else: with torch.no_grad(): mask = torch.ones(target[:,0,:,:].shape).type_as(target) lossvalue = lossvalue*mask.float() if extra_mask is not None: val = extra_mask > 0 lossvalue = lossvalue[val] cur_loss = lossvalue.mean()*weight[i] assert lossvalue.shape[0] == extra_mask.sum() else: cur_loss = lossvalue.mean()*weight[i] loss+=cur_loss loss_list.append(cur_loss) loss = loss/len(output) return loss,loss_list,epe def random_select_points(x,y,x_,y_,samples=10): idx=torch.randperm(x.shape[0]) x=x[idx[:samples],:] y=y[idx[:samples],:] x_=x_[idx[:samples],:] y_=y_[idx[:samples],:] return x,y,x_,y_ def subspace_loss_batch(flow): B, _, H, W = flow.size() xx = Variable(torch.arange(0, W).view(1,-1).repeat(H,1).cuda()) yy = Variable(torch.arange(0, H).view(-1,1).repeat(1,W).cuda()) grid_x = xx.view(1,1,H,W).repeat(B,1,1,1).float() grid_y = yy.view(1,1,H,W).repeat(B,1,1,1).float() flow_u = flow[:,0,:,:].unsqueeze(1) flow_v = flow[:,1,:,:].unsqueeze(1) pos_x = grid_x + flow_u pos_y = grid_y + flow_v inside_x = (pos_x <= (W-1)) & (pos_x >= 0.0) inside_y = (pos_y <= (H-1)) & (pos_y >= 0.0) inside = inside_x & inside_y loss = 0 least_num = 2000 list_X =[] list_X_ = [] for i in range(B): grid_x_i = grid_x[i,:,:,:] grid_y_i = grid_y[i,:,:,:] pos_x_i = pos_x[i,:,:,:] pos_y_i = pos_y[i,:,:,:] inside_i= inside[i,:,:,:] if inside_i.sum()>least_num: x = torch.masked_select(grid_x_i, inside_i).view(-1,1) y = torch.masked_select(grid_y_i, inside_i).view(-1,1) x_ = torch.masked_select(pos_x_i, inside_i).view(-1,1) y_ = torch.masked_select(pos_y_i, inside_i).view(-1,1) x, y, x_, y_ = random_select_points(x,y,x_,y_,samples=least_num) o = torch.ones_like(x) x, y, x_, y_ = x/W, y/W, x_/W, y_/W X = torch.cat((x,x,x,y,y,y,o,o,o),1).permute(1,0) X_ = torch.cat((x_,y_,o,x_,y_,o,x_,y_,o),1).permute(1,0) list_X.append(X.unsqueeze(0)) list_X_.append(X_.unsqueeze(0)) all_X = torch.cat(list_X) all_X_ = torch.cat(list_X_) M = all_X*all_X_ lambda1 = 10 MTM = lambda1 * torch.matmul(M.permute(0,2,1),M) I = torch.eye(MTM.size()[1]).type_as(MTM).unsqueeze(0).repeat(B,1,1) MTM_inverse = torch.inverse((I + MTM)) C = torch.matmul(MTM_inverse,MTM) C2 = C**2 loss1 = torch.sum(C2.view(-1,1),dim=0) loss2 = lambda1 * torch.sum(((torch.matmul(M,C)-M)**2).view(-1,1),dim=0) loss += (loss1 + loss2) return loss/B def EPE_flow(input_flow, 
target_flow): return torch.norm(target_flow-input_flow,p=2,dim=1).mean() class L1(nn.Module): def __init__(self): super(L1, self).__init__() def forward(self, output, target): lossvalue = torch.abs(output - target).mean() return lossvalue class L2(nn.Module): def __init__(self): super(L2, self).__init__() def forward(self, output, target): lossvalue = torch.norm(output-target,p=2,dim=1).mean() return lossvalue class L1Loss(nn.Module): def __init__(self): super(L1Loss, self).__init__() self.loss = L1() self.loss_labels = ['L1', 'EPE'] def forward(self, output, target): lossvalue = self.loss(output, target) epevalue = EPE_flow(output, target) return [lossvalue, epevalue] class L2Loss(nn.Module): def __init__(self): super(L2Loss, self).__init__() self.loss = L2() self.loss_labels = ['L2', 'EPE'] def forward(self, output, target): lossvalue = self.loss(output, target) epevalue = EPE_flow(output, target) return [lossvalue, epevalue]
MIT License
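A minimal sketch, assuming a CUDA device is available (warp builds its validity mask on the GPU); with a zero flow field the output approximates the input inside the valid region:

import torch

x = torch.randn(1, 3, 8, 8).cuda()
flo = torch.zeros(1, 2, 8, 8).cuda()
warped = warp(x, flo)
print(warped.shape)  # torch.Size([1, 3, 8, 8])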
memgraph/mage
python/mage/graph_coloring_module/graph.py
Graph.label
python
def label(self, node: int) -> Any: return self._indices_to_labels[node]
Returns the node label.
https://github.com/memgraph/mage/blob/69f0242aceb47fc383d0e56077f08b2b061273b5/python/mage/graph_coloring_module/graph.py#L95-L97
from typing import Tuple, List, Any, Dict, Iterator from itertools import islice class Graph: def __init__( self, nodes: List[Any], adjacency_list: Dict[Any, List[Tuple[Any, float]]], name: str = "", ): self._indices_to_labels = nodes self._labels_to_indices = dict( (label, index) for index, label in enumerate(nodes) ) self._nodes_count = len(nodes) self._neighbors_positions = [] self._neighbors = [] self._weights = [] self._name = name for i in range(self._nodes_count): self._neighbors.extend( [ self._labels_to_indices[node[0]] for node in adjacency_list[self._indices_to_labels[i]] ] ) self._weights.extend( [node[1] for node in adjacency_list[self._indices_to_labels[i]]] ) self._neighbors_positions.append(len(self._neighbors)) def __str__(self): return self._name def __len__(self): return self._nodes_count def __getitem__(self, node: int) -> Iterator[int]: start = self._neighbors_positions[node - 1] if node != 0 else 0 end = self._neighbors_positions[node] return islice(self._neighbors, start, end) @property def nodes(self) -> Iterator[int]: nodes = (node for node in range(self._nodes_count)) return nodes def number_of_nodes(self) -> int: return self._nodes_count def number_of_edges(self) -> int: return len(self._neighbors) // 2 def neighbors(self, node: int) -> Iterator[int]: return self.__getitem__(node) def weighted_neighbors(self, node: int) -> Iterator[Tuple[int, float]]: start = self._neighbors_positions[node - 1] if node != 0 else 0 end = self._neighbors_positions[node] return self._neighbor_weight_tuples(start, end) def weight(self, node_1: int, node_2: int) -> float: weighted_neighs = self.weighted_neighbors(node_1) for node, weight in weighted_neighs: if node == node_2: return weight return 0 def degree(self, node: int) -> int: start = self._neighbors_positions[node - 1] if node != 0 else 0 end = self._neighbors_positions[node] return start - end
Apache License 2.0
sciforce/phones-las
las/ops.py
pyramidal_stack
python
def pyramidal_stack(outputs, sequence_length):
    shape = tf.shape(outputs)
    batch_size, max_time = shape[0], shape[1]
    num_units = outputs.get_shape().as_list()[-1]
    paddings = [[0, 0], [0, tf.floormod(max_time, 2)], [0, 0]]
    outputs = tf.pad(outputs, paddings)
    '''
    even_time = outputs[:, ::2, :]
    odd_time = outputs[:, 1::2, :]
    concat_outputs = tf.concat([even_time, odd_time], -1)
    '''
    concat_outputs = tf.reshape(outputs, (batch_size, -1, num_units * 2))
    return concat_outputs, tf.floordiv(sequence_length, 2) + tf.floormod(sequence_length, 2)
even_time = outputs[:, ::2, :]
odd_time = outputs[:, 1::2, :]
concat_outputs = tf.concat([even_time, odd_time], -1)
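A brief shape sketch of the stacking (hedged; this module targets TensorFlow 1.x graph mode, so the tensors below are symbolic and the shapes are what the ops produce):

    import tensorflow as tf

    outputs = tf.zeros([2, 6, 4])            # [batch, time, units]
    seq_len = tf.constant([6, 5])
    stacked, new_len = pyramidal_stack(outputs, seq_len)
    # stacked has shape [2, 3, 8]: pairs of adjacent time steps are merged,
    # and new_len works out to [3, 3], i.e. ceil(length / 2).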
https://github.com/sciforce/phones-las/blob/f95523fbbdf1dd7f1acce5b25c37b620f3eb8e9b/las/ops.py#L49-L65
import tensorflow as tf __all__ = [ 'lstm_cell', 'bilstm', 'pyramidal_bilstm', ] def lstm_cell(num_units, dropout, mode): cell = tf.nn.rnn_cell.LSTMCell(num_units, initializer=tf.random_uniform_initializer(minval=-0.075, maxval=0.075)) dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0 if dropout > 0.0: cell = tf.nn.rnn_cell.DropoutWrapper( cell=cell, input_keep_prob=(1.0 - dropout)) return cell def bilstm(inputs, sequence_length, num_units, dropout, mode, unidirectional=False): with tf.variable_scope('fw_cell'): forward_cell = lstm_cell(num_units, dropout, mode) if not unidirectional: with tf.variable_scope('bw_cell'): backward_cell = lstm_cell(num_units, dropout, mode) return tf.nn.bidirectional_dynamic_rnn( forward_cell, backward_cell, inputs, sequence_length=sequence_length, dtype=tf.float32) else: return tf.nn.dynamic_rnn( forward_cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
Apache License 2.0
secondmind-labs/trieste
trieste/objectives/single_objectives.py
hartmann_6
python
def hartmann_6(x: TensorType) -> TensorType:
    tf.debugging.assert_shapes([(x, (..., 6))])

    a = [1.0, 1.2, 3.0, 3.2]
    A = [
        [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
        [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
        [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
        [17.0, 8.0, 0.05, 10.0, 0.1, 14.0],
    ]
    P = [
        [0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
        [0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
        [0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.6650],
        [0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381],
    ]

    inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, 1) - P) ** 2, -1)
    return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True)
The Hartmann 6 test function over :math:`[0, 1]^6`. This function has six
local minima and one global minimum. See https://www.sfu.ca/~ssurjano/hart6.html
for details.

:param x: The points at which to evaluate the function, with shape [..., 6].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
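A small evaluation sketch (the query point is arbitrary; float64 matches the constants used elsewhere in this module):

    import tensorflow as tf

    x = tf.constant([[0.5] * 6], tf.float64)   # one point in [0, 1]^6
    y = hartmann_6(x)                          # tensor of shape [1, 1]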
https://github.com/secondmind-labs/trieste/blob/b58eb924a49ad86e27fa2e082defe2d37afcc14a/trieste/objectives/single_objectives.py#L339-L366
from __future__ import annotations import math from math import pi import tensorflow as tf from ..space import Box from ..types import TensorType def _branin_internals(x: TensorType, scale: TensorType, translate: TensorType) -> TensorType: x0 = x[..., :1] * 15.0 - 5.0 x1 = x[..., 1:] * 15.0 b = 5.1 / (4 * math.pi ** 2) c = 5 / math.pi r = 6 s = 10 t = 1 / (8 * math.pi) return scale * ((x1 - b * x0 ** 2 + c * x0 - r) ** 2 + s * (1 - t) * tf.cos(x0) + translate) def branin(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 2))]) return _branin_internals(x, 1, 10) def scaled_branin(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 2))]) return _branin_internals(x, 1 / 51.95, -44.81) _ORIGINAL_BRANIN_MINIMIZERS = tf.constant( [[-math.pi, 12.275], [math.pi, 2.275], [9.42478, 2.475]], tf.float64 ) BRANIN_MINIMIZERS = (_ORIGINAL_BRANIN_MINIMIZERS + [5.0, 0.0]) / 15.0 BRANIN_MINIMUM = tf.constant([0.397887], tf.float64) SCALED_BRANIN_MINIMUM = tf.constant([-1.047393], tf.float64) BRANIN_SEARCH_SPACE = Box([0.0], [1.0]) ** 2 def gramacy_lee(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 1))]) return tf.sin(10 * math.pi * x) / (2 * x) + (x - 1) ** 4 GRAMACY_LEE_MINIMIZER = tf.constant([[0.548562]], tf.float64) GRAMACY_LEE_MINIMUM = tf.constant([-0.869011], tf.float64) GRAMACY_LEE_SEARCH_SPACE = Box([0.5], [2.5]) def logarithmic_goldstein_price(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 2))]) x0, x1 = tf.split(4 * x - 2, 2, axis=-1) a = (x0 + x1 + 1) ** 2 b = 19 - 14 * x0 + 3 * x0 ** 2 - 14 * x1 + 6 * x0 * x1 + 3 * x1 ** 2 c = (2 * x0 - 3 * x1) ** 2 d = 18 - 32 * x0 + 12 * x0 ** 2 + 48 * x1 - 36 * x0 * x1 + 27 * x1 ** 2 return (1 / 2.427) * (tf.math.log((1 + a * b) * (30 + c * d)) - 8.693) LOGARITHMIC_GOLDSTEIN_PRICE_MINIMIZER = tf.constant([[0.5, 0.25]], tf.float64) LOGARITHMIC_GOLDSTEIN_PRICE_MINIMUM = tf.constant([-3.12913], tf.float64) LOGARITHMIC_GOLDSTEIN_PRICE_SEARCH_SPACE = Box([0.0], [1.0]) ** 2 def hartmann_3(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 3))]) a = [1.0, 1.2, 3.0, 3.2] A = [[3.0, 10.0, 30.0], [0.1, 10.0, 35.0], [3.0, 10.0, 30.0], [0.1, 10.0, 35.0]] P = [ [0.3689, 0.1170, 0.2673], [0.4699, 0.4387, 0.7470], [0.1091, 0.8732, 0.5547], [0.0381, 0.5743, 0.8828], ] inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, 1) - P) ** 2, -1) return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True) HARTMANN_3_MINIMIZER = tf.constant([[0.114614, 0.555649, 0.852547]], tf.float64) HARTMANN_3_MINIMUM = tf.constant([-3.86278], tf.float64) HARTMANN_3_SEARCH_SPACE = Box([0.0], [1.0]) ** 3 def shekel_4(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 4))]) y: TensorType = x * 10.0 beta = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5] C = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0], [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6], [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0], [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6], ] inner_sum = tf.reduce_sum((tf.expand_dims(y, -1) - C) ** 2, 1) inner_sum += tf.cast(tf.transpose(beta), dtype=inner_sum.dtype) return -tf.reduce_sum(inner_sum ** (-1), -1, keepdims=True) SHEKEL_4_MINIMIZER = tf.constant([[0.4, 0.4, 0.4, 0.4]], tf.float64) SHEKEL_4_MINIMUM = tf.constant([-10.5363], tf.float64) SHEKEL_4_SEARCH_SPACE = Box([0.0], [1.0]) ** 4 def rosenbrock_4(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 4))]) y: TensorType = x * 15.0 - 5 unscaled_function = tf.reduce_sum( (100.0 * (y[..., 
1:] - y[..., :-1]) ** 2 + (1 - y[..., :-1]) ** 2), axis=-1, keepdims=True ) return (unscaled_function - 3.827 * 1e5) / (3.755 * 1e5) ROSENBROCK_4_MINIMIZER = tf.constant([[0.4, 0.4, 0.4, 0.4]], tf.float64) ROSENBROCK_4_MINIMUM = tf.constant([-1.01917], tf.float64) ROSENBROCK_4_SEARCH_SPACE = Box([0.0], [1.0]) ** 4 def ackley_5(x: TensorType) -> TensorType: tf.debugging.assert_shapes([(x, (..., 5))]) x = (x - 0.5) * (32.768 * 2.0) exponent_1 = -0.2 * tf.math.sqrt((1 / 5.0) * tf.reduce_sum(x ** 2, -1)) exponent_2 = (1 / 5.0) * tf.reduce_sum(tf.math.cos(2.0 * math.pi * x), -1) function = ( -20.0 * tf.math.exp(exponent_1) - tf.math.exp(exponent_2) + 20.0 + tf.cast(tf.math.exp(1.0), dtype=tf.float64) ) return tf.expand_dims(function, -1) ACKLEY_5_MINIMIZER = tf.constant([[0.5, 0.5, 0.5, 0.5, 0.5]], tf.float64) ACKLEY_5_MINIMUM = tf.constant([0.0], tf.float64) ACKLEY_5_SEARCH_SPACE = Box([0.0], [1.0]) ** 5
Apache License 2.0
gugarosa/opfython
opfython/math/distance.py
squared_euclidean_distance
python
def squared_euclidean_distance(x, y):
    dist = (x - y) ** 2
    return np.sum(dist)
Calculates the Squared Euclidean Distance.

Args:
    x (np.array): N-dimensional array.
    y (np.array): N-dimensional array.

Returns:
    The Squared Euclidean Distance between x and y.
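A quick usage sketch with made-up vectors:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0])
    y = np.array([0.0, 2.0, 5.0])
    squared_euclidean_distance(x, y)  # (1-0)**2 + (2-2)**2 + (3-5)**2 = 5.0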
https://github.com/gugarosa/opfython/blob/9c737952f5bd109d5a8a50fd438fcac3d9c3e42b/opfython/math/distance.py#L775-L789
import math import numpy as np from numba import njit import opfython.utils.constants as c import opfython.utils.decorator as d @d.avoid_zero_division @njit(cache=True) def additive_symmetric_distance(x, y): dist = ((x - y) ** 2 * (x + y)) / (x * y) return 2 * np.sum(dist) @njit(cache=True) def average_euclidean_distance(x, y): dist = squared_euclidean_distance(x, y) return (dist / x.shape[0]) ** 0.5 @d.avoid_zero_division @njit(cache=True) def bhattacharyya_distance(x, y): dist = -math.log(np.sum((x * y) ** 0.5)) return dist @d.avoid_zero_division @njit(cache=True) def bray_curtis_distance(x, y): dist = np.sum(np.fabs(x - y)) / np.sum(x + y) return dist @d.avoid_zero_division @njit(cache=True) def canberra_distance(x, y): dist = np.fabs(x - y) / (np.fabs(x) + np.fabs(y)) return np.sum(dist) @njit(cache=True) def chebyshev_distance(x, y): dist = np.fabs(x - y) return np.amax(dist) @d.avoid_zero_division @njit(cache=True) def chi_squared_distance(x, y): dist = ((x - y) ** 2 / (x + y)) return 0.5 * np.sum(dist) @d.avoid_zero_division @njit(cache=True) def chord_distance(x, y): dist = 2 - 2 * (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5)) return dist ** 0.5 @d.avoid_zero_division @njit(cache=True) def clark_distance(x, y): dist = ((x - y) / np.fabs(x + y)) ** 2 return np.sum(dist) ** 0.5 @d.avoid_zero_division @njit(cache=True) def cosine_distance(x, y): dist = 1 - (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5)) return dist @d.avoid_zero_division @njit(cache=True) def dice_distance(x, y): dist = 2 * np.sum(x * y) / (np.sum(x ** 2) + np.sum(y ** 2)) return 1 - dist @d.avoid_zero_division @njit(cache=True) def divergence_distance(x, y): dist = (x - y) ** 2 / (x + y) ** 2 return 2 * np.sum(dist) @njit(cache=True) def euclidean_distance(x, y): dist = (x - y) ** 2 return np.sum(dist) ** 0.5 @njit(cache=True) def gaussian_distance(x, y, gamma=1): dist = (x - y) ** 2 return math.exp(-gamma * np.sum(dist) ** 0.5) @njit(cache=True) def gower_distance(x, y): dist = np.fabs(x - y) return np.sum(dist) / x.shape[0] @njit(cache=True) def hamming_distance(x, y): dist = np.count_nonzero(x != y) return dist @d.avoid_zero_division @njit(cache=True) def hassanat_distance(x, y): dist = np.zeros(x.shape[0]) mask = np.minimum(x, y) >= 0 for i in range(x.shape[0]): if mask[i] is True: dist[i] = 1 - (1 + np.minimum(x[i], y[i])) / (1 + np.maximum(x[i], y[i])) else: dist[i] = 1 - (1 + np.minimum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i]))) / (1 + np.maximum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i]))) return np.sum(dist) @njit(cache=True) def hellinger_distance(x, y): dist = 2 * (x ** 0.5 - y ** 0.5) ** 2 return np.sum(dist) ** 0.5 @d.avoid_zero_division def jaccard_distance(x, y): dist = np.sum((x - y) ** 2) / (np.sum(x ** 2) + np.sum(y ** 2) - np.sum(x * y)) return dist @d.avoid_zero_division @njit(cache=True) def jeffreys_distance(x, y): dist = (x - y) * np.log(x / y) return np.sum(dist) @d.avoid_zero_division @njit(cache=True) def jensen_distance(x, y): dist = (x * np.log(x) + y * np.log(y)) / 2 - ((x + y) / 2) * np.log((x + y) / 2) return 0.5 * np.sum(dist) @d.avoid_zero_division @njit(cache=True) def jensen_shannon_distance(x, y): dist1 = x * np.log((2 * x) / (x + y)) dist2 = y * np.log((2 * y) / (x + y)) return 0.5 * (np.sum(dist1) + np.sum(dist2)) @d.avoid_zero_division @njit(cache=True) def k_divergence_distance(x, y): dist = x * np.log((2 * x) / (x + y)) return np.sum(dist) @d.avoid_zero_division @njit(cache=True) def kulczynski_distance(x, y): dist = np.sum(np.fabs(x 
- y)) / np.sum(np.minimum(x, y)) return dist @d.avoid_zero_division @njit(cache=True) def kullback_leibler_distance(x, y): dist = x * np.log(x / y) return np.sum(dist) @njit(cache=True) def log_euclidean_distance(x, y): dist = euclidean_distance(x, y) return c.MAX_ARC_WEIGHT * math.log(dist + 1) @njit(cache=True) def log_squared_euclidean_distance(x, y): dist = squared_euclidean_distance(x, y) return c.MAX_ARC_WEIGHT * math.log(dist + 1) @njit(cache=True) def lorentzian_distance(x, y): dist = np.log(1 + np.fabs(x - y)) return np.sum(dist) @njit(cache=True) def manhattan_distance(x, y): dist = np.fabs(x - y) return np.sum(dist) @njit(cache=True) def matusita_distance(x, y): dist = (x ** 0.5 - y ** 0.5) ** 2 return np.sum(dist) ** 0.5 @d.avoid_zero_division @njit(cache=True) def max_symmetric_distance(x, y): dist1 = (x - y) ** 2 / x dist2 = (x - y) ** 2 / y return np.maximum(np.sum(dist1), np.sum(dist2)) @d.avoid_zero_division @njit(cache=True) def mean_censored_euclidean_distance(x, y): dist = squared_euclidean_distance(x, y) diff = np.count_nonzero(x + y != 0) return (dist / diff) ** 0.5 @d.avoid_zero_division @njit(cache=True) def min_symmetric_distance(x, y): dist1 = (x - y) ** 2 / x dist2 = (x - y) ** 2 / y return np.minimum(np.sum(dist1), np.sum(dist2)) @d.avoid_zero_division @njit(cache=True) def neyman_distance(x, y): dist = (x - y) ** 2 / x return np.sum(dist) @njit(cache=True) def non_intersection_distance(x, y): dist = np.fabs(x - y) return 0.5 * np.sum(dist) @d.avoid_zero_division @njit(cache=True) def pearson_distance(x, y): dist = (x - y) ** 2 / y return np.sum(dist) @d.avoid_zero_division @njit(cache=True) def sangvi_distance(x, y): dist = (x - y) ** 2 / (x + y) return 2 * np.sum(dist) @d.avoid_zero_division @njit(cache=True) def soergel_distance(x, y): dist = np.sum(np.fabs(x - y)) / np.sum(np.maximum(x, y)) return dist @d.avoid_zero_division @njit(cache=True) def squared_distance(x, y): dist = (x - y) ** 2 / (x + y) return np.sum(dist) @njit(cache=True) def squared_chord_distance(x, y): dist = (x ** 0.5 - y ** 0.5) ** 2 return np.sum(dist) @njit(cache=True)
Apache License 2.0
google-research/federated
rcc_dp/modify_pi.py
modify_pi
python
def modify_pi(pi, eta, epsilon, multiplicative_factor):
    if eta < epsilon / 2:
        raise ValueError('eta should be larger than epsilon/2.')
    number_candidates = len(pi)
    p = np.zeros(number_candidates) + 1.0 / number_candidates
    p = p * multiplicative_factor
    pi_all = [pi]
    mass_above = np.sum(np.maximum(pi - np.exp(eta) * p, 0))
    mass_below = np.sum(np.maximum(np.exp(-eta) * p - pi, 0))
    if mass_above == 0 and mass_below == 0:
        return pi_all
    elif mass_above > mass_below:
        below = pi < np.exp(-eta) * p
        pi_new = pi.copy()
        pi_new[below] = np.exp(-eta) * p[below]
        indices = np.argsort(np.exp(eta) * p - pi_all[-1])
        budget = mass_below
        for i in indices:
            diff = pi_new[i] - np.exp(eta) * p[i]
            if diff > budget:
                pi_new[i] -= budget
                break
            elif diff > 0:
                pi_new[i] -= diff
                budget -= diff
        pi_all.append(pi_new.copy())
        mass_above = np.sum(np.maximum(pi_new - np.exp(eta) * p, 0))
        while mass_above > 0:
            above = pi_new >= np.exp(eta) * p
            pi_new[above] = np.exp(eta) * p[above]
            not_above = ~above
            pi_new[not_above] += mass_above * pi[not_above] / np.sum(pi[not_above])
            pi_all.append(pi_new.copy())
            mass_above = np.sum(np.maximum(pi_new - np.exp(eta) * p, 0))
    else:
        above = pi > np.exp(eta) * p
        pi_new = pi.copy()
        pi_new[above] = np.exp(eta) * p[above]
        indices = np.argsort(pi_all[-1] - np.exp(-eta) * p)
        budget = mass_above
        for i in indices:
            diff = np.exp(-eta) * p[i] - pi_new[i]
            if diff > budget:
                pi_new[i] += budget
                break
            elif diff > 0:
                pi_new[i] += diff
                budget -= diff
        pi_all.append(pi_new.copy())
        mass_below = np.sum(np.maximum(np.exp(-eta) * p - pi_new, 0))
        while mass_below > 0:
            below = pi_new <= np.exp(-eta) * p
            pi_new[below] = np.exp(-eta) * p[below]
            not_below = ~below
            pi_new[not_below] -= mass_below * pi[not_below] / np.sum(pi[not_below])
            pi_all.append(pi_new.copy())
            mass_below = np.sum(np.maximum(np.exp(-eta) * p - pi_new, 0))
    return pi_all
This function modifies the distribution pi to make it 2eta-LDP.

The function essentially ensures that the new distribution lies between the
upper threshold `exp(eta) * multiplicative_factor * p` and the lower threshold
`exp(-eta) * multiplicative_factor * p`. It first checks whether the
distribution already lies inside the thresholds. If not, it trades the mass
beyond the upper threshold against the mass beyond the lower threshold. In
other words, it ensures that at least one of the constraints is satisfied (the
one which is violated less severely). This is done with the help of the helper
function `trade_mass`. Next, it iteratively enforces the constraint that is
still violated and renormalizes the distribution. This is done with the help
of the helper function `normalize`.

Args:
    pi: The input distribution to be modified.
    eta: The privacy parameter that is half the desired privacy guarantee.
    epsilon: The privacy parameter epsilon.
    multiplicative_factor: The factor by which the uniform distribution over
        the candidates is scaled.

Returns:
    pi_all: The container describing how the distribution pi evolved from pi
        to tilde_pi (which is equal to pi_all[-1]). Further, tilde_pi is
        2eta-LDP.
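A usage sketch with a made-up three-candidate distribution (eta and epsilon chosen so that eta >= epsilon/2):

    import numpy as np

    pi = np.array([0.7, 0.2, 0.1])
    pi_all = modify_pi(pi, eta=0.5, epsilon=0.5, multiplicative_factor=1.0)
    tilde_pi = pi_all[-1]                  # the final, 2*eta-LDP distribution
    p = np.ones_like(pi) / len(pi)
    # tilde_pi is expected to lie between exp(-eta) * p and exp(eta) * p elementwise.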
https://github.com/google-research/federated/blob/909953fa8945cfac01328e0a6d878e1dc0376c3c/rcc_dp/modify_pi.py#L22-L151
import numpy as np
Apache License 2.0
econ-ark/hark
HARK/ConsumptionSaving/ConsMarkovModel.py
MarkovConsumerType.update_solution_terminal
python
def update_solution_terminal(self):
    IndShockConsumerType.update_solution_terminal(self)
    StateCount = self.MrkvArray[0].shape[0]
    self.solution_terminal.cFunc = StateCount * [self.cFunc_terminal_]
    self.solution_terminal.vFunc = StateCount * [self.solution_terminal.vFunc]
    self.solution_terminal.vPfunc = StateCount * [self.solution_terminal.vPfunc]
    self.solution_terminal.vPPfunc = StateCount * [self.solution_terminal.vPPfunc]
    self.solution_terminal.mNrmMin = np.zeros(StateCount)
    self.solution_terminal.hRto = np.zeros(StateCount)
    self.solution_terminal.MPCmax = np.ones(StateCount)
    self.solution_terminal.MPCmin = np.ones(StateCount)
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.

Parameters
----------
none

Returns
-------
none
https://github.com/econ-ark/hark/blob/d59269d4ca41afd5e2c3bf8c50ebf99b2073b998/HARK/ConsumptionSaving/ConsMarkovModel.py#L931-L955
from copy import deepcopy import numpy as np from HARK import AgentType from HARK.ConsumptionSaving.ConsIndShockModel import ( ConsIndShockSolver, ConsumerSolution, IndShockConsumerType, PerfForesightConsumerType, ) from HARK.distribution import ( DiscreteDistribution, MarkovProcess, Uniform, calc_expectation ) from HARK.interpolation import ( CubicInterp, LowerEnvelope, LinearInterp, ValueFuncCRRA,MargValueFuncCRRA ) from HARK.utilities import ( CRRAutility, CRRAutilityP, CRRAutilityPP, CRRAutilityP_inv, CRRAutility_invP, CRRAutility_inv, CRRAutilityP_invP, ) __all__ = ["ConsMarkovSolver", "MarkovConsumerType"] utility = CRRAutility utilityP = CRRAutilityP utilityPP = CRRAutilityPP utilityP_inv = CRRAutilityP_inv utility_invP = CRRAutility_invP utility_inv = CRRAutility_inv utilityP_invP = CRRAutilityP_invP class ConsMarkovSolver(ConsIndShockSolver): def __init__( self, solution_next, IncShkDstn_list, LivPrb, DiscFac, CRRA, Rfree_list, PermGroFac_list, MrkvArray, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool, ): self.solution_next = solution_next self.IncShkDstn_list = IncShkDstn_list self.LivPrb = LivPrb self.DiscFac = DiscFac self.CRRA = CRRA self.BoroCnstArt = BoroCnstArt self.aXtraGrid = aXtraGrid self.vFuncBool = vFuncBool self.CubicBool = CubicBool self.Rfree_list=Rfree_list self.PermGroFac_list=PermGroFac_list self.MrkvArray=MrkvArray self.StateCount=MrkvArray.shape[0] self.def_utility_funcs() def solve(self): self.def_boundary() self.EndOfPrdvFunc_list = [] self.EndOfPrdvPfunc_list = [] self.Ex_IncNextAll = ( np.zeros(self.StateCount) + np.nan ) self.WorstIncPrbAll = ( np.zeros(self.StateCount) + np.nan ) for j in range(self.StateCount): self.condition_on_state(j) self.Ex_IncNextAll[j] = np.dot( self.ShkPrbsNext, self.PermShkValsNext * self.TranShkValsNext ) self.WorstIncPrbAll[j] = self.WorstIncPrb EndOfPrdvPfunc_cond = self.make_EndOfPrdvPfuncCond() self.EndOfPrdvPfunc_list.append(EndOfPrdvPfunc_cond) if self.vFuncBool: EndOfPrdvFunc_cond = self.make_EndOfPrdvFuncCond() self.EndOfPrdvFunc_list.append(EndOfPrdvFunc_cond) self.calc_EndOfPrdvP() self.calc_HumWealth_and_BoundingMPCs() aNrm = ( np.asarray(self.aXtraGrid)[np.newaxis, :] + np.array(self.BoroCnstNat_list)[:, np.newaxis] ) self.get_points_for_interpolation(self.EndOfPrdvP, aNrm) cNrm = np.hstack((np.zeros((self.StateCount, 1)), self.cNrmNow)) mNrm = np.hstack( (np.reshape(self.mNrmMin_list, (self.StateCount, 1)), self.mNrmNow) ) self.BoroCnstNat = self.BoroCnstNat_list solution = self.make_solution(cNrm, mNrm) return solution def def_boundary(self): self.BoroCnstNatAll = np.zeros(self.StateCount) + np.nan for j in range(self.StateCount): PermShkMinNext = np.min(self.IncShkDstn_list[j].X[0]) TranShkMinNext = np.min(self.IncShkDstn_list[j].X[1]) self.BoroCnstNatAll[j] = ( (self.solution_next.mNrmMin[j] - TranShkMinNext) * (self.PermGroFac_list[j] * PermShkMinNext) / self.Rfree_list[j] ) self.BoroCnstNat_list = np.zeros(self.StateCount) + np.nan self.mNrmMin_list = np.zeros(self.StateCount) + np.nan self.BoroCnstDependency = np.zeros((self.StateCount, self.StateCount)) + np.nan for i in range(self.StateCount): possible_next_states = self.MrkvArray[i, :] > 0 self.BoroCnstNat_list[i] = np.max(self.BoroCnstNatAll[possible_next_states]) if self.BoroCnstArt is None: self.mNrmMin_list[i] = self.BoroCnstNat_list[i] else: self.mNrmMin_list[i] = np.max( [self.BoroCnstNat_list[i], self.BoroCnstArt] ) self.BoroCnstDependency[i, :] = ( self.BoroCnstNat_list[i] == self.BoroCnstNatAll ) def condition_on_state(self, state_index): 
self.IncShkDstn = self.IncShkDstn_list[state_index] self.Rfree = self.Rfree_list[state_index] self.PermGroFac = self.PermGroFac_list[state_index] self.vPfuncNext = self.solution_next.vPfunc[state_index] self.mNrmMinNow = self.mNrmMin_list[state_index] self.BoroCnstNat = self.BoroCnstNatAll[state_index] self.set_and_update_values( self.solution_next, self.IncShkDstn, self.LivPrb, self.DiscFac ) self.DiscFacEff = ( self.DiscFac ) self.vPfuncNext = self.solution_next.vPfunc[state_index] if self.CubicBool: self.vPPfuncNext = self.solution_next.vPPfunc[state_index] if self.vFuncBool: self.vFuncNext = self.solution_next.vFunc[state_index] def calc_EndOfPrdvPP(self): def vpp_next(shocks, a_nrm): return shocks[0] ** (- self.CRRA - 1.0) * self.vPPfuncNext(self.m_nrm_next(shocks, a_nrm)) EndOfPrdvPP = ( self.DiscFacEff * self.Rfree * self.Rfree * self.PermGroFac ** (-self.CRRA - 1.0) * calc_expectation( self.IncShkDstn, vpp_next, self.aNrmNow ) ) return EndOfPrdvPP def make_EndOfPrdvFuncCond(self): VLvlNext = ( self.PermShkVals_temp ** (1.0 - self.CRRA) * self.PermGroFac ** (1.0 - self.CRRA) ) * self.vFuncNext(self.mNrmNext) EndOfPrdv_cond = self.DiscFacEff * np.sum(VLvlNext * self.ShkPrbs_temp, axis=0) EndOfPrdvNvrs_cond = self.uinv(EndOfPrdv_cond) EndOfPrdvNvrsP_cond = self.EndOfPrdvP_cond * self.uinvP(EndOfPrdv_cond) EndOfPrdvNvrs_cond = np.insert(EndOfPrdvNvrs_cond, 0, 0.0) EndOfPrdvNvrsP_cond = np.insert(EndOfPrdvNvrsP_cond, 0, EndOfPrdvNvrsP_cond[0]) aNrm_temp = np.insert(self.aNrm_cond, 0, self.BoroCnstNat) EndOfPrdvNvrsFunc_cond = CubicInterp( aNrm_temp, EndOfPrdvNvrs_cond, EndOfPrdvNvrsP_cond ) EndofPrdvFunc_cond = ValueFuncCRRA(EndOfPrdvNvrsFunc_cond, self.CRRA) return EndofPrdvFunc_cond def calc_EndOfPrdvPcond(self): EndOfPrdvPcond = ConsIndShockSolver.calc_EndOfPrdvP(self) return EndOfPrdvPcond def make_EndOfPrdvPfuncCond(self): self.aNrm_cond = self.prepare_to_calc_EndOfPrdvP() self.EndOfPrdvP_cond = self.calc_EndOfPrdvPcond() EndOfPrdvPnvrs_cond = self.uPinv( self.EndOfPrdvP_cond ) if self.CubicBool: EndOfPrdvPP_cond = self.calc_EndOfPrdvPP() EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond * self.uPinvP( self.EndOfPrdvP_cond ) if self.CubicBool: EndOfPrdvPnvrsFunc_cond = CubicInterp( self.aNrm_cond, EndOfPrdvPnvrs_cond, EndOfPrdvPnvrsP_cond, lower_extrap=True, ) else: EndOfPrdvPnvrsFunc_cond = LinearInterp( self.aNrm_cond, EndOfPrdvPnvrs_cond, lower_extrap=True ) EndofPrdvPfunc_cond = MargValueFuncCRRA( EndOfPrdvPnvrsFunc_cond, self.CRRA ) return EndofPrdvPfunc_cond def calc_EndOfPrdvP(self): aNrmMin_unique, state_inverse = np.unique( self.BoroCnstNat_list, return_inverse=True ) self.possible_transitions = self.MrkvArray > 0 EndOfPrdvP = np.zeros((self.StateCount, self.aXtraGrid.size)) EndOfPrdvPP = np.zeros((self.StateCount, self.aXtraGrid.size)) for k in range(aNrmMin_unique.size): aNrmMin = aNrmMin_unique[k] which_states = ( state_inverse == k ) aGrid = aNrmMin + self.aXtraGrid EndOfPrdvP_all = np.zeros((self.StateCount, self.aXtraGrid.size)) EndOfPrdvPP_all = np.zeros((self.StateCount, self.aXtraGrid.size)) for j in range(self.StateCount): if np.any( np.logical_and(self.possible_transitions[:, j], which_states) ): EndOfPrdvP_all[j, :] = self.EndOfPrdvPfunc_list[j](aGrid) if ( self.CubicBool ): EndOfPrdvPP_all[j, :] = self.EndOfPrdvPfunc_list[j].derivativeX( aGrid ) EndOfPrdvP_temp = np.dot(self.MrkvArray, EndOfPrdvP_all) EndOfPrdvP[which_states, :] = EndOfPrdvP_temp[ which_states, : ] if self.CubicBool: EndOfPrdvPP_temp = np.dot(self.MrkvArray, EndOfPrdvPP_all) EndOfPrdvPP[which_states, :] 
= EndOfPrdvPP_temp[which_states, :] LivPrb_tiled = np.tile( np.reshape(self.LivPrb, (self.StateCount, 1)), (1, self.aXtraGrid.size) ) self.EndOfPrdvP = LivPrb_tiled * EndOfPrdvP if self.CubicBool: self.EndOfPrdvPP = LivPrb_tiled * EndOfPrdvPP def calc_HumWealth_and_BoundingMPCs(self): WorstIncPrb_array = self.BoroCnstDependency * np.tile( np.reshape(self.WorstIncPrbAll, (1, self.StateCount)), (self.StateCount, 1) ) temp_array = self.MrkvArray * WorstIncPrb_array WorstIncPrbNow = np.sum( temp_array, axis=1 ) ExMPCmaxNext = ( np.dot( temp_array, self.Rfree_list ** (1.0 - self.CRRA) * self.solution_next.MPCmax ** (-self.CRRA), ) / WorstIncPrbNow ) ** (-1.0 / self.CRRA) DiscFacEff_temp = self.DiscFac * self.LivPrb self.MPCmaxNow = 1.0 / ( 1.0 + ((DiscFacEff_temp * WorstIncPrbNow) ** (1.0 / self.CRRA)) / ExMPCmaxNext ) self.MPCmaxEff = self.MPCmaxNow self.MPCmaxEff[self.BoroCnstNat_list < self.mNrmMin_list] = 1.0 hNrmPlusIncNext = self.Ex_IncNextAll + self.solution_next.hNrm self.hNrmNow = np.dot( self.MrkvArray, (self.PermGroFac_list / self.Rfree_list) * hNrmPlusIncNext ) temp = ( DiscFacEff_temp * np.dot( self.MrkvArray, self.solution_next.MPCmin ** (-self.CRRA) * self.Rfree_list ** (1.0 - self.CRRA), ) ) ** (1.0 / self.CRRA) self.MPCminNow = 1.0 / (1.0 + temp) def make_solution(self, cNrm, mNrm): solution = ( ConsumerSolution() ) if self.CubicBool: dcda = self.EndOfPrdvPP / self.uPP(np.array(self.cNrmNow)) MPC = dcda / (dcda + 1.0) self.MPC_temp = np.hstack( (np.reshape(self.MPCmaxNow, (self.StateCount, 1)), MPC) ) interpfunc = self.make_cubic_cFunc else: interpfunc = self.make_linear_cFunc for i in range(self.StateCount): self.hNrmNow_j = self.hNrmNow[i] self.MPCminNow_j = self.MPCminNow[i] if self.CubicBool: self.MPC_temp_j = self.MPC_temp[i, :] self.cFuncNowCnst = LinearInterp( [self.mNrmMin_list[i], self.mNrmMin_list[i] + 1.0], [0.0, 1.0] ) cFuncNowUnc = interpfunc(mNrm[i, :], cNrm[i, :]) cFuncNow = LowerEnvelope(cFuncNowUnc, self.cFuncNowCnst) vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA) solution_cond = ConsumerSolution( cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow ) if ( self.CubicBool ): solution_cond = self.add_vPPfunc(solution_cond) solution.append_solution(solution_cond) solution.mNrmMin = self.mNrmMin_list solution = self.add_MPC_and_human_wealth(solution) if self.vFuncBool: vFuncNow = self.make_vFunc(solution) solution.vFunc = vFuncNow return solution def make_linear_cFunc(self, mNrm, cNrm): cFuncUnc = LinearInterp( mNrm, cNrm, self.MPCminNow_j * self.hNrmNow_j, self.MPCminNow_j ) return cFuncUnc def make_cubic_cFunc(self, mNrm, cNrm): cFuncUnc = CubicInterp( mNrm, cNrm, self.MPC_temp_j, self.MPCminNow_j * self.hNrmNow_j, self.MPCminNow_j, ) return cFuncUnc def make_vFunc(self, solution): vFuncNow = [] for i in range(self.StateCount): mNrmMin = self.mNrmMin_list[i] mGrid = mNrmMin + self.aXtraGrid cGrid = solution.cFunc[i](mGrid) aGrid = mGrid - cGrid EndOfPrdv_all = np.zeros((self.StateCount, self.aXtraGrid.size)) for j in range(self.StateCount): if self.possible_transitions[i, j]: EndOfPrdv_all[j, :] = self.EndOfPrdvFunc_list[j](aGrid) EndOfPrdv = np.dot(self.MrkvArray[i, :], EndOfPrdv_all) vNrmNow = self.u(cGrid) + EndOfPrdv vPnow = self.uP(cGrid) vNvrs = self.uinv(vNrmNow) vNvrsP = vPnow * self.uinvP(vNrmNow) mNrm_temp = np.insert(mGrid, 0, mNrmMin) vNvrs = np.insert(vNvrs, 0, 0.0) vNvrsP = np.insert( vNvrsP, 0, self.MPCmaxEff[i] ** (-self.CRRA / (1.0 - self.CRRA)) ) MPCminNvrs = self.MPCminNow[i] ** (-self.CRRA / (1.0 - self.CRRA)) vNvrsFunc_i = 
CubicInterp( mNrm_temp, vNvrs, vNvrsP, MPCminNvrs * self.hNrmNow[i], MPCminNvrs ) vFunc_i = ValueFuncCRRA(vNvrsFunc_i, self.CRRA) vFuncNow.append(vFunc_i) return vFuncNow def _solve_ConsMarkov( solution_next, IncShkDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, MrkvArray, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool, ): solver = ConsMarkovSolver( solution_next, IncShkDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, MrkvArray, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool, ) solution_now = solver.solve() return solution_now class MarkovConsumerType(IndShockConsumerType): time_vary_ = IndShockConsumerType.time_vary_ + ["MrkvArray"] shock_vars_ = IndShockConsumerType.shock_vars_ + ["Mrkv"] state_vars = IndShockConsumerType.state_vars + ["Mrkv"] def __init__(self, **kwds): IndShockConsumerType.__init__(self, **kwds) self.solve_one_period = _solve_ConsMarkov if not hasattr(self, "global_markov"): self.global_markov = False def check_markov_inputs(self): StateCount = self.MrkvArray[0].shape[0] if not isinstance(self.Rfree, np.ndarray) or self.Rfree.shape != (StateCount,): raise ValueError( "Rfree not the right shape, it should an array of Rfree of all the states." ) for MrkvArray_t in self.MrkvArray: if not isinstance(MrkvArray_t, np.ndarray) or MrkvArray_t.shape != ( StateCount, StateCount, ): raise ValueError( "MrkvArray not the right shape, it should be of the size states*statres." ) for LivPrb_t in self.LivPrb: if not isinstance(LivPrb_t, np.ndarray) or LivPrb_t.shape != (StateCount,): raise ValueError( "Array in LivPrb is not the right shape, it should be an array of length equal to number of states" ) for PermGroFac_t in self.PermGroFac: if not isinstance(PermGroFac_t, np.ndarray) or PermGroFac_t.shape != ( StateCount, ): raise ValueError( "Array in PermGroFac is not the right shape, it should be an array of length equal to number of states" ) for IncShkDstn_t in self.IncShkDstn: if not isinstance(IncShkDstn_t, list): raise ValueError( "self.IncShkDstn is time varying and so must be a list" + "of lists of Distributions, one per Markov State. Found " + f"{self.IncShkDstn} instead" ) elif len(IncShkDstn_t) != StateCount: raise ValueError( "List in IncShkDstn is not the right length, it should be length equal to number of states" ) def pre_solve(self): AgentType.pre_solve(self) self.check_markov_inputs()
Apache License 2.0
ziirish/burp-ui
burpui/misc/parser/burp1.py
Parser._new_template_conf
python
def _new_template_conf(self, name, path):
    self._load_conf_templates(name, path)
    return self._templates_conf[name]
Create new template conf
https://github.com/ziirish/burp-ui/blob/668922753d97f0a71844d6985d9b8b2695fb2421/burpui/misc/parser/burp1.py#L204-L207
import re import os import json import codecs import shutil from glob import glob from .doc import Doc from .utils import Config from .openssl import OSSLConf, OSSLAuth from ...exceptions import BUIserverException from ...utils import NOTIF_ERROR, NOTIF_OK, NOTIF_WARN class Parser(Doc): pver = 1 _assume_server_version = "" _assume_client_version = "" def __init__(self, backend=None): self.backend = backend self.clients = [] self._server_conf = {} self._client_conf = {} self._clients_conf = {} self._templates_conf = {} self._static_templates_conf = {} self.clientconfdir = None self.clientconfdir_mtime = None self.templates = [] self.templates_dir = ".buitemplates" self.templates_path = None self.templates_mtime = None self.static_templates = [] self.static_templates_dir = ".buistatictemplates" self.static_templates_path = None self.static_templates_mtime = None self.filescache = {} self._configs = {} self.root = None if self.backend: self.init_app() def init_app(self, confsrv=None, confcli=None): self.conf = confsrv or getattr(self.backend, "burpconfsrv", None) self.confcli = confcli or getattr(self.backend, "burpconfcli", None) self.logger.info("Parser initialized with: {}".format(self.conf)) if self.conf: self.root = os.path.dirname(self.conf) self._load_all_conf() ca_conf = self.server_conf.get("ca_conf") if self._is_secure_path(ca_conf): self.openssl_conf = OSSLConf(ca_conf) else: self.openssl_conf = OSSLConf(os.devnull) self.openssl_auth = OSSLAuth( self.server_conf.get("ca_name"), self.openssl_conf, self.server_conf ) @property def server_conf(self): if self._server_conf.changed: self._load_conf_srv() return self._server_conf @property def client_conf(self): if self._client_conf.changed: self._load_conf_cli() return self._client_conf @property def clients_conf(self): if self._clientconfdir_changed(): self._load_conf_clients() for client, conf in self._clients_conf.items(): if conf.changed: conf.parse(True) return self._clients_conf def _cleanup(self): self._server_conf.clear() self._client_conf.clear() self._clients_conf.clear() def _refresh_cache(self, purge=False): if purge: self._cleanup() self._list_templates(True) self._list_static_templates(True) self._list_clients(True) def _load_conf_srv(self): self._server_conf = Config(self.conf, self, "srv") self._server_conf.parse() self.clientconfdir = self._server_conf.get("clientconfdir") self.templates_path = os.path.join(self.clientconfdir, self.templates_dir) if not os.path.exists(self.templates_path): try: os.makedirs(self.templates_path, 0o755) except OSError as exp: self.logger.warning(str(exp)) self.static_templates_path = os.path.join( self.clientconfdir, self.static_templates_dir ) if not os.path.exists(self.static_templates_path): try: os.makedirs(self.static_templates_path, 0o755) except OSError as exp: self.logger.warning(str(exp)) def _load_conf_cli(self): self._client_conf = Config(self.confcli, self, "cli") self._client_conf.parse() def _load_conf_clients(self, name=None, in_path=None): if name: clients = [{"name": name, "value": in_path}] else: clients = self._list_clients(True) for cli in clients: conf = self.server_conf.clone() path = cli["value"] or cli["name"] if cli["name"] not in self._clients_conf: if not os.path.isabs(path): path = os.path.join(self.clientconfdir, path) conf.add_file(path) conf.set_default(path) conf.parse() self._clients_conf[cli["name"]] = conf def _load_conf_templates(self, name=None, in_path=None): if name: templates = [{"name": name, "value": in_path}] else: templates = 
self._list_templates(True) for template in templates: conf = self.server_conf.clone() conf.set_template(True) path = os.path.join(self.templates_path, template["name"]) if template["name"] not in self._templates_conf: conf.add_file(path) conf.set_default(path) conf.parse() self._templates_conf[template["name"]] = conf def _load_conf_static_templates(self, name=None, in_path=None): if name: templates = [{"name": name, "value": in_path}] else: templates = self._list_static_templates(True) for template in templates: conf = self.server_conf.clone() path = os.path.join(self.static_templates_path, template["name"]) if template["name"] not in self._static_templates_conf: conf.add_file(path) conf.set_default(path) conf.parse() self._static_templates_conf[template["name"]] = conf def _load_all_conf(self): self._cleanup() self._load_conf_srv() self._load_conf_cli() self._load_conf_clients() self._load_conf_templates() def _new_client_conf(self, name, path): self._load_conf_clients(name, path) return self.clients_conf[name]
BSD 3-Clause New or Revised License
cohesity/management-sdk-python
cohesity_management_sdk/models/node_to_tiered_storage_directories_map.py
NodeToTieredStorageDirectoriesMap.__init__
python
def __init__(self, cassandra_node_name=None, tiered_storage_directories_vec=None):
    self.cassandra_node_name = cassandra_node_name
    self.tiered_storage_directories_vec = tiered_storage_directories_vec
Constructor for the NodeToTieredStorageDirectoriesMap class
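A construction sketch (the node name and directory paths are hypothetical):

    node_map = NodeToTieredStorageDirectoriesMap(
        cassandra_node_name="cass-node-1",
        tiered_storage_directories_vec=["/data/tier1", "/data/tier2"],
    )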
https://github.com/cohesity/management-sdk-python/blob/1c085d5a10f5f1a87b700e7ad1fc1dcabda41ae5/cohesity_management_sdk/models/node_to_tiered_storage_directories_map.py#L24-L31
class NodeToTieredStorageDirectoriesMap(object):

    _names = {
        "cassandra_node_name": 'cassandraNodeName',
        "tiered_storage_directories_vec": 'tieredStorageDirectoriesVec'
    }
Apache License 2.0
xanaduai/strawberryfields
strawberryfields/circuitdrawer.py
Circuit._init_document
python
def _init_document(self):
    self._document = INIT_DOCUMENT
Adds the required latex headers to the document.
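A short sketch of what the method produces (it simply copies the INIT_DOCUMENT preamble defined in the context below into the circuit's document buffer):

    circuit = Circuit(wires=2)
    circuit._init_document()
    circuit._document.splitlines()[0]   # -> "\documentclass{article}"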
https://github.com/xanaduai/strawberryfields/blob/c1eed81a93419cb9c28a6ca205925691063722ce/strawberryfields/circuitdrawer.py#L534-L536
import datetime import os DOCUMENT_CLASS = r"\documentclass{article}" EMPTY_PAGESTYLE = r"\pagestyle{empty}" QCIRCUIT_PACKAGE = r"\usepackage{qcircuit}" BEGIN_DOCUMENT = r"\begin{document}" DOCUMENT_END = r"\end{document}" CIRCUIT_START = r"\Qcircuit" COLUMN_SPACING = "@C={0}" ROW_SPACING = "@R={0}" UNIFORM_ROW_SPACING = "@!R" UNIFORM_COLUMN_SPACING = "@!C" UNIFORM_ELEMENT_SPACING = "@!" QUANTUM_WIRE = r"\qw" MULTI_QUANTUM_WIRE = r"\qw[{0}]" VERTICAL_QUANTUM_WIRE = r"\qwx[{0}]" WIRE_END = r"\qwa[{0}]" CLASSICAL_WIRE = r"\cw[{0}]" CLASSICAL_WIRE_END = r"\cwa[{0}]" VERTICAL_CLASSICAL_WIRE = r"\cwx[{0}]" LABELLED_GATE = r"\gate{{{0}}}" TARGET = r"\targ" SWAP = r"\qswap" MULTIGATE = r"\multigate{{{0}}}{{{1}}}" NON_ADJACENT_MULTIGATE = r"\sgate{{{0}}}{{{1}}}" GHOST = r"\ghost{{{0}}}" CLASSICAL_GHOST = r"\cghost{{{0}}}" NO_GHOST = r"\nghost{{{0}}}" CONTROL = r"\ctrl{{{0}}}" CONTROL_ON_ZERO = r"\ctrlo{{{0}}}" CLASSICAL_CONTROL = r"\cctrl{{{0}}}" CLASSICAL_CONTROL_ON_ZERO = r"\cctrlo{{{0}}}" ISOLATED_CONTROL = r"\control" ISOLATED_CONTROL_ON_ZERO = r"\controlo" METER = r"\meter" BASIS_METER = r"\meterB{{{0}}}" SPLIT_BASIS_METER = r"\smeterB{{{0}}}{{{1}}}" MEASURE = r"\measuretab{{{0}}}" MULTIMEASURE = r"\multimeasure{{{0}}}{{{1}}}" LEFT_WIRE_LABEL = r"\lstick{{{0}}}" RIGHT_WIRE_LABEL = r"\rstick{{{0}}}" BRA = r"\bra{{{0}}}" KET = r"\ket{{{0}}}" HADAMARD_COMP = LABELLED_GATE.format("H") PAULI_X_COMP = LABELLED_GATE.format("X") PAULI_Y_COMP = LABELLED_GATE.format("Y") PAULI_Z_COMP = LABELLED_GATE.format("Z") D_COMP = LABELLED_GATE.format("D") S_COMP = LABELLED_GATE.format("S") R_COMP = LABELLED_GATE.format("R") P_COMP = LABELLED_GATE.format("P") V_COMP = LABELLED_GATE.format("V") K_COMP = LABELLED_GATE.format("K") FOURIER_COMP = LABELLED_GATE.format("F") BS_MULTI_COMP = "BS" S_MULTI_COMP = "S" WIRE_OPERATION = "& {0}" WIRE_TERMINATOR = r"\\" + "\n" CIRCUIT_BODY_TERMINATOR = "}\n" CIRCUIT_BODY_START = " {" + "\n" INIT_DOCUMENT = ( DOCUMENT_CLASS + "\n" + EMPTY_PAGESTYLE + "\n" + QCIRCUIT_PACKAGE + "\n" + BEGIN_DOCUMENT + "\n" + CIRCUIT_START ) PIPE = "|" LINE_RETURN = "\n" class NotDrawableException(Exception): pass class ModeMismatchException(Exception): pass class UnsupportedGateException(Exception): pass class Circuit: _circuit_matrix = [] def __init__(self, wires): self._document = "" self._circuit_matrix = [[QUANTUM_WIRE.format(1)] for wire in range(wires)] self._column_spacing = None self._row_spacing = None self.single_mode_gates = { "Xgate": self._x, "Zgate": self._z, "Dgate": self._d, "Sgate": self._s, "Rgate": self._r, "Pgate": self._p, "Vgate": self._v, "Kgate": self._k, "Fourier": self._fourier, } self.two_mode_gates = { "CXgate": self._cx, "CZgate": self._cz, "CKgate": self._ck, "BSgate": self._bs, "S2gate": self._s2, } def _gate_from_operator(self, op): operator = str(op).split(PIPE)[0] method = None mode = None for two_mode_gate in self.two_mode_gates: if two_mode_gate in operator: method = self.two_mode_gates[two_mode_gate] mode = 2 if method is None: for single_mode_gate in self.single_mode_gates: if single_mode_gate in operator: method = self.single_mode_gates[single_mode_gate] mode = 1 return method, mode def parse_op(self, op): if not op.__class__.__name__ == "Command": return method, mode = self._gate_from_operator(op) wires = list(map(lambda register: register.ind, op.reg)) if method is None: raise UnsupportedGateException( "Unsupported operation {0} not printable by circuit builder!".format(str(op)) ) if mode == len(wires): method(*wires) else: raise ModeMismatchException( "{0} 
mode gate applied to {1} wires!".format(mode, len(wires)) ) def _x(self, wire): self._single_mode_gate(wire, PAULI_X_COMP) def _z(self, wire): self._single_mode_gate(wire, PAULI_Z_COMP) def _s(self, wire): self._single_mode_gate(wire, S_COMP) def _d(self, wire): self._single_mode_gate(wire, D_COMP) def _r(self, wire): self._single_mode_gate(wire, R_COMP) def _p(self, wire): self._single_mode_gate(wire, P_COMP) def _v(self, wire): self._single_mode_gate(wire, V_COMP) def _k(self, wire): self._single_mode_gate(wire, K_COMP) def _fourier(self, wire): self._single_mode_gate(wire, FOURIER_COMP) def _cx(self, source_wire, target_wire): self._controlled_mode_gate(source_wire, target_wire, TARGET) def _cz(self, source_wire, target_wire): self._controlled_mode_gate(source_wire, target_wire, PAULI_Z_COMP) def _ck(self, source_wire, target_wire): self._controlled_mode_gate(source_wire, target_wire, K_COMP) def _bs(self, first_wire, second_wire): self._multi_mode_gate(BS_MULTI_COMP, [first_wire, second_wire]) def _s2(self, first_wire, second_wire): self._multi_mode_gate(S_MULTI_COMP, [first_wire, second_wire]) def _single_mode_gate(self, wire, circuit_op): matrix = self._circuit_matrix wire_ops = matrix[wire] if Circuit._is_empty(wire_ops[-1]): wire_ops[-1] = circuit_op else: wire_ops.append(circuit_op) for prev_wire in matrix[:wire]: prev_wire.append(QUANTUM_WIRE.format(1)) for post_wire in matrix[wire + 1 :]: post_wire.append(QUANTUM_WIRE.format(1)) def _multi_mode_gate(self, circuit_op, wires): matrix = self._circuit_matrix if not self._on_empty_column(): self._add_column() wires.sort() first_wire = wires.pop(0) wire_ops = matrix[first_wire] wire_ops[-1] = MULTIGATE.format(1, circuit_op) matrix[first_wire] = wire_ops previous_wire = first_wire for wire in wires: if not previous_wire == wire - 1: raise NotDrawableException( "{0} multi-mode gate applied to non-adjacent wires!".format(circuit_op) ) wire_ops = matrix[wire] wire_ops[-1] = GHOST.format(circuit_op) matrix[wire] = wire_ops previous_wire = wire self._circuit_matrix = matrix def _controlled_mode_gate(self, source_wire, target_wire, circuit_op): matrix = self._circuit_matrix source_ops = matrix[source_wire] target_ops = matrix[target_wire] distance = target_wire - source_wire if Circuit._is_empty(source_ops[-1]) and Circuit._is_empty(target_ops[-1]): source_ops[-1] = CONTROL.format(distance) target_ops[-1] = circuit_op else: for index, wire_ops in enumerate(matrix): if index == source_wire: wire_ops.append(CONTROL.format(distance)) elif index == target_wire: wire_ops.append(circuit_op) else: wire_ops.append(QUANTUM_WIRE.format(1)) def _on_empty_column(self): matrix = self._circuit_matrix empty_column = True for wire in enumerate(matrix): wire_ops = wire[1] if not Circuit._is_empty(wire_ops[-1]): empty_column = False break return empty_column def _add_column(self): for wire in self._circuit_matrix: wire.append(QUANTUM_WIRE.format(1)) @staticmethod def _is_empty(op): return op == QUANTUM_WIRE.format(1) def _set_column_spacing(self, spacing): self._column_spacing = spacing def _set_row_spacing(self, spacing): self._row_spacing = spacing @staticmethod def _pad_with_spaces(string): return " " + string + " " def dump_to_document(self): self._init_document() self._apply_spacing() self._begin_circuit() self._add_column() for wire_ops in enumerate(self._circuit_matrix): for wire_op in wire_ops[1]: self._write_operation_to_document(wire_op) self._end_wire() self._end_circuit() self._end_document() return self._document def compile_document(self, 
tex_dir="./circuit_tex"): tex_dir = os.path.abspath(tex_dir) if not os.path.isdir(tex_dir): os.mkdir(tex_dir) file_name = "output_{0}".format(datetime.datetime.now().strftime("%Y_%B_%d_%I:%M%p")) file_path = "{0}/{1}.tex".format(tex_dir, file_name) with open(file_path, "w+") as output_file: output_file.write(self._document) return file_path
Apache License 2.0
tktech/pynbt
pynbt.py
BaseTag.pretty
python
def pretty(self, indent=0, indent_str=' '):
    return '{0}{1}({2!r}): {3!r}'.format(
        indent_str * indent,
        self.__class__.__name__,
        self.name,
        self.value
    )
Pretty-print a tag in the same general style as Markus's example output.
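A usage sketch (tag name and value are invented):

    tag = TAG_String('hello', name='greeting')
    tag.pretty()           # "TAG_String('greeting'): 'hello'"
    tag.pretty(indent=1)   # the same line, indented by one indent_str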
https://github.com/tktech/pynbt/blob/bd7e545fee00e509276ee3dc90cb23949e450cda/pynbt.py#L163-L173
__all__ = ( 'NBTFile', 'TAG_Byte', 'TAG_Short', 'TAG_Int', 'TAG_Long', 'TAG_Float', 'TAG_Double', 'TAG_Byte_Array', 'TAG_String', 'TAG_List', 'TAG_Compound', 'TAG_Int_Array', 'TAG_Long_Array' ) from functools import partial from struct import unpack, pack import mutf8 class BaseTag(object): def __init__(self, value, name=None): self.name = name self.value = value @staticmethod def _read_utf8(read): name_length = read('h', 2)[0] return mutf8.decode_modified_utf8(read.src.read(name_length)) @staticmethod def _write_utf8(write, value): encoded_value = mutf8.encode_modified_utf8(value) write('h', len(encoded_value)) write.dst.write(encoded_value) @classmethod def read(cls, read, has_name=True): name = cls._read_utf8(read) if has_name else None if cls is TAG_Compound: final = {} while True: tag = read('b', 1)[0] if tag == 0: break tmp = _tags[tag].read(read) final[tmp.name] = tmp return cls(final, name=name) elif cls is TAG_List: tag_type, length = read('bi', 5) tag_read = _tags[tag_type].read return cls( _tags[tag_type], [tag_read(read, has_name=False) for x in range(0, length)], name=name ) elif cls is TAG_String: value = cls._read_utf8(read) return cls(value, name=name) elif cls is TAG_Byte_Array: length = read('i', 4)[0] return cls(bytearray(read.src.read(length)), name=name) elif cls is TAG_Int_Array: length = read('i', 4)[0] return cls(read('{0}i'.format(length), length * 4), name=name) elif cls is TAG_Long_Array: length = read('i', 4)[0] return cls(read('{0}q'.format(length), length * 8), name=name) elif cls is TAG_Byte: return cls(read('b', 1)[0], name=name) elif cls is TAG_Short: return cls(read('h', 2)[0], name=name) elif cls is TAG_Int: return cls(read('i', 4)[0], name=name) elif cls is TAG_Long: return cls(read('q', 8)[0], name=name) elif cls is TAG_Float: return cls(read('f', 4)[0], name=name) elif cls is TAG_Double: return cls(read('d', 8)[0], name=name) elif cls is TAG_End: return cls(read('2b', 2)[0], name=name) def write(self, write): if self.name is not None: if isinstance(self, NBTFile): write('b', 0x0A) else: write('b', _tags.index(self.__class__)) self._write_utf8(write, self.name) if isinstance(self, TAG_List): write('bi', _tags.index(self.type_), len(self.value)) for item in self.value: if not isinstance(item, self.type_): item = self.type_(item) item.write(write) elif isinstance(self, TAG_Compound): for v in self.value.values(): v.write(write) write('b', 0) elif isinstance(self, TAG_String): self._write_utf8(write, self.value) elif isinstance(self, TAG_Int_Array): length = len(self.value) write('i{0}i'.format(length), length, *self.value) elif isinstance(self, TAG_Long_Array): length = len(self.value) write('i{0}q'.format(length), length, *self.value) elif isinstance(self, TAG_Byte_Array): write('i', len(self.value)) write.dst.write(bytes(self.value)) elif isinstance(self, TAG_Byte): write('b', self.value) elif isinstance(self, TAG_Short): write('h', self.value) elif isinstance(self, TAG_Int): write('i', self.value) elif isinstance(self, TAG_Long): write('q', self.value) elif isinstance(self, TAG_Float): write('f', self.value) elif isinstance(self, TAG_Double): write('d', self.value)
MIT License
lululxvi/deeponet
seq2seq/learner/integrator/hamiltonian/stormer_verlet.py
SV.__init__
python
def __init__(self, H, dH, iterations=10, order=4, N=1):
    self.H = H
    self.dH = dH
    self.iterations = iterations
    self.order = order
    self.N = N
H: H(x) or None
dH: dp,dq=dH(p,q) or None
``iterations`` is encouraged to be 1 if H is separable.
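A construction sketch for a separable Hamiltonian H(p, q) = (p**2 + q**2) / 2, whose gradient function returns (dp, dq); the integrator settings here are illustrative only:

    dH = lambda p, q: (p, q)
    integrator = SV(H=None, dH=dH, iterations=1, order=4, N=1)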
https://github.com/lululxvi/deeponet/blob/7a110731b73bcdeabfcff819f353fa87b0543cff/seq2seq/learner/integrator/hamiltonian/stormer_verlet.py#L12-L22
import numpy as np
import torch

from ...utils import grad


class SV:
Apache License 2.0
jmchilton/galaxy-central
tools/mdea/AlphaSubstPrep.py
AlphaSubstPrep.PrevAlphabetChar
python
def PrevAlphabetChar(self,CurrChar):
    RevAlphabet = "ZYXWVUTSRQPONMLKJIHGFEDCBA"
    if CurrChar == "":
        return "Z"
    else:
        return RevAlphabet[RevAlphabet.find(CurrChar) + 1]
This function will return the previous character in the alphabet
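A usage sketch (assuming an AlphaSubstPrep instance can be created with no arguments, as its constructor in the context suggests); the method walks backwards through the alphabet starting from 'Z':

    prep = AlphaSubstPrep()
    prep.PrevAlphabetChar("")    # 'Z', the starting point
    prep.PrevAlphabetChar("Z")   # 'Y'
    prep.PrevAlphabetChar("B")   # 'A'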
https://github.com/jmchilton/galaxy-central/blob/31e2fd3a32b06ddfba06ae5b044efdce1d93f08c/tools/mdea/AlphaSubstPrep.py#L270-L276
import re import string class AlphaSubstPrep: def __init__(self): def PrepBaseML(self,AnalysisType,TreeData,SequenceCount,CompType,UserRandomKey,BaseMLLocation,SubstModel,GetSE,DoIntAlpha,DoExtAlpha,GalaxyLocation,FixAlpha,AlphaValue,FixKappa,KappaValue,FixRho,RhoValue): self.Group1Branches = [] self.Group2Branches = [] self.Group1IntBranches = [] self.Group2IntBranches = [] self.Group1ExtBranches = [] self.Group2ExtBranches = [] self.InternalBranches = [] self.ExternalBranches = [] if int(AnalysisType) == 0: AlignmentTogether = 1 else: AlignmentTogether = 0 SpaceRemover = re.compile(" ") TreeData = SpaceRemover.sub("",TreeData) self.WriteTreeDef(AlignmentTogether,TreeData,SequenceCount,CompType,UserRandomKey,DoIntAlpha,DoExtAlpha) self.WriteBaseMLctl(BaseMLLocation,UserRandomKey,SubstModel,GetSE,GalaxyLocation,FixAlpha,AlphaValue,FixKappa,KappaValue,FixRho,RhoValue) def WriteBaseMLctl(self,BaseMLLocation,UserRandomKey,SubstModel,GetSE,GalaxyLocation,FixAlpha,AlphaValue,FixKappa,KappaValue,FixRho,RhoValue): OutFile = GalaxyLocation + "tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.out" TreeFile = GalaxyLocation + "tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.tree" SeqFile = GalaxyLocation + "tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.seq" BaseMLControlFile = "\n outfile = " + OutFile + "\n treefile = " + TreeFile + "\n seqfile = " + SeqFile + "\n\n" BaseMLControlFile += " noisy = 0\n verbose = 0\n runmode = 0\n model = " + str(SubstModel) + "\n" BaseMLControlFile += " Mgene = 0\n fix_kappa = " + str(FixKappa) + "\n kappa = " + str(KappaValue) + "\n" BaseMLControlFile += " fix_alpha = " + str(FixAlpha) + "\n alpha = " + str(AlphaValue) + "\n Malpha = 0\n" BaseMLControlFile += " ncatG = 5\n fix_rho = " + str(FixRho) + "\n rho = " + str(RhoValue) + "\n nparK = 0\n clock = 0\n nhomo = 0\n" BaseMLControlFile += " getSE = " + str(GetSE) + "\n RateAncestor = 0\n Small_Diff = 7e-6\n" BaseMLControlFile += " cleandata = 1\n method = 0\n" self.WriteFile("tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-baseml.ctl",BaseMLControlFile) def WriteTreeDef(self,AlignmentTogether,TreeData,SequenceCount,CompType,UserRandomKey,DoIntAlpha,DoExtAlpha): RemoveAssign = re.compile('[_AWXYZ]') BaseTreeDef = RemoveAssign.sub("",TreeData) self.BranchDescriptions = self.ExplodeTree(BaseTreeDef,SequenceCount) self.DetermineBranchOwnership(AlignmentTogether, self.BranchDescriptions,TreeData,CompType,SequenceCount,DoIntAlpha,DoExtAlpha) BaseTreeDef = " " + str(SequenceCount) + " 1\n\n" + str(BaseTreeDef) self.WriteFile("tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.tree",BaseTreeDef) def DetermineBranchOwnership(self,AlignmentTogether,BranchDesc,TreeData,CompType,SequenceCount,DoIntAlpha,DoExtAlpha): Group1IDS = [] Group2IDS = [] OrderedBranchList = [] Group1Tag = self.ReturnGroupTag(CompType,"1") Group2Tag = self.ReturnGroupTag(CompType,"2") CommaSplitter = re.compile(',') TwoPeriodSplitter = re.compile('\.\.') UnderScoreSplitter = re.compile('_') RemoveParens = re.compile('[\(\)]') if str(AlignmentTogether) == "1": TreeDefinition = RemoveParens.sub("",TreeData) TreeDefinition = CommaSplitter.split(TreeDefinition) for NodeAssignment in TreeDefinition: if CompType != 0: NodeParts = UnderScoreSplitter.split(NodeAssignment) if NodeParts[1][0:] == Group1Tag: Group1IDS.append(NodeParts[0]) elif NodeParts[1][0:] == Group2Tag: Group2IDS.append(NodeParts[0]) HighestBranch = 0 for Branch in BranchDesc: BranchParts = TwoPeriodSplitter.split(Branch) if int(BranchParts[0]) > int(HighestBranch): 
HighestBranch = BranchParts[0] if int(BranchParts[1]) > int(HighestBranch): HighestBranch = BranchParts[1] for CountingIndex in range(1,int(HighestBranch)): for BranchIndex in range(0,len(BranchDesc)): if str(BranchDesc[BranchIndex]) != "": BranchParts = TwoPeriodSplitter.split(BranchDesc[BranchIndex]) if str(BranchParts[0]) == str(CountingIndex) or str(BranchParts[1]) == str(CountingIndex): BranchString = str(BranchParts[0]) + ".." + str(BranchParts[1]) if not OrderedBranchList.__contains__(BranchString): OrderedBranchList.append(BranchString) OriginNode = int(SequenceCount) + 1 InternalBranchesPresent1 = 0 ExternalBranchesPresent1 = 0 InternalBranchesPresent2 = 0 ExternalBranchesPresent2 = 0 for Branch in OrderedBranchList: BranchParts = TwoPeriodSplitter.split(Branch) BranchString = str(BranchParts[0]) + ".." + str(BranchParts[1]) if str(AlignmentTogether) == "1": for Group1 in Group1IDS: if int(Group1) == int(BranchParts[0]) or int(Group1) == int(BranchParts[1]): if str(OriginNode) != str(BranchParts[0]) and str(OriginNode) != str(BranchParts[1]): if not Group1IDS.__contains__(str(BranchParts[0])): Group1IDS.append(str(BranchParts[0])) if not Group1IDS.__contains__(str(BranchParts[1])): Group1IDS.append(str(BranchParts[1])) if not self.Group1Branches.__contains__(BranchString): self.Group1Branches.append(BranchString) if int(DoIntAlpha) == 1: if int(BranchParts[0]) > int(SequenceCount) and int(BranchParts[1]) > int(SequenceCount): if not self.Group1IntBranches.__contains__(BranchString): self.Group1IntBranches.append(BranchString) InternalBranchesPresent1 = 1 if int(DoExtAlpha) == 1: if int(BranchParts[0]) <= int(SequenceCount) or int(BranchParts[1]) <= int(SequenceCount): if not self.Group1ExtBranches.__contains__(BranchString): self.Group1ExtBranches.append(BranchString) ExternalBranchesPresent1 = 1 for Group2 in Group2IDS: BranchString = str(BranchParts[0]) + ".." + str(BranchParts[1]) if str(Group2) == str(BranchParts[0]) or str(Group2) == str(BranchParts[1]): if str(OriginNode) != str(BranchParts[0]) and str(OriginNode) != str(BranchParts[1]): if not Group2IDS.__contains__(str(BranchParts[0])): Group2IDS.append(str(BranchParts[0])) if not Group2IDS.__contains__(str(BranchParts[1])): Group2IDS.append(str(BranchParts[1])) if not self.Group2Branches.__contains__(BranchString): self.Group2Branches.append(str(BranchParts[0]) + ".." 
+ str(BranchParts[1])) if int(DoIntAlpha) == 1: if int(BranchParts[0]) > int(SequenceCount) and int(BranchParts[1]) > int(SequenceCount): if not self.Group2IntBranches.__contains__(BranchString): self.Group2IntBranches.append(BranchString) InternalBranchesPresent2 = 1 if int(DoExtAlpha) == 1: if int(BranchParts[0]) <= int(SequenceCount) or int(BranchParts[1]) <= int(SequenceCount): if not self.Group2ExtBranches.__contains__(BranchString): self.Group2ExtBranches.append(BranchString) ExternalBranchesPresent2 = 1 else: if int(DoIntAlpha) == 1: if int(BranchParts[0]) > int(SequenceCount) and int(BranchParts[1]) > int(SequenceCount): if not self.InternalBranches.__contains__(BranchString): self.InternalBranches.append(BranchString) InternalBranchesPresent1 = 1 InternalBranchesPresent2 = 1 if int(DoExtAlpha) == 1: if int(BranchParts[0]) <= int(SequenceCount) or int(BranchParts[1]) <= int(SequenceCount): if not self.ExternalBranches.__contains__(BranchString): self.ExternalBranches.append(BranchString) if int(BranchParts[0]) != int(SequenceCount) + 1 and int(BranchParts[1]) != int(SequenceCount) + 1: ExternalBranchesPresent1 = 1 ExternalBranchesPresent2 = 1 if int(DoIntAlpha) == 1 and InternalBranchesPresent1 == 1 and InternalBranchesPresent2 == 1: self.DoIntAlpha = 1 else: self.DoIntAlpha = 0 if int(DoExtAlpha) == 1 and ExternalBranchesPresent1 == 1 and ExternalBranchesPresent2 == 1: self.DoExtAlpha = 1 else: self.DoExtAlpha = 0 def ReturnGroupTag(self,CompType,GroupID): if str(CompType) == "1": if GroupID == "1": return "Y" else: return "X" elif str(CompType) == "2": if GroupID == "1": return "Y" else: return "A" elif str(CompType) == "3": if GroupID == "1": return "X" else: return "A" elif str(CompType) == "4": if GroupID == "1": return "Z" else: return "W" elif str(CompType) == "5": if GroupID == "1": return "Z" else: return "A" elif str(CompType) == "6": if GroupID == "1": return "W" else: return "A" return "" def ExplodeTree(self,TreeData,SequenceCount): NodePairs = [] NodeIDs = [] NodeLetters = [] NodeComponents = [] BranchDescription = [] FoundPair = "0" Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" CurrentAlphabetChar = "" CommaSplitter = re.compile(',') while (1): FoundPair = "0" for RevIndex in range(len(TreeData)-1,-1,-1): if TreeData[RevIndex] == ")": for InnerRevIndex in range(RevIndex-1,-1,-1): if TreeData[InnerRevIndex] == ")": break elif TreeData[InnerRevIndex] == "(": FoundPair = "1" NodePairs.append(TreeData[InnerRevIndex+1:RevIndex]) CurrentAlphabetChar = self.PrevAlphabetChar(CurrentAlphabetChar) NodeLetters.append(CurrentAlphabetChar) TreeData = TreeData[:InnerRevIndex] + str(CurrentAlphabetChar) + TreeData[RevIndex+1:] break if FoundPair == "1": break if FoundPair == "0": break SequenceCount = int(SequenceCount) for NodePairsIndex in range(len(NodePairs),-1,-1): SequenceCount += 1 for NodePairsIndex in range(len(NodePairs),0,-1): SequenceCount -= 1 NodeIDs.append(SequenceCount) for NodePairsIndex in range(len(NodePairs)-1,-1,-1): NodeComponents = CommaSplitter.split(NodePairs[NodePairsIndex]) for NodeItem in NodeComponents: if Alphabet.find(NodeItem) >= 0: BranchDescription.append(str(NodeIDs[NodePairsIndex]) + ".." + str(self.FindParent(NodeLetters,NodeIDs,NodeItem))) else: BranchDescription.append(str(NodeIDs[NodePairsIndex]) + ".." + str(NodeItem)) return BranchDescription def FindParent(self,Letters,IDs,SearchLetter): for Index in range(0,len(Letters)): if Letters[Index] == SearchLetter: return IDs[Index] return 0
MIT License
devbisme/myhdlpeek
myhdlpeek/peekerbase.py
PeekerBase.clear
python
def clear(cls):
    cls.peekers = dict()
    cls.unit_time = None
Clear the global list of Peekers.
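A minimal sketch of the classmethod's effect, assuming the package and its plotting dependencies are installed; the fake registry entry only exists to show that clear() resets the shared state:

from myhdlpeek.peekerbase import PeekerBase

PeekerBase.peekers['clk[0]'] = object()   # stand-in for a registered Peeker
PeekerBase.clear()
assert PeekerBase.peekers == {}
assert PeekerBase.unit_time is None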
https://github.com/devbisme/myhdlpeek/blob/31fdb25b077fa5720136f171db7c76193a051911/myhdlpeek/peekerbase.py#L129-L132
from __future__ import absolute_import, division, print_function, unicode_literals import json import re from builtins import dict, int, str, super from collections import namedtuple import IPython.display as DISP import matplotlib.pyplot as plt import nbwavedrom from future import standard_library from tabulate import tabulate from .trace import * standard_library.install_aliases() class PeekerBase(object): peekers = dict() USE_JUPYTER = False USE_WAVEDROM = False unit_time = None def __new__(cls, *args, **kwargs): if cls is PeekerBase: raise TypeError("PeekerBase class may not be instantiated") return object.__new__(cls) def __init__(self, signal, name, **kwargs): self.trace = Trace() self.config(**kwargs) self.name_dup = False index = 0 nm = "{name}[{index}]".format(**locals()) while nm in self.peekers: self.peekers[nm].name_dup = True self.name_dup = True index += 1 nm = "{name}[{index}]".format(**locals()) self.trace.name = nm self.signal = signal self.peekers[self.trace.name] = self @classmethod def config_defaults(cls, **kwargs): Trace.config_defaults(**kwargs) global clear_traces, show_traces, show_waveforms, show_text_table, show_html_table, export_dataframe cls.USE_WAVEDROM = kwargs.pop("use_wavedrom", cls.USE_WAVEDROM) if cls.USE_WAVEDROM: cls.show_waveforms = cls.to_wavedrom cls.show_traces = traces_to_wavedrom else: cls.show_waveforms = cls.to_matplotlib cls.show_traces = traces_to_matplotlib def shw_wvfrms(*args, **kwargs): return cls.show_waveforms(*args, **kwargs) show_waveforms = shw_wvfrms def shw_trcs(*args, **kwargs): return cls.show_traces(*args, **kwargs) show_traces = shw_trcs clear_traces = cls.clear_traces export_dataframe = cls.to_dataframe show_text_table = cls.to_text_table show_html_table = cls.to_html_table cls.USE_JUPYTER = kwargs.pop("use_jupyter", cls.USE_JUPYTER) for k, v in kwargs.items(): setattr(cls, k, copy(v)) def config(self, **kwargs): self.trace.config(**kwargs) for k, v in kwargs.items(): if isinstance(v, dict): setattr(self, k, copy(getattr(self, k, {}))) getattr(self, k).update(v) else: setattr(self, k, copy(v)) @classmethod
MIT License
a5kin/xentica
xentica/core/base.py
CellularAutomaton.save
python
def save(self, filename):
    with open(filename, "wb") as ca_file:
        ca_state = {
            "cells": self.gpu.arrays.cells.get(),
            "colors": self.gpu.arrays.colors.get(),
            "random": self.random,
        }
        pickle.dump(ca_state, ca_file)
Save the CA state into the file given by ``filename``.
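A short sketch of what ends up in the file; ca stands for an already constructed CellularAutomaton subclass instance (building one needs an experiment class and a GPU, which is out of scope here):

import pickle

ca.save('ca_state.pkl')

# The file holds a plain dict with the two device arrays (pulled back to host
# memory by .get()) plus the random-state object:
with open('ca_state.pkl', 'rb') as ca_file:
    state = pickle.load(ca_file)
print(sorted(state.keys()))   # ['cells', 'colors', 'random']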
https://github.com/a5kin/xentica/blob/ca08fac9f85af71c9d6d98545a33d50323f851b3/xentica/core/base.py#L755-L763
import functools import itertools import operator import threading import pickle import collections from copy import deepcopy import numpy as np from pycuda.autoinit import context from pycuda.compiler import SourceModule import pycuda.gpuarray as gpuarray from xentica.bridge import MoireBridge from xentica.seeds.random import LocalRandom from xentica.core.properties import Property, ContainerProperty from xentica.core.renderers import RendererPlain from xentica.core.exceptions import XenticaException __all__ = ['context', 'BSCA', 'CellularAutomaton', 'CachedNeighbor'] class CachedNeighbor: def __init__(self): self.main = ContainerProperty() self.buffer = ContainerProperty() class BSCA(type): mandatory_fields = ( 'dimensions', 'lattice', 'neighborhood', 'border', ) @classmethod def __prepare__(mcs, _name, _bases): return collections.OrderedDict() def __new__(mcs, name, bases, attrs): keys = [] for key in attrs.keys(): if key not in ('__module__', '__qualname__'): keys.append(key) attrs['__ordered__'] = keys new_class = super().__new__(mcs, name, bases, attrs) mcs._parents = [b for b in bases if isinstance(b, BSCA)] if not mcs._parents: return new_class mcs._prepare_topology(new_class, attrs) new_class.main = None new_class.buffers = None new_class.neighbors = None mcs._prepare_properties(new_class, bases, attrs) new_class.constants = {} if not hasattr(new_class, 'renderer'): new_class.renderer = RendererPlain() num_dimensions = new_class.topology.dimensions new_class.size = tuple(1 for i in range(num_dimensions)) return new_class def _prepare_topology(cls, attrs): if hasattr(cls, 'Topology'): attrs['Topology'] = cls.Topology cls.topology = attrs.get('Topology') if cls.topology is None: raise XenticaException("No Topology class declared.") for field in cls.mandatory_fields: if not hasattr(cls.topology, field): msg = "No %s declared in Topology class." 
% field raise XenticaException(msg) cls.topology.lattice = deepcopy(cls.topology.lattice) cls.topology.neighborhood = deepcopy(cls.topology.neighborhood) cls.topology.border = deepcopy(cls.topology.border) cls.topology.lattice.dimensions = cls.topology.dimensions cls.topology.neighborhood.dimensions = cls.topology.dimensions cls.topology.border.dimensions = cls.topology.dimensions cls.topology.neighborhood.topology = cls.topology cls.topology.border.topology = cls.topology def _prepare_properties(cls, bases, attrs): cls.main = ContainerProperty() cls.buffers = [] cls.neighbors = [] cls.meta = type('MetaParams', (object,), dict()) num_neighbors = len(cls.topology.neighborhood) for i in range(num_neighbors): cls.buffers.append(ContainerProperty()) cls.neighbors.append(CachedNeighbor()) attrs_items = [base_class.__dict__.items() for base_class in bases] attrs_items.append(attrs.items()) restricted_names = {"main", "buffer"} for obj_name, obj in itertools.chain.from_iterable(attrs_items): allowed_name = obj_name not in restricted_names if isinstance(obj, Property) and allowed_name: cls.main[obj_name] = deepcopy(obj) vname = "_cell_%s" % obj_name cls.main[obj_name].var_name = vname for i in range(num_neighbors): buffers = cls.buffers buffers[i][obj_name] = deepcopy(obj) vname = "_bcell_%s%d" % (obj_name, i) buffers[i][obj_name].var_name = vname neighbor = cls.neighbors[i] neighbor.main[obj_name] = deepcopy(obj) vname = "_dcell_%s%d" % (obj_name, i) neighbor.main[obj_name].var_name = vname neighbor.buffer[obj_name] = deepcopy(obj) vname = "_dbcell_%s%d" % (obj_name, i) neighbor.buffer[obj_name].var_name = vname elif obj.__class__.__name__ == "Parameter" and allowed_name: obj.name = obj_name setattr(cls.meta, obj_name, obj) cls.main.buf_num = 0 cls.main.nbr_num = -1 cls.main.var_name = "_cell" for i in range(num_neighbors): rev_i = cls.topology.neighborhood.rev_index_map[i] cls.buffers[i].buf_num = rev_i + 1 cls.buffers[i].nbr_num = -1 cls.buffers[i].var_name = "_bcell%i" % i cls.neighbors[i].main.buf_num = 0 cls.neighbors[i].main.nbr_num = i cls.neighbors[i].main.var_name = "_dcell%d" % i cls.neighbors[i].buffer.buf_num = i + 1 cls.neighbors[i].buffer.nbr_num = i cls.neighbors[i].buffer.var_name = "_dbcell%d" % i class Translator: def __init__(self): self.cuda_source = "" self._func_body = "" self._deferred_writes = set() self._declarations = set() self._unpacks = set() self._params = {} self._emit_params = [] self._absorb_params = [] self._render_params = [] self._coords_declared = False @staticmethod def _elementwise_kernel(name, args, body): arg_string = ", ".join(["%s %s" % (t, v) for t, v in args]) kernel = """ __global__ void %s(%s, int n) { unsigned tid = threadIdx.x; unsigned total_threads = gridDim.x * blockDim.x; unsigned cta_start = blockDim.x * blockIdx.x; for (unsigned i = cta_start + tid; i < n; i += total_threads) { %s } } """ % (name, arg_string, body) return kernel def _translate_code(self, *funcs): self._func_body = "" self._deferred_writes = set() self._declarations = set() self._unpacks = set() self._params = {} self._coords_declared = False for func in funcs: func() for prop in self._deferred_writes: prop.deferred_write() return self._func_body def append_code(self, code): self._func_body += code def deferred_write(self, prop): self._deferred_writes.add(prop) def declare(self, prop): self._declarations.add(prop) def unpack(self, prop): self._unpacks.add(prop) def is_declared(self, prop): return prop in self._declarations def is_unpacked(self, prop): return prop in 
self._unpacks def declare_coords(self): self._coords_declared = True def define_constant(self, constant): self.constants[constant.name] = deepcopy(constant) def is_constant(self, constant_name): return constant_name in self.constants def define_parameter(self, param): self._params[param.name] = param def is_parameter(self, param_name): return param_name in self._params @property def coords_declared(self): return self._coords_declared def build_source(self): source = self.build_emit() source += self.build_absorb() source += self.build_render() source = self.build_defines() + source self.cuda_source = source def build_defines(self): defines = "" for const in self.constants.values(): defines += const.get_define_code() return defines def build_emit(self): args = [(self.main.ctype, "*fld"), ] body = self._translate_code(self.emit) self._emit_params = list(self._params.values()) args += [(param.ctype, param.name) for param in self._emit_params] return self._elementwise_kernel("emit", args, body) def build_absorb(self): args = [(self.main.ctype, "*fld"), ("int3", "*col")] body = self._translate_code(self.absorb, self.color) self._absorb_params = list(self._params.values()) args += [(param.ctype, param.name) for param in self._absorb_params] return self._elementwise_kernel("absorb", args, body) def build_render(self): args = self.renderer.args body = self.renderer.render_code() return self._elementwise_kernel("render", args, body) def index_to_coord(self, i): return self.topology.lattice.index_to_coord(i, self) def pack_state(self, state): val = 0 shift = 0 for name, prop in self.main.properties.items(): if name in state: val += state[name] << shift shift += prop.bit_width return val class GPUKernels: def __init__(self, source): cuda_module = SourceModule(source, options=["-w", ]) self.emit = cuda_module.get_function("emit") self.absorb = cuda_module.get_function("absorb") self.render = cuda_module.get_function("render") class GPUArrays: def __init__(self, init_cells, init_colors): self.img = None self.colors = gpuarray.to_gpu(init_colors) self.cells = gpuarray.to_gpu(init_cells) def init_img(self, num_cells): self.img = gpuarray.zeros((num_cells, ), dtype=np.int32) class GPU: def __init__(self, source, init_cells, init_colors): self.kernels = GPUKernels(source) self.arrays = GPUArrays(init_cells, init_colors) class CellularAutomaton(Translator, metaclass=BSCA): def __init__(self, experiment_class): super().__init__() self.frame_buf = np.zeros((3, ), dtype=np.uint8) self.width, self.height = 0, 0 self.random = LocalRandom(experiment_class.word) experiment_class.seed.random = self.random experiment_class.random = self.random for attr_name in dir(experiment_class): attr = getattr(experiment_class, attr_name) if (not callable(attr) and not attr_name.startswith("__")): if attr_name == 'seed': continue if hasattr(self.meta, attr_name): self.meta.__dict__[attr_name].__set__(self, attr) continue setattr(self, attr_name, attr) self.speed = 1 self.paused = False self.timestep = 0 self.cells_num = functools.reduce(operator.mul, self.size) self.build_source() init_colors = np.zeros((self.cells_num * 3, ), dtype=np.int32) cells_total = self.cells_num * (len(self.buffers) + 1) + 1 init_cells = np.zeros((cells_total, ), dtype=self.main.dtype) experiment_class.seed.generate(init_cells, self) self.gpu = GPU(self.cuda_source, init_cells, init_colors) self.bridge = MoireBridge self.renderer.setup_actions(self.bridge) self._lock = threading.Lock() def apply_speed(self, dval): self.speed = max(1, (self.speed + dval)) 
def toggle_pause(self): self.paused = not self.paused def set_viewport(self, size): self.width, self.height = size num_cells = self.width * self.height * 3 self.gpu.arrays.init_img(num_cells) def step(self): if self.paused: return block, grid = self.gpu.arrays.cells._block, self.gpu.arrays.cells._grid with self._lock: args = [param.dtype(param.value) for param in self._emit_params] self.gpu.kernels.emit(self.gpu.arrays.cells, *args, np.int32(self.cells_num), block=block, grid=grid) args = [param.dtype(param.value) for param in self._absorb_params] self.gpu.kernels.absorb(self.gpu.arrays.cells, self.gpu.arrays.colors, *args, np.int32(self.cells_num), block=block, grid=grid) self.timestep += 1 def render(self): if self.gpu.arrays.img is None: msg = "Viewport is not set, call set_viewport() before rendering." raise XenticaException(msg) block, grid = self.gpu.arrays.img._block, self.gpu.arrays.img._grid with self._lock: args = self.renderer.get_args_vals(self) args += [param.dtype(param.value) for param in self._render_params] args.append(np.int32(self.width * self.height)) self.gpu.kernels.render(*args, block=block, grid=grid) return self.gpu.arrays.img.get().astype(np.uint8)
MIT License
duartegroup/autode
autode/species/species.py
Species.centre
python
def centre(self) -> None:
    self.translate(vec=-np.average(self.coordinates, axis=0))
    return None
Translate this molecule so the centroid (~COM) is at the origin
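The method is a rigid shift by minus the mean coordinate; a self-contained numpy sketch of the same arithmetic (not using autode itself):

import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
coords -= np.average(coords, axis=0)                   # what centre() applies via translate()
assert np.allclose(np.average(coords, axis=0), 0.0)    # centroid now at the origin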
https://github.com/duartegroup/autode/blob/6505d5bbbd1906f57e4102e13f177510f166bbed/autode/species/species.py#L699-L702
import numpy as np import autode.values as val from copy import deepcopy from datetime import date from typing import Optional, Union, List, Sequence from scipy.spatial import distance_matrix from autode.log import logger from autode.atoms import Atom, Atoms, AtomCollection from autode.exceptions import CalculationException from autode.geom import calc_rmsd, get_rot_mat_euler from autode.constraints import Constraints from autode.log.methods import methods from autode.conformers.conformers import Conformers from autode.solvent.solvents import ExplicitSolvent, get_solvent from autode.calculation import Calculation from autode.wrappers.keywords import Keywords from autode.config import Config from autode.input_output import atoms_to_xyz_file from autode.mol_graphs import is_isomorphic, reorder_nodes from autode.methods import get_lmethod, get_hmethod, ElectronicStructureMethod from autode.mol_graphs import make_graph from autode.hessians import Hessian from autode.units import ha_per_ang_sq, ha_per_ang from autode.thermochemistry.symmetry import symmetry_number from autode.thermochemistry.igm import calculate_thermo_cont, LFMethod from autode.utils import (requires_atoms, work_in, requires_conformers) class Species(AtomCollection): def __str__(self): if self.atoms is None: atoms_str = '' else: atoms_str = ''.join([atom.label for atom in self.atoms[:100]]) solv_str = self.solvent.name if self.solvent is not None else 'none' return f'{self.name}_{self.charge}_{self.mult}_{atoms_str}_{solv_str}' def _repr(self, prefix: str): string = (f'{prefix}(' f'{self.name}, ' f'n_atoms={self.n_atoms}, ' f'charge={self.charge}, ' f'mult={self.mult})') return string def __repr__(self): return self._repr(prefix='Species') def copy(self) -> 'Species': return deepcopy(self) def new_species(self, name='species') -> 'Species': species = Species(name, deepcopy(self.atoms), self.charge, self.mult) species.graph = None if self.graph is None else self.graph.copy() species.solvent = None if self.solvent is None else self.solvent.copy() return species @property def charge(self) -> int: return self._charge @charge.setter def charge(self, value) -> None: self._charge = int(value) @property def mult(self) -> int: return self._mult @mult.setter def mult(self, value) -> None: self._mult = int(value) @AtomCollection.atoms.setter def atoms(self, value: Union[List[Atom], Atoms, None]): if value is None: self._atoms = None return if (self.n_atoms == len(value) and all(a.label == v.label for a, v in zip(self.atoms, value))): rmsd = calc_rmsd(coords1=np.array([v.coord for v in value]), coords2=np.array([a.coord for a in self.atoms])) else: rmsd = None if rmsd is None or rmsd > 1E-8: logger.info(f'Geometry changed- resetting energies of {self.name}') self.energies.clear() self.gradient = None self.hessian = None self._atoms = Atoms(value) return @property def formula(self) -> str: if self.atoms is None: return "" symbols = [atom.label for atom in self.atoms] formula_str = '' for symbol in sorted(set(symbols)): num = symbols.count(symbol) formula_str += f'{symbol}{num if num > 1 else ""}' return formula_str @property def hessian(self) -> Optional[Hessian]: return self._hess @hessian.setter def hessian(self, value: Union[Hessian, np.ndarray, None]): req_shape = (3*self.n_atoms, 3*self.n_atoms) if hasattr(value, 'shape') and value.shape != req_shape: raise ValueError('Could not set the Hessian. 
Incorrect shape: ' f'{value.shape} != {req_shape}') if value is None: self._hess = None elif isinstance(value, Hessian): self._hess = value if self._hess.atoms is None: self._hess.atoms = self.atoms elif isinstance(value, np.ndarray): logger.warning('Setting the Hessian from a numpy array - assuming ' 'units of Ha Å^-2') self._hess = Hessian(value, atoms=self.atoms, units=ha_per_ang_sq) else: raise ValueError(f'Could not set Hessian with {value}, Must be ' f'a numpy array or a Hessian.') @property def gradient(self) -> Optional[val.Gradient]: return self._grad @gradient.setter def gradient(self, value: Union[val.Gradient, np.ndarray, None]): if hasattr(value, 'shape') and value.shape != (self.n_atoms, 3): raise ValueError('Could not set the gradient. Incorrect shape: ' f'{value.shape} != {(self.n_atoms, 3)}') if value is None: self._grad = None elif isinstance(value, val.Gradient): self._grad = value elif isinstance(value, np.ndarray): logger.warning('Setting the gradients from a numpy array - ' 'assuming Ha / Å units') self._grad = val.Gradient(value, units=ha_per_ang) else: raise ValueError(f'Could not set the gradient with {value}, Must ' f'be a numpy array or a Hessian.') @property def frequencies(self) -> Optional[List[val.Frequency]]: if self.hessian is None: logger.warning('No Hessian has been calculated - no frequencies') return None return self.hessian.frequencies_proj @property def vib_frequencies(self) -> Optional[List[val.Frequency]]: n = 6 if not self.is_linear() else 5 return self.frequencies[n:] if self.frequencies is not None else None @property def imaginary_frequencies(self) -> Optional[List[val.Frequency]]: if self.frequencies is None: logger.warning('Had no frequencies - could not find any imaginary') return None imag_freqs = [freq for freq in self.frequencies if freq.is_imaginary] if len(imag_freqs) == 0: logger.warning('No imaginary frequencies') return None return imag_freqs def normal_mode(self, mode_number: int) -> Optional[val.Coordinates]: if self.hessian is None: logger.warning('Could not calculate a normal mode displacement') return None return self.hessian.normal_modes_proj[mode_number] @property @requires_atoms def bond_matrix(self) -> np.ndarray: matrix = np.zeros(shape=(self.n_atoms, self.n_atoms), dtype=bool) if self.graph is None: raise ValueError('No molecular graph set. Bonds are not defined') for bond in self.graph.edges: matrix[tuple(bond)] = matrix[tuple(reversed(bond))] = True return matrix @property def radius(self) -> val.Distance: if self.n_atoms == 0: return val.Distance(0.0) coords = self.coordinates return val.Distance(np.max(distance_matrix(coords, coords)) / 2.0) @property def sn(self) -> int: if self.n_atoms == 0: return 1 if self.n_atoms > 50: logger.warning('Symmetry number calculations are not implemented ' 'for large molecules. 
Assuming C1 -> σ_R=1') return 1 return symmetry_number(self) @property def is_explicitly_solvated(self) -> bool: return isinstance(self.solvent, ExplicitSolvent) @property def energy(self) -> Optional[val.PotentialEnergy]: return self.energies.last(val.PotentialEnergy) @energy.setter def energy(self, value: Union[val.Energy, float, None]): if value is None: pass elif isinstance(value, val.PotentialEnergy): self.energies.append(value) else: self.energies.append(val.PotentialEnergy(value)) @property def h_cont(self) -> Optional[val.EnthalpyCont]: return self.energies.last(val.EnthalpyCont) @property def g_cont(self) -> Optional[val.FreeEnergyCont]: return self.energies.last(val.FreeEnergyCont) @property def free_energy(self) -> Optional[val.FreeEnergy]: try: return val.FreeEnergy(self.energy + self.g_cont) except TypeError: logger.warning('Could not calculate G - an energy was None') return None @property def enthalpy(self) -> Optional[val.Enthalpy]: try: return val.Enthalpy(self.energy + self.h_cont) except TypeError: logger.warning('Could not calculate H - an energy was None') return None @property def n_conformers(self) -> int: return 0 if self.conformers is None else len(self.conformers) @property def conformers(self) -> 'autode.conformers.Conformers': return self._conformers @conformers.setter def conformers(self, value: Union[List['autode.conformers.Conformer'], 'autode.conformers.Conformers', None] ) -> None: if value is None: self._conformers.clear() return self._conformers = Conformers([conf for conf in value]) def _generate_conformers(self, *args, **kwargs): raise NotImplementedError('Could not generate conformers. ' 'generate_conformers() not implemented') def _run_hess_calculation(self, method): if self.n_atoms < 2: logger.warning(f'Not running a Hessian calculation on only ' f'{self.n_atoms} atom(s). Cannot have frequencies') return None method = method if method is not None else get_hmethod() calc = Calculation(name=f'{self.name}_hess', molecule=self, method=method, keywords=method.keywords.hess, n_cores=Config.n_cores) calc.run() self.energy = calc.get_energy() self.hessian = calc.get_hessian() return None @requires_conformers def _set_lowest_energy_conformer(self): lowest_energy = None for conformer in self.conformers: if conformer.energy is None or conformer.atoms is None: continue make_graph(conformer) if not is_isomorphic(conformer.graph, self.graph, ignore_active_bonds=True): logger.warning('Conformer had a different graph. Ignoring') continue if lowest_energy is None: lowest_energy = conformer.energy if conformer.energy <= lowest_energy: self.atoms = conformer.atoms self.energy = conformer.energy lowest_energy = conformer.energy if lowest_energy is None: raise RuntimeError("Failed to set the lowest energy conformer as " "no suitable conformers were present") return None def populate_conformers(self, *args, **kwargs): return self._generate_conformers(*args, **kwargs) @requires_atoms def reorder_atoms(self, mapping: dict) -> None: if not (set(mapping.keys()) == set(mapping.values()) == set(list(range(self.n_atoms)))): raise ValueError('Invalid mapping. 
Must be 1-1 for all atoms') self._atoms = Atoms([self.atoms[i] for i in sorted(mapping, key=mapping.get)]) if self.graph is None: return self.graph = reorder_nodes(graph=self.graph, mapping={u: v for v, u in mapping.items()}) return @requires_atoms def is_linear(self, tol: Optional[float] = None, angle_tol: val.Angle = val.Angle(1.0, units='deg')) -> bool: if tol is not None: angle_tol = val.Angle(np.arccos(1.0 - tol), units='rad') return self.atoms.are_linear(angle_tol=angle_tol) @requires_atoms def translate(self, vec: Sequence[float]) -> None: for atom in self.atoms: atom.translate(vec) return None @requires_atoms def rotate(self, axis: Union[np.ndarray, Sequence], theta: Union[val.Angle, float], origin: Union[np.ndarray, Sequence, None] = None) -> None: origin = np.zeros(3) if origin is None else np.array(origin, copy=True) coords = self.coordinates coords -= origin coords = np.dot(coords, get_rot_mat_euler(axis=axis, theta=theta).T) coords += origin for atom, new_coord in zip(self.atoms, coords): atom.coord = new_coord return None @requires_atoms
MIT License
ufkapano/graphs-dict
graphtheory/coloring/edgecolorcs.py
ConnectedSequentialEdgeColoring1.__init__
python
def __init__(self, graph):
    if graph.is_directed():
        raise ValueError("the graph is directed")
    self.graph = graph
    self.parent = dict()
    self.color = dict()
    self.m = 0
    for edge in self.graph.iteredges():
        if edge.source == edge.target:
            raise ValueError("a loop detected")
        else:
            self.color[edge] = None
            self.m += 1
    if len(self.color) < self.m:
        raise ValueError("edges are not unique")
    self.saturation = dict((node, set()) for node in self.graph.iternodes())
The algorithm initialization.
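A small construction sketch; the Graph and Edge classes come from elsewhere in the graphtheory package, so their import paths and constructors are assumptions here:

from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.coloring.edgecolorcs import ConnectedSequentialEdgeColoring1

G = Graph(n=3)                       # undirected graph (assumed default)
for node in (0, 1, 2):
    G.add_node(node)
for u, v in ((0, 1), (1, 2)):
    G.add_edge(Edge(u, v))

algorithm = ConnectedSequentialEdgeColoring1(G)
# A directed graph, a self-loop, or duplicate edges would raise ValueError here.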
https://github.com/ufkapano/graphs-dict/blob/ab42c51b8fa2c4cdb8d5cfd0b7de4702824f22c9/graphtheory/coloring/edgecolorcs.py#L27-L43
try:
    from Queue import Queue
except ImportError:
    from queue import Queue
    xrange = range


class ConnectedSequentialEdgeColoring1:
BSD 3-Clause New or Revised License
block42-blockchain-company/thornode-telegram-bot
bot/service/utils.py
tor_to_rune
python
def tor_to_rune(tor):
    tor = int(float(tor))
    if tor == 0:
        return "0 RUNE"
    elif tor >= 100000000:
        return "{:,} RUNE".format(int(tor / 100000000))
    else:
        return '{:.4f} RUNE'.format(tor / 100000000)
1e8 Tor are 1 RUNE. The output format depends on whether the amount is zero, a fraction of a RUNE, or one RUNE or more.
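A couple of worked values make the thresholds concrete; the import path below is inferred from the record's bot/service/utils.py location and is an assumption:

from service.utils import tor_to_rune   # module path assumed

print(tor_to_rune(250_000_000))  # 2.5 RUNE worth of tor -> "2 RUNE" (whole RUNE, truncated)
print(tor_to_rune(5_000_000))    # 0.05 RUNE worth of tor -> "0.0500 RUNE"
print(tor_to_rune(0))            # -> "0 RUNE"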
https://github.com/block42-blockchain-company/thornode-telegram-bot/blob/6478b1eb41e36c5fdd327b963b55343de1ce5337/bot/service/utils.py#L242-L255
import asyncio from collections import defaultdict from datetime import datetime, timedelta from typing import Callable, Awaitable from service.binance_network_service import get_binance_balance from service.thorchain_network_service import * from constants.messages import NetworkHealthStatus from collections import MutableMapping def flatten_dictionary(dictionary: dict) -> dict: tuple_list = dictionary_to_flat_tuple_list(dictionary) flat_dict = {tuple_list[i][0]: tuple_list[i][1] for i in range(0, len(tuple_list))} return flat_dict def dictionary_to_flat_tuple_list(dictionary: dict) -> list: items = [] for k, v in dictionary.items(): if isinstance(v, MutableMapping): items.extend(dictionary_to_flat_tuple_list(dictionary[k])) else: items.append((k, v)) return items async def for_each_async(elements: [], function: Callable[..., Awaitable[None]]): tasks = [] for element in elements: tasks.append(function(element)) await asyncio.gather(*tasks) def format_to_days_and_hours(duration: timedelta) -> str: result = "" hours = duration.seconds // 3600 if duration.days > 0: result += str(duration.days) if duration.days == 1: result += ' day' else: result += " days" if hours > 0: result += " " if hours <= 0: if duration.days <= 0: result += "< 1 hour" else: result += str(hours) if hours == 1: result += ' hour' else: result += ' hours' return result def asgard_solvency_check() -> dict: solvency_report = {'is_solvent': True} asgard_actual = defaultdict(lambda: {"json": {}}) asgard_expected = get_asgard_json() pool_addresses = get_pool_addresses_from_any_node() for chain_data in pool_addresses: chain = chain_data['chain'] if chain == 'BNB': asgard_actual[chain]['json'] = get_binance_balance(chain_data['address']) break for chain_key, chain_value in asgard_actual.items(): if chain_key == 'BNB': for balance in chain_value['json']: chain_value[balance['symbol']] = balance['free'] break for chain in asgard_expected: if chain['status'] == 'active': for coin in chain['coins']: asset = coin['asset'].split('.') actual_amount_formatted = (asgard_actual.get(asset[0]). 
setdefault(asset[1], "0").replace(".", "")) expected_amount_formatted = (coin['amount'].replace(".", "")) if int(actual_amount_formatted) < int(expected_amount_formatted): solvency_report['is_solvent'] = False if 'insolvent_coins' not in solvency_report: solvency_report['insolvent_coins'] = {} solvency_report['insolvent_coins'][coin['asset']] = { "expected": coin['amount'], "actual": asgard_actual[asset[0]][asset[1]] } else: if 'solvent_coins' not in solvency_report: solvency_report['solvent_coins'] = {} solvency_report['solvent_coins'][coin['asset']] = asgard_actual[asset[0]][asset[1]] return solvency_report def yggdrasil_solvency_check() -> dict: solvency_report = {'is_solvent': True} yggdrasil_actual = {} yggdrasil_expected = get_yggdrasil_json() for vault in yggdrasil_expected: if vault['status'] == 'active' and vault['vault']['status'] == 'active': for chain in vault['addresses']: if chain['chain'] == 'BNB': public_key = vault['vault']['pub_key'] if public_key not in yggdrasil_actual: yggdrasil_actual[public_key] = {} if chain['chain'] not in yggdrasil_actual[public_key]: yggdrasil_actual[public_key][chain['chain']] = {} yggdrasil_actual[public_key][chain['chain']] = {"json": {}} yggdrasil_actual[public_key][chain['chain']]['json'] = get_binance_balance(chain['address']) for vault in yggdrasil_actual: for chain_key, chain_value in yggdrasil_actual[vault].items(): if chain_key == 'BNB': for balance in chain_value['json']: chain_value[balance['symbol']] = balance['free'] for vault in yggdrasil_expected: if vault['status'] == 'active' and vault['vault']['status'] == 'active': for coin in vault['vault']['coins']: asset = coin['asset'].split('.') actual_amount = (yggdrasil_actual[vault['vault']['pub_key']].get(asset[0]).setdefault(asset[1], "0") .replace(".", "")) expected_amount = (coin['amount'].replace(".", "")) if int(actual_amount) < int(expected_amount): solvency_report['is_solvent'] = False if 'insolvent_coins' not in solvency_report: solvency_report['insolvent_coins'] = {} if vault['vault']['pub_key'] not in solvency_report['insolvent_coins']: solvency_report['insolvent_coins'][vault['vault']['pub_key']] = {} solvency_report['insolvent_coins'][vault['vault']['pub_key']][coin['asset']] = { "expected": coin['amount'], "actual": yggdrasil_actual[vault['vault']['pub_key']][asset[0]][asset[1]] } else: if 'solvent_coins' not in solvency_report: solvency_report['solvent_coins'] = {} if coin['asset'] in solvency_report['solvent_coins']: solvency_report['solvent_coins'][coin['asset']] += float(yggdrasil_actual[vault['vault']['pub_key']][asset[0]][asset[1]]) else: solvency_report['solvent_coins'][coin['asset']] = float(yggdrasil_actual[vault['vault']['pub_key']][asset[0]][asset[1]]) return solvency_report def get_solvency_message(asgard_solvency, yggdrasil_solvency) -> str: message = "Tracked Balances of *Asgard*:\n" if 'insolvent_coins' in asgard_solvency: for coin_key, coin_value in asgard_solvency['insolvent_coins'].items(): message += f"*{coin_key}*:\n" f" Expected: {coin_value['expected']}\n" f" Actual: {coin_value['actual']}\n" if 'solvent_coins' in asgard_solvency: for coin_key, coin_value in asgard_solvency['solvent_coins'].items(): message += f"*{coin_key}*: {coin_value}\n" message += "\nTracked Balances of *Yggdrasil*:\n" if 'insolvent_coins' in yggdrasil_solvency: for pub_key, coins in yggdrasil_solvency['insolvent_coins'].items(): for coin_key, coin_value in coins.items(): message += f"*{pub_key}*:\n" f"*{coin_key}*:\n" f" Expected: {coin_value['expected']}\n" f" Actual: 
{coin_value['actual']}\n" if 'solvent_coins' in yggdrasil_solvency: for coin_key, coin_value in yggdrasil_solvency['solvent_coins'].items(): message += f"*{coin_key}*: {coin_value}\n" return message def get_insolvent_balances_message(asgard_solvency, yggdrasil_solvency) -> str: message = "" if 'insolvent_coins' in asgard_solvency: message += "Insolvent Balances of *Asgard*:\n" for coin_key, coin_value in asgard_solvency['insolvent_coins'].items(): message += f"*{coin_key}*:\n" f" Expected: {coin_value['expected']}\n" f" Actual: {coin_value['actual']}\n" if 'insolvent_coins' in yggdrasil_solvency: message += "\nInsolvent Balances of *Yggdrasil*:\n" for pub_key, coins in yggdrasil_solvency['insolvent_coins'].items(): for coin_key, coin_value in coins.items(): message += f"*{pub_key}*:\n" f"*{coin_key}*:\n" f" Expected: {coin_value['expected']}\n" f" Actual: {coin_value['actual']}\n" return message def network_security_ratio_to_string(network_security_ratio): if network_security_ratio > 0.9: network_security_string = NetworkHealthStatus.INEFFICIENT elif 0.9 >= network_security_ratio > 0.75: network_security_string = NetworkHealthStatus.OVERBONDED elif 0.75 >= network_security_ratio >= 0.6: network_security_string = NetworkHealthStatus.OPTIMAL elif 0.6 > network_security_ratio >= 0.5: network_security_string = NetworkHealthStatus.UNDBERBONDED else: network_security_string = NetworkHealthStatus.INSECURE return network_security_string def get_network_security_ratio(network_json): total_active_bond = int(network_json['bondMetrics']['totalActiveBond']) total_staked = int(network_json['totalPooledRune']) return total_active_bond / (total_active_bond + total_staked)
MIT License
scikit-hep/cabinetry
src/cabinetry/histo.py
Histogram.validate
python
def validate(self, name: str) -> None:
    empty_bins = np.where(np.atleast_1d(self.yields) == 0.0)[0]
    if len(empty_bins) > 0:
        log.warning(f"{name} has empty bins: {empty_bins}")
    nan_pos = np.where(np.isnan(self.stdev))[0]
    if len(nan_pos) > 0:
        log.warning(f"{name} has bins with ill-defined stat. unc.: {nan_pos}")
    not_empty_but_nan = [b for b in nan_pos if b not in empty_bins]
    if len(not_empty_but_nan) > 0:
        log.warning(
            f"{name} has non-empty bins with ill-defined stat. unc.: "
            f"{not_empty_but_nan}"
        )
Runs consistency checks on a histogram.

Checks for empty bins and ill-defined statistical uncertainties. Logs warnings
if issues are found, but does not raise exceptions.

Args:
    name (str): name of the histogram for logging purposes
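A minimal sketch using only the from_arrays constructor shown in the context below; the bin contents are chosen to trigger both warnings:

from cabinetry.histo import Histogram

hist = Histogram.from_arrays(
    bins=[0.0, 1.0, 2.0],
    yields=[5.0, 0.0],           # second bin is empty
    stdev=[1.0, float('nan')],   # and its stat. uncertainty is ill-defined
)
hist.validate('signal_region')   # emits two log warnings, raises nothing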
https://github.com/scikit-hep/cabinetry/blob/b89bb09b97608e984864cde6d45f9b8901612a06/src/cabinetry/histo.py#L194-L222
import logging import os import pathlib from typing import Any, Dict, List, Optional, Type, TypeVar, Union import boost_histogram as bh import numpy as np import cabinetry from cabinetry._typing import Literal log = logging.getLogger(__name__) H = TypeVar("H", bound="Histogram") class Histogram(bh.Histogram, family=cabinetry): @classmethod def from_arrays( cls: Type[H], bins: Union[List[float], np.ndarray], yields: Union[List[float], np.ndarray], stdev: Union[List[float], np.ndarray], ) -> H: if len(bins) != len(yields) + 1: raise ValueError("bin edges need one more entry than yields") if len(yields) != len(stdev): raise ValueError("yields and stdev need to have the same shape") out = cls( bh.axis.Variable(bins, underflow=False, overflow=False), storage=bh.storage.Weight(), ) yields = np.asarray(yields) stdev = np.asarray(stdev) out[...] = np.stack([yields, stdev ** 2], axis=-1) return out @classmethod def from_path( cls: Type[H], histo_path: pathlib.Path, *, modified: bool = True ) -> H: if modified: histo_path_modified = histo_path.parent / (histo_path.name + "_modified") if not histo_path_modified.with_suffix(".npz").exists(): log.warning( f"the modified histogram {histo_path_modified.with_suffix('.npz')} " "does not exist" ) log.warning("loading the un-modified histogram instead!") else: histo_path = histo_path_modified histogram_npz = np.load(histo_path.with_suffix(".npz")) bins = histogram_npz["bins"] yields = histogram_npz["yields"] stdev = histogram_npz["stdev"] return cls.from_arrays(bins, yields, stdev) @classmethod def from_config( cls: Type[H], histo_folder: Union[str, pathlib.Path], region: Dict[str, Any], sample: Dict[str, Any], systematic: Dict[str, Any], *, template: Optional[Literal["Up", "Down"]] = None, modified: bool = True, ) -> H: histo_name = name(region, sample, systematic, template=template) histo_path = pathlib.Path(histo_folder) / histo_name return cls.from_path(histo_path, modified=modified) @property def yields(self) -> np.ndarray: return self.values() @yields.setter def yields(self, value: np.ndarray) -> None: self.view().value = value @property def stdev(self) -> np.ndarray: return np.sqrt(self.variances()) @stdev.setter def stdev(self, value: np.ndarray) -> None: self.view().variance = value ** 2 @property def bins(self) -> np.ndarray: return self.axes[0].edges def save(self, histo_path: pathlib.Path) -> None: log.debug(f"saving histogram to {histo_path.with_suffix('.npz')}") if not os.path.exists(histo_path.parent): os.mkdir(histo_path.parent) np.savez( histo_path.with_suffix(".npz"), yields=self.yields, stdev=self.stdev, bins=self.bins, )
BSD 3-Clause New or Revised License
fabtools/fabtools
fabtools/files.py
owner
python
def owner(path, use_sudo=False):
    func = use_sudo and run_as_root or run
    with settings(hide('running', 'stdout'), warn_only=True):
        result = func('stat -c %%U "%(path)s"' % locals())
        if result.failed and 'stat: illegal option' in result:
            return func('stat -f %%Su "%(path)s"' % locals())
        else:
            return result
Get the owner name of a file or directory.
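A sketch of a Fabric 1.x task using the helper; host configuration is omitted and the file path is illustrative:

from fabric.api import task
from fabtools.files import owner

@task
def check_config_owner():
    if owner('/etc/nginx/nginx.conf', use_sudo=True) != 'root':
        print('unexpected owner for nginx.conf')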
https://github.com/fabtools/fabtools/blob/5fdc7174c3fae5e93a16d677d0466f41dc2be175/fabtools/files.py#L52-L65
from pipes import quote
import os

from fabric.api import (
    abort,
    env,
    hide,
    run,
    settings,
    sudo,
    warn,
)
from fabric.contrib.files import upload_template as _upload_template
from fabric.contrib.files import exists

import six

from fabtools.utils import run_as_root


def is_file(path, use_sudo=False):
    func = use_sudo and run_as_root or run
    with settings(hide('running', 'warnings'), warn_only=True):
        return func('[ -f "%(path)s" ]' % locals()).succeeded


def is_dir(path, use_sudo=False):
    func = use_sudo and run_as_root or run
    with settings(hide('running', 'warnings'), warn_only=True):
        return func('[ -d "%(path)s" ]' % locals()).succeeded


def is_link(path, use_sudo=False):
    func = use_sudo and run_as_root or run
    with settings(hide('running', 'warnings'), warn_only=True):
        return func('[ -L "%(path)s" ]' % locals()).succeeded
BSD 2-Clause Simplified License
openstate-sdn/ryu
ryu/contrib/ovs/vlog.py
Vlog.exception
python
def exception(self, message):
    self.err(message, exc_info=True)
Logs 'message' at ERR log level. Includes a backtrace when in exception context.
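A usage sketch; the import path follows the in-tree copy above, and the vlog facility is assumed to have been initialised elsewhere, since messages are dropped until then:

from ovs.vlog import Vlog

vlog = Vlog('example')

def risky_operation():              # hypothetical stand-in for real work
    raise RuntimeError('boom')

try:
    risky_operation()
except RuntimeError:
    vlog.exception('risky_operation failed')   # ERR-level entry plus traceback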
https://github.com/openstate-sdn/ryu/blob/b4a7f6c3615a934eaf42894bcb1cc809fce96e93/ryu/contrib/ovs/vlog.py#L90-L93
import datetime import logging import logging.handlers import re import socket import sys import ovs.dirs import ovs.unixctl import ovs.util FACILITIES = {"console": "info", "file": "info", "syslog": "info"} LEVELS = { "dbg": logging.DEBUG, "info": logging.INFO, "warn": logging.WARNING, "err": logging.ERROR, "emer": logging.CRITICAL, "off": logging.CRITICAL } def get_level(level_str): return LEVELS.get(level_str.lower()) class Vlog: __inited = False __msg_num = 0 __mfl = {} __log_file = None __file_handler = None def __init__(self, name): assert not Vlog.__inited self.name = name.lower() if name not in Vlog.__mfl: Vlog.__mfl[self.name] = FACILITIES.copy() def __log(self, level, message, **kwargs): if not Vlog.__inited: return now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") message = ("%s|%s|%s|%s|%s" % (now, Vlog.__msg_num, self.name, level, message)) level = LEVELS.get(level.lower(), logging.DEBUG) Vlog.__msg_num += 1 for f, f_level in Vlog.__mfl[self.name].iteritems(): f_level = LEVELS.get(f_level, logging.CRITICAL) if level >= f_level: logging.getLogger(f).log(level, message, **kwargs) def emer(self, message, **kwargs): self.__log("EMER", message, **kwargs) def err(self, message, **kwargs): self.__log("ERR", message, **kwargs) def warn(self, message, **kwargs): self.__log("WARN", message, **kwargs) def info(self, message, **kwargs): self.__log("INFO", message, **kwargs) def dbg(self, message, **kwargs): self.__log("DBG", message, **kwargs)
Apache License 2.0
mindspore-ai/mindarmour
examples/common/networks/lenet5/lenet5_net_for_fuzzing.py
fc_with_initialize
python
def fc_with_initialize(input_channels, out_channels):
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)
Wrap the initialization of a fully connected (dense) layer.
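A sketch of how the LeNet-5 network in this module would compose the helper; it assumes weight_variable and MindSpore's nn are in scope as in the context below, and the layer sizes follow the classic LeNet-5 architecture rather than being taken from this record:

fc1 = fc_with_initialize(16 * 5 * 5, 120)
fc2 = fc_with_initialize(120, 84)
fc3 = fc_with_initialize(84, 10)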
https://github.com/mindspore-ai/mindarmour/blob/df44e3de733cdfc568d856d120bd8a53e6f25cc1/examples/common/networks/lenet5/lenet5_net_for_fuzzing.py#L29-L33
from mindspore import nn
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import TensorSummary


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")
Apache License 2.0
geoscienceaustralia/agdc
src/landsat_ingester/landsat_dataset.py
LandsatDataset.get_cloud_cover
python
def get_cloud_cover(self):
    return self._ds.cloud_cover_percentage
Percentage cloud cover of the acquisition, if available.
https://github.com/geoscienceaustralia/agdc/blob/2e22c6bdd9305555db3615305ff6a5df6219cd51/src/landsat_ingester/landsat_dataset.py#L399-L401
import os import logging import glob import re from EOtools.DatasetDrivers import SceneDataset from EOtools.execute import execute from agdc.cube_util import DatasetError from agdc.abstract_ingester import AbstractDataset from landsat_bandstack import LandsatBandstack LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.INFO) class LandsatDataset(AbstractDataset): PROCESSING_LEVEL_ALIASES = { 'Pixel Quality': 'PQA', 'Fractional Cover': 'FC' } def __init__(self, dataset_path): self._dataset_path = dataset_path LOGGER.info('Opening Dataset %s', self._dataset_path) self._ds = SceneDataset(default_metadata_required=False, utm_fix=True) self._ds = self._ds.Open(self.get_dataset_path()) if not self._ds: raise DatasetError("Unable to open %s" % self.get_dataset_path()) self._dataset_size = self._get_directory_size() if self.get_processing_level() in ['ORTHO', 'L1T', 'MAP']: LOGGER.debug('Dataset %s is Level 1', self.get_dataset_path()) self._gcp_count = self._get_gcp_count() self._mtl_text = self._get_mtl_text() else: self._gcp_count = None self._mtl_text = None self._xml_text = self._get_xml_text() AbstractDataset.__init__(self) def _get_directory_size(self): command = "du -sk %s | cut -f1" % self.get_dataset_path() LOGGER.debug('executing "%s"', command) result = execute(command) if result['returncode'] != 0: raise DatasetError('Unable to calculate directory size: ' + '"%s" failed: %s' % (command, result['stderr'])) LOGGER.debug('stdout = %s', result['stdout']) return int(result['stdout']) def _get_gcp_count(self): gcp_pattern = os.path.join(self.get_dataset_path(), 'scene01', '*_GCP.txt') return self._extract_from_file(gcp_pattern, 'GCP.txt', self._extract_gcp_count) def _get_mtl_text(self): mtl_pattern = os.path.join(self.get_dataset_path(), 'scene01', '*_MTL.txt') return self._extract_from_file(mtl_pattern, 'MTL.txt', self._extract_text) def _get_xml_text(self): xml_pattern = os.path.join(self.get_dataset_path(), 'metadata.xml') return self._extract_from_file(xml_pattern, 'metadata.xml', self._extract_text) @staticmethod def _extract_from_file(file_pattern, file_description, extract_function): try: md_path = glob.glob(file_pattern)[0] md_file = open(md_path) metadata = extract_function(md_file) md_file.close() except IndexError: metadata = None LOGGER.debug('No %s file found.', file_description) except IOError: raise DatasetError('Unable to open %s file.' 
% file_description) return metadata @staticmethod def _extract_text(md_file): return md_file.read() @staticmethod def _extract_gcp_count(md_file): return len([line for line in md_file.readlines() if re.match(r'\d+(\s+-?\d+\.?\d*){7}', line)]) def get_dataset_path(self): return self._dataset_path def get_satellite_tag(self): return self._ds.satellite.TAG def get_sensor_name(self): return self._ds.satellite.sensor def get_processing_level(self): level = self._ds.processor_level if level in self.PROCESSING_LEVEL_ALIASES: level = self.PROCESSING_LEVEL_ALIASES[level] return level.upper() def get_x_ref(self): return self._ds.path_number def get_y_ref(self): return self._ds.row_number def get_start_datetime(self): try: start_dt = self._ds.scene_alt_start_datetime except AttributeError: start_dt = None if start_dt is None: start_dt = self._ds.scene_start_datetime return start_dt def get_end_datetime(self): try: end_dt = self._ds.scene_alt_end_datetime except AttributeError: end_dt = None if end_dt is None: end_dt = self._ds.scene_end_datetime return end_dt def get_datetime_processed(self): return self._ds.completion_datetime def get_dataset_size(self): return self._dataset_size def get_ll_lon(self): return self._ds.ll_lon def get_ll_lat(self): return self._ds.ll_lat def get_lr_lon(self): return self._ds.lr_lon def get_lr_lat(self): return self._ds.lr_lat def get_ul_lon(self): return self._ds.ul_lon def get_ul_lat(self): return self._ds.ul_lat def get_ur_lon(self): return self._ds.ur_lon def get_ur_lat(self): return self._ds.ur_lat def get_projection(self): return self._ds.GetProjection() def get_ll_x(self): return self._ds.ll_x def get_ll_y(self): return self._ds.ll_y def get_lr_x(self): return self._ds.lr_x def get_lr_y(self): return self._ds.lr_y def get_ul_x(self): return self._ds.ul_x def get_ul_y(self): return self._ds.ul_y def get_ur_x(self): return self._ds.ur_x def get_ur_y(self): return self._ds.ur_y def get_x_pixels(self): return self._ds.image_pixels def get_y_pixels(self): return self._ds.image_lines def get_gcp_count(self): return self._gcp_count def get_mtl_text(self): return self._mtl_text
BSD 3-Clause New or Revised License
google-research/pyreach
pyreach/impl/oracle_impl.py
OracleDevice.get_prediction
python
def get_prediction(self,
                   intent: str,
                   prediction_type: str,
                   request_type: str,
                   task_code: str,
                   label: str,
                   timeout: float = 15.0) -> Optional[oracle.Prediction]:
    robot_id = self.get_key_value(
        device_base.KeyValueKey("settings-engine", "", "robot-name"))
    if robot_id is None:
        robot_id = ""
    q = self.send_tagged_request(
        types_gen.CommandData(
            ts=utils.timestamp_now(),
            device_type=self._device_type,
            device_name=self._device_name,
            data_type="inference-request",
            tag=utils.generate_tag(),
            intent=intent,
            prediction_type=prediction_type,
            request_type=request_type,
            task_code=task_code,
            label=label,
            robot_id=robot_id),
        timeout=timeout,
        expect_messages=1,
        expect_cmd_status=False)
    msgs = thread_util.extract_all_from_queue(q)
    if not msgs:
        return None
    if (msgs[0][0].data_type == "cmd-status" and
            msgs[0][0].status in {"rejected", "aborted"}):
        return None
    if len(msgs) != 1:
        logging.warning("expected a single message: %s", msgs)
    return self._prediction_from_message(msgs[0][0], intent, prediction_type,
                                         request_type, task_code, label)
Return a current prediction (if available).

Args:
    intent: The intent for the oracle.
    prediction_type: The prediction_type for the oracle.
    request_type: The request_type for the oracle.
    task_code: The task_code for the oracle.
    label: The label for the oracle.
    timeout: The optional maximum time to wait for the prediction. If not
        specified, it defaults to 15 seconds.

Returns:
    The latest prediction, if available.
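A call sketch; wiring an OracleDevice to a live Reach host is out of scope here, so oracle_device is assumed to already exist and every string value below is illustrative:

prediction = oracle_device.get_prediction(
    intent='pick',
    prediction_type='pick',
    request_type='sparse',
    task_code='example-task',
    label='example-label',
    timeout=15.0,
)
if prediction is None:
    print('no prediction (timed out, rejected, or aborted)')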
https://github.com/google-research/pyreach/blob/83cac8e235ba1392dcdc6b8d19202c3eff3ad9a6/pyreach/impl/oracle_impl.py#L143-L193
import logging import threading from typing import Callable, Optional, Set, Tuple from pyreach import core from pyreach import oracle from pyreach.common.python import types_gen from pyreach.impl import device_base from pyreach.impl import requester from pyreach.impl import thread_util from pyreach.impl import utils class OracleDevice(requester.Requester[oracle.Prediction]): _device_type: str _device_name: str _request_lock: threading.Lock _request_enable: bool _request_in_progress: bool _request_intent: str _request_predication_type: str _request_request_type: str _request_task_code: str _request_label: str def __init__(self, device_type: str, device_name: str = "") -> None: super().__init__() self._device_type = device_type self._device_name = device_name self._request_lock = threading.Lock() self._request_enable = False self._request_in_progress = False self._request_intent = "" self._request_prediction_type = "" self._request_request_type = "" self._request_task_code = "" self._request_label = "" def start_tagged_requests(self, intent: str, prediction_type: str, request_type: str, task_code: str, label: str) -> None: with self._request_lock: self._request_enable = True self._request_intent = intent self._request_prediction_type = prediction_type self._request_request_type = request_type self._request_task_code = task_code self._request_label = label self._request_update() def stop_tagged_requests(self) -> None: with self._request_lock: self._request_enable = False def get_cached_prediction(self) -> Optional[oracle.Prediction]: with self._request_lock: cached_prediction = self.get_cached() if cached_prediction is None: return None if self._request_intent != cached_prediction.intent: return None if self._request_prediction_type != cached_prediction.prediction_type: return None if self._request_request_type != cached_prediction.request_type: return None if self._request_task_code != cached_prediction.task_code: return None if self._request_label != cached_prediction.label: return None return cached_prediction def _request_update(self) -> None: with self._request_lock: if self.is_closed(): return if not self._request_enable: return if self._request_in_progress: return self._request_in_progress = True self.get_prediction_callback(self._request_intent, self._request_prediction_type, self._request_request_type, self._request_task_code, self._request_label, self._on_request_completed, self._on_request_error) def _on_request_completed(self, prediction: oracle.Prediction) -> None: self.set_cached(prediction) with self._request_lock: self._request_in_progress = False self._request_update() def _on_request_error(self, status: core.PyReachStatus) -> None: with self._request_lock: self._request_in_progress = False self._request_update() def get_key_values(self) -> Set[device_base.KeyValueKey]: return set([device_base.KeyValueKey("settings-engine", "", "robot-name")])
Apache License 2.0
anyant/rssant
rssant_api/helper.py
reverse_url
python
def reverse_url(url):
    url = yarl.URL(url)
    host = '.'.join(reversed(url.host.split('.')))
    result = f'{host}!{url.port}!{url.scheme}{url.raw_path_qs}'
    if url.raw_fragment:
        result += '#' + url.raw_fragment
    return result
Convert a URL into its reversed form: host labels reversed, then the port, scheme, and the original path and query.
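A worked example, assuming the rssant_api package is importable; yarl fills in the default port for the scheme:

from rssant_api.helper import reverse_url

print(reverse_url('https://blog.example.com/feed?page=2'))
# -> 'com.example.blog!443!https/feed?page=2'
#    host labels reversed, then port, scheme, and the unchanged path + query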
https://github.com/anyant/rssant/blob/b84490574df81ea58a9dda2bc839bcfa56909745/rssant_api/helper.py#L19-L28
import yarl
from collections import defaultdict


def shorten(text, width, placeholder='...'):
    if not text:
        return text
    if len(text) <= width:
        return text
    return text[: max(0, width - len(placeholder))] + placeholder
BSD 3-Clause New or Revised License
michael-wzhu/daguan_competition_2021_codes
src/Megatron-LM/megatron/optimizer/grad_scaler.py
MegatronGradScaler.__init__
python
def __init__(self, initial_scale):
    assert initial_scale > 0.0
    self._scale = torch.cuda.FloatTensor([initial_scale])
Initialize scale value with the input initial scale.
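A sketch of a concrete scaler built on this initialiser; the subclass is hypothetical, the real base class may declare further abstract methods not shown in this record, and instantiation needs a CUDA device because the scale is stored as a torch.cuda.FloatTensor:

class ConstantScaleSketch(MegatronGradScaler):   # hypothetical subclass
    @property
    def scale(self):
        return self._scale

    def update(self, found_inf):                 # a constant scale never changes
        pass

scaler = ConstantScaleSketch(initial_scale=2.0 ** 12)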
https://github.com/michael-wzhu/daguan_competition_2021_codes/blob/ab61f7e3ed11802759baf05b2b309448e38cacba/src/Megatron-LM/megatron/optimizer/grad_scaler.py#L26-L29
from abc import ABC
from abc import abstractmethod

import torch


class MegatronGradScaler(ABC):
Apache License 2.0
google-research/meta-dataset
meta_dataset/models/experimental/reparameterizable_distributions.py
DeepDensity.build_meta_parameters
python
def build_meta_parameters(self):
    setattr(self, self.meta_attribute_name, self.build_class_density_model())
Assign the meta parameters to instance attributes.
https://github.com/google-research/meta-dataset/blob/67546ff9f6992acfa4bc17edb92c82ff7bbcfbbc/meta_dataset/models/experimental/reparameterizable_distributions.py#L505-L507
from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import gin.tf from meta_dataset.learners.experimental import base as learner_base from meta_dataset.models.experimental import reparameterizable_base import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors META_PARAMETER_SCOPE = 'meta_parameter' TASK_PARAMETER_SCOPE = 'task_parameter' def is_meta_variable(obj): return isinstance(obj, tf.Variable) and META_PARAMETER_SCOPE in obj.name def is_task_variable(obj): return isinstance(obj, tf.Variable) and TASK_PARAMETER_SCOPE in obj.name def _assign_pairs(vbls, values): return [vbl.assign(value) for vbl, value in zip(vbls, values)] def _split_and_squeeze(tensor, num_splits, axis=0): return [ tf.squeeze(t) for t in tf.split(tensor, axis=axis, num_or_size_splits=num_splits) ] def fit_gaussian(embeddings, damping=1e-7, full_covariance=False): if full_covariance: num, dim = tf.split(tf.shape(input=embeddings), num_or_size_splits=2) num, dim = tf.squeeze(num), tf.squeeze(dim) sample_mean = tf.reduce_mean(input_tensor=embeddings, axis=0) centered_embeddings = embeddings - sample_mean sample_covariance = tf.einsum('ij,ik->kj', centered_embeddings, centered_embeddings) sample_covariance += damping * tf.eye(dim) sample_covariance /= tf.cast(num, dtype=tf.float32) return sample_mean, sample_covariance else: sample_mean, sample_variances = tf.nn.moments(x=embeddings) log_variances = tf.math.log(sample_variances + damping * tf.ones_like(sample_variances)) return sample_mean, log_variances def fit_gaussian_mixture(embeddings, responsibilities, damping=1e-7, full_covariance=False): num, dim = tf.split(tf.shape(input=embeddings), num_or_size_splits=2) num, dim = tf.squeeze(num), tf.squeeze(dim) num_classes = responsibilities.shape[1] mixing_proportion = tf.einsum('jk->k', responsibilities) mixing_proportion /= tf.cast(num, dtype=tf.float32) mixing_logits = tf.math.log(mixing_proportion) sample_mean = tf.einsum('ij,ik->jk', responsibilities, embeddings) sample_mean /= tf.reduce_sum( input_tensor=responsibilities, axis=0)[:, tf.newaxis] centered_embeddings = ( embeddings[:, tf.newaxis, :] - sample_mean[tf.newaxis, :, :]) if full_covariance: sample_covariance = tf.einsum('ijk,ijl->ijkl', centered_embeddings, centered_embeddings) sample_covariance += damping * tf.eye(dim) weighted_covariance = tf.einsum('ij,ijkl->jkl', responsibilities, sample_covariance) weighted_covariance /= tf.reduce_sum( input_tensor=responsibilities, axis=0)[:, tf.newaxis, tf.newaxis] return ( _split_and_squeeze(sample_mean, num_splits=num_classes), _split_and_squeeze(weighted_covariance, num_splits=num_classes), [mixing_logits], ) else: avg_x_squared = ( tf.matmul(responsibilities, embeddings**2, transpose_a=True) / tf.reduce_sum(input_tensor=responsibilities, axis=0)[:, tf.newaxis]) avg_means_squared = sample_mean**2 avg_x_means = ( sample_mean * tf.matmul(responsibilities, embeddings, transpose_a=True) / tf.reduce_sum(input_tensor=responsibilities, axis=0)[:, tf.newaxis]) sample_variances = ( avg_x_squared - 2 * avg_x_means + avg_means_squared + damping * tf.ones(dim)) log_variances = tf.math.log(sample_variances) return ( _split_and_squeeze(sample_mean, num_splits=num_classes), _split_and_squeeze(log_variances, num_splits=num_classes), [mixing_logits], ) class ReparameterizableClassMixture( reparameterizable_base.ReparameterizableModule): def __init__(self, num_dims, output_dim, name=None): 
super(ReparameterizableClassMixture, self).__init__(name=name) self.num_dims = num_dims self.num_components = output_dim with tf.compat.v1.name_scope(META_PARAMETER_SCOPE): self.build_meta_parameters() with tf.compat.v1.name_scope(TASK_PARAMETER_SCOPE): self.build_task_parameters() def reparameterizables(self, predicate, with_path=False): def predicate_and_is_task_variable(obj): return predicate(obj) and is_task_variable(obj) return super(ReparameterizableClassMixture, self).reparameterizables( predicate=predicate_and_is_task_variable, with_path=with_path) @property def task_parameters(self): return self.reparameterizables( lambda obj: isinstance(obj, tf.Variable), with_path=False) def trainable_variables(self): def trainable_and_is_meta_variable(obj): return reparameterizable_base.is_trainable_variable( obj) and is_meta_variable(obj) return super(ReparameterizableClassMixture, self).reparameterizables( predicate=trainable_and_is_meta_variable, with_path=False) def build_meta_parameters(self): raise NotImplementedError def build_task_parameters(self): raise NotImplementedError def episodic_init_ops(self, onehot_labels, embeddings): raise NotImplementedError @property def components(self): raise NotImplementedError def __call__(self, embeddings): return tf.stack([c.log_prob(embeddings) for c in self.components], axis=-1) @gin.configurable class MultivariateNormalDiag(ReparameterizableClassMixture): def __init__(self, estimate_scale, damping, **kwargs): super(MultivariateNormalDiag, self).__init__(**kwargs) self.damping = damping self.estimate_scale = estimate_scale def build_meta_parameters(self): return def build_task_parameters(self): self.locs = [ tf.Variable(tf.zeros((self.num_dims)), name='loc_{}'.format(i)) for i in range(self.num_components) ] self.log_scales = [ tf.Variable(tf.zeros((self.num_dims)), name='log_scale_{}'.format(i)) for i in range(self.num_components) ] def episodic_init_ops(self, onehot_labels, embeddings, task_parameters): del task_parameters class_embeddings = learner_base.class_specific_data(onehot_labels, embeddings, self.num_components) locs, log_scales = zip( *map(fit_gaussian, class_embeddings, itertools.repeat(self.damping))) if not self.estimate_scale: log_scales = [tf.zeros_like(log_scale) for log_scale in log_scales] return (_assign_pairs(self.locs, locs) + _assign_pairs(self.log_scales, log_scales)) @property def components(self): return [ tfd.MultivariateNormalDiag( loc=loc, scale_diag=tf.math.softplus(log_scale), allow_nan_stats=False) for loc, log_scale in zip(self.locs, self.log_scales) ] @gin.configurable class GaussianMixture(ReparameterizableClassMixture): def __init__(self, num_modes, damping, loc_initializer=tf.initializers.random_uniform(), log_scale_initializer=tf.initializers.zeros(), logits_initializer=tf.initializers.zeros(), trainable_loc=True, trainable_scale=True, trainable_logits=True, estimate_loc=True, estimate_scale=True, estimate_logits=True, **kwargs): self.num_modes = num_modes self.damping = damping self.loc_initializer = loc_initializer self.log_scale_initializer = log_scale_initializer self.logits_initializer = logits_initializer self.trainable_loc = trainable_loc self.trainable_scale = trainable_scale self.trainable_logits = trainable_logits self.estimate_loc = estimate_loc self.estimate_scale = estimate_scale self.estimate_logits = estimate_logits super(GaussianMixture, self).__init__(**kwargs) def build_meta_parameters(self): self.meta_loc = tf.Variable( self.loc_initializer([self.num_modes, self.num_dims]), 
trainable=self.trainable_loc, name='meta_loc') self.meta_log_scale = tf.Variable( self.log_scale_initializer([self.num_modes, self.num_dims]), trainable=self.trainable_scale, name='meta_log_scale') self.meta_logits = tf.Variable( self.logits_initializer([self.num_modes]), trainable=self.trainable_logits, name='meta_logits') def build_task_parameters(self): def _construct_variables(): def _split_mode_params(params): return [ tf.squeeze(p) for p in tf.split( params, axis=0, num_or_size_splits=self.num_modes) ] locs = _split_mode_params(tf.zeros_like(self.meta_loc)) log_scales = _split_mode_params(tf.zeros_like(self.meta_log_scale)) logits = tf.zeros_like(self.meta_logits) return ( [tf.Variable(loc, 'loc') for loc in locs], [tf.Variable(log_scale, 'log_scale') for log_scale in log_scales], tf.Variable(logits, 'logits'), ) locs, log_scales, logits = [], [], [] for i in range(self.num_components): with tf.compat.v1.name_scope('class_{}'.format(i)): class_locs, class_log_scales, class_logits = _construct_variables() locs += [class_locs] log_scales += [class_log_scales] logits += [class_logits] self.task_locs = locs self.task_log_scales = log_scales self.task_logits = logits def episodic_init_ops(self, onehot_labels, embeddings): del onehot_labels del embeddings def _split_mode_params(params): return [ tf.squeeze(p) for p in tf.split(params, axis=0, num_or_size_splits=self.num_modes) ] init_ops = [] for component_locs, component_log_scales, component_logits in zip( self.task_locs, self.task_log_scales, self.task_logits): init_ops += _assign_pairs(component_locs, _split_mode_params(self.meta_loc)) init_ops += _assign_pairs(component_log_scales, _split_mode_params(self.meta_log_scale)) init_ops += [component_logits.assign(self.meta_logits)] return init_ops @property def components(self): return [ tfd.Mixture( cat=tfd.Categorical(logits=logits), components=[ tfd.MultivariateNormalDiag( loc=loc, scale_diag=tf.math.softplus(log_scale), allow_nan_stats=False) for loc, log_scale in zip(locs, log_scales) ]) for locs, log_scales, logits in zip( self.task_locs, self.task_log_scales, self.task_logits) ] def __call__(self, embeddings, components=False, class_idx=None): if class_idx: class_models = [self.components[i] for i in class_idx] else: class_models = self.components if components: return tf.stack([ tf.stack([c.log_prob(embeddings) for c in cs.components], axis=-1) for cs in class_models ], axis=1) else: return tf.stack([c.log_prob(embeddings) for c in class_models], axis=-1) class DeepDensity(ReparameterizableClassMixture): @property def meta_attribute_name(self): raise NotImplementedError @property def task_attribute_name(self): raise NotImplementedError @property def keyed_variables(self): return def build_class_density_model(self): raise NotImplementedError def episodic_init_ops(self, onehot_labels, embeddings): del onehot_labels del embeddings init_ops = [] for i in range(self.num_components): component_variables = dict( (k, v) for k, v in self.keyed_variables.items() if '{}/{}'.format(self.task_attribute_name, i) in k) sorted_component_variable_keys = sorted(component_variables.keys()) sorted_meta_variable_keys = sorted(self.meta_parameters.keys()) sorted_component_variables = ( component_variables[k] for k in sorted_component_variable_keys) sorted_meta_variables = ( self.meta_parameters[k] for k in sorted_meta_variable_keys) init_ops += [ assignee_variables.assign(assigner_variables) for assignee_variables, assigner_variables in zip( sorted_component_variables, sorted_meta_variables) ] return 
init_ops def trainable_variables(self): return dict((k, v) for k, v in self.keyed_variables.items() if self.task_attribute_name in k)
Apache License 2.0
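The fit_gaussian helper visible in the context above reduces a batch of embeddings to a damped mean/covariance estimate. A minimal sketch of calling it on toy data, assuming TensorFlow 2 eager execution; the embeddings below are invented:

import tensorflow as tf

embeddings = tf.random.normal([5, 3])  # 5 examples, 3-dimensional features (made up)
mean, cov = fit_gaussian(embeddings, damping=1e-7, full_covariance=True)
# mean has shape [3]; cov is the damped [3, 3] sample covariance from which a
# class-conditional Gaussian density can be built.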
mozilla/moztrap
moztrap/model/library/bulk.py
BulkParser.description
python
def description(self, lc, orig, data): if lc.startswith("when ") or lc.startswith("and when "): data[-1].setdefault("description", []) data[-1]["steps"] = [{"instruction": [orig]}] return self.instruction data[-1].setdefault("description", []).append(orig) return self.description
Expecting to encounter description line(s).
https://github.com/mozilla/moztrap/blob/93b34a4cd21c9e08f73d3b1a7630cd873f8418a0/moztrap/model/library/bulk.py#L83-L90
class ParsingError(Exception): pass class BulkParser(object): def parse(self, text): data = [] state = self.begin lines = text.splitlines() error = False for line in lines: line = line.strip() if line: try: state = state(line.lower(), line, data) except ParsingError as e: data = data or [{}] data[-1]["error"] = str(e) error = True break if not error and not state.expect_end: if not data: data.append({}) data[-1]["error"] = ( "Unexpected end of input, looking for %s" % " or ".join(repr(k.title()) for k in state.keys) ) for item in data: if "description" in item: item["description"] = "\n".join(item["description"]) for step in item.get("steps", []): step["instruction"] = "\n".join(step["instruction"]) if "expected" in step: step["expected"] = "\n".join(step["expected"]) return data def begin(self, lc, orig, data): if lc.startswith("test that "): if len(orig) > 200: data.append({}) raise ParsingError("Title should have at most 200 chracters, '%s...'" % orig[0:50]) data.append({"name": orig}) return self.description raise ParsingError("Expected 'Test that ...', not '%s'" % orig) begin.keys = ["Test that "] begin.expect_end = False
BSD 2-Clause Simplified License
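A hand-driven walk through the description state above, assuming a record already holds a parsed title; the input lines are invented for illustration:

parser = BulkParser()
data = [{"name": "Test that login works"}]
state = parser.description("a user with valid credentials.",
                           "A user with valid credentials.", data)
# data[0]["description"] == ["A user with valid credentials."] and the returned
# state is parser.description again, until a "When ..." line arrives:
state = state("when they submit the form", "When they submit the form", data)
# data[0]["steps"] == [{"instruction": ["When they submit the form"]}] and
# parsing hands off to parser.instruction.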
chahuja/language2pose
src/data/data.py
RawData.quat2fke
python
def quat2fke(self, df_quat, filename_fke, filename_rifke): df_fke = pd.DataFrame(data=np.zeros((df_quat.shape[0], len(self.fke_columns))), columns=self.fke_columns) df_fke[['root_tx', 'root_ty', 'root_tz']] = df_quat.loc[:, ['root_tx', 'root_ty', 'root_tz']].copy() xyz_data = quat2xyz(df_quat, self.skel) df_fke.loc[:, self.fke_columns] = xyz_data.reshape(-1, np.prod(xyz_data.shape[1:])) os.makedirs(filename_fke.parent, exist_ok=True) df_fke.to_csv(filename_fke.as_posix()) '''Save Rotation Invariant Forward Kinematics''' df_rifke = pd.DataFrame(data=np.zeros((df_quat.shape[0]-1, len(self.rifke_columns))), columns=self.rifke_columns) rifke_data = self.fke2rifke(xyz_data.copy()) df_rifke[self.rifke_columns] = rifke_data[..., 3:] os.makedirs(filename_rifke.parent, exist_ok=True) df_rifke.to_csv(filename_rifke.as_posix()) ''' Convert rifke to fke to get comparable ground truths ''' new_df_fke = pd.DataFrame(data=self.rifke2fke(df_rifke[self.rifke_columns].values, filename_rifke).reshape(-1, len(self.fke_columns)), columns=self.fke_columns) new_fke_dir = filename_fke.parent/'new_fke' os.makedirs(new_fke_dir, exist_ok=True) new_df_fke.to_csv((new_fke_dir/filename_fke.name).as_posix()) return xyz_data
Save Forward Kinematics
https://github.com/chahuja/language2pose/blob/a65d6857d504b5c7cc154260ee946224d387da9d/src/data/data.py#L116-L142
import os import numpy as np import pandas as pd from pathlib import Path from tqdm import tqdm import json import os,sys,inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0,parentdir) from utils.visualization import * from utils.skeleton import Skeleton from common.mmm import parse_motions from common.transforms3dbatch import * from common.quaternion import * from renderUtils import quat2xyz from model.model import Integrator import torch import pickle as pkl import scipy.ndimage.filters as filters import pdb def permute(parents, root=0, new_parent=-1, new_joints=[], new_parents=[]): new_joints.append(root) new_parents.append(new_parent) new_parent = len(new_joints) - 1 for idx, p in enumerate(parents): if p == root: permute(parents, root=idx, new_parent=new_parent, new_joints=new_joints, new_parents=new_parents) return new_joints, new_parents def softmax(x, **kw): softness = kw.pop('softness', 1.0) maxi, mini = np.max(x, **kw), np.min(x, **kw) return maxi + np.log(softness + np.exp(mini - maxi)) def softmin(x, **kw): return -softmax(-x, **kw) class RawData(): def __init__(self): pass def _get_f(self): raise NotImplementedError def _get_df(self): raise NotImplementedError def preProcess(self): raise NotImplementedError def get_skeletonNpermutation(self): raise NotImplementedError @property def quat_columns(self): quat_columns = ['root_tx', 'root_ty', 'root_tz'] for joint in self.skel.joints: quat_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rw', 'rx', 'ry', 'rz']] return quat_columns @property def fke_columns(self): fke_columns = [] for joint in self.skel.joints: fke_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['tx', 'ty', 'tz']] return fke_columns @property def pose_columns(self): pose_columns = [] for joint in self.skel.joints: pose_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rx', 'ry', 'rz']] return pose_columns @property def rifke_columns(self): rifke_columns = self.fke_columns + ['root_Vx', 'root_Vz', 'root_Ry', 'feet_l1', 'feet_l2', 'feet_r1', 'feet_r2'] return rifke_columns @property def rifke_dict(self): raise NotImplementedError def output_columns(self, feats_kind): if feats_kind in {'euler'}: return self.pose_columns elif feats_kind in {'quaternion'}: return self.quat_columns elif feats_kind in {'fke'}: return self.fke_columns elif feats_kind in {'rifke'}: return self.rifke_columns def mat2csv(self, data, filename, columns): pd.DataFrame(data=data, columns=columns).to_csv(filename)
MIT License
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/config/paginator.py
GetAggregateComplianceDetailsByConfigRule.paginate
python
def paginate(self, ConfigurationAggregatorName: str, ConfigRuleName: str, AccountId: str, AwsRegion: str, ComplianceType: str = None, PaginationConfig: Dict = None) -> Dict: pass
Creates an iterator that will paginate through responses from :py:meth:`ConfigService.Client.get_aggregate_compliance_details_by_config_rule`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateComplianceDetailsByConfigRule>`_ **Request Syntax** :: response_iterator = paginator.paginate( ConfigurationAggregatorName='string', ConfigRuleName='string', AccountId='string', AwsRegion='string', ComplianceType='COMPLIANT'|'NON_COMPLIANT'|'NOT_APPLICABLE'|'INSUFFICIENT_DATA', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'AggregateEvaluationResults': [ { 'EvaluationResultIdentifier': { 'EvaluationResultQualifier': { 'ConfigRuleName': 'string', 'ResourceType': 'string', 'ResourceId': 'string' }, 'OrderingTimestamp': datetime(2015, 1, 1) }, 'ComplianceType': 'COMPLIANT'|'NON_COMPLIANT'|'NOT_APPLICABLE'|'INSUFFICIENT_DATA', 'ResultRecordedTime': datetime(2015, 1, 1), 'ConfigRuleInvokedTime': datetime(2015, 1, 1), 'Annotation': 'string', 'AccountId': 'string', 'AwsRegion': 'string' }, ], } **Response Structure** - *(dict) --* - **AggregateEvaluationResults** *(list) --* Returns an AggregateEvaluationResults object. - *(dict) --* The details of an AWS Config evaluation for an account ID and region in an aggregator. Provides the AWS resource that was evaluated, the compliance of the resource, related time stamps, and supplementary information. - **EvaluationResultIdentifier** *(dict) --* Uniquely identifies the evaluation result. - **EvaluationResultQualifier** *(dict) --* Identifies an AWS Config rule used to evaluate an AWS resource, and provides the type and ID of the evaluated resource. - **ConfigRuleName** *(string) --* The name of the AWS Config rule that was used in the evaluation. - **ResourceType** *(string) --* The type of AWS resource that was evaluated. - **ResourceId** *(string) --* The ID of the evaluated AWS resource. - **OrderingTimestamp** *(datetime) --* The time of the event that triggered the evaluation of your AWS resources. The time can indicate when AWS Config delivered a configuration item change notification, or it can indicate when AWS Config delivered the configuration snapshot, depending on which event triggered the evaluation. - **ComplianceType** *(string) --* The resource compliance status. For the ``AggregationEvaluationResult`` data type, AWS Config supports only the ``COMPLIANT`` and ``NON_COMPLIANT`` . AWS Config does not support the ``NOT_APPLICABLE`` and ``INSUFFICIENT_DATA`` value. - **ResultRecordedTime** *(datetime) --* The time when AWS Config recorded the aggregate evaluation result. - **ConfigRuleInvokedTime** *(datetime) --* The time when the AWS Config rule evaluated the AWS resource. - **Annotation** *(string) --* Supplementary information about how the agrregate evaluation determined the compliance. - **AccountId** *(string) --* The 12-digit account ID of the source account. - **AwsRegion** *(string) --* The source region from where the data is aggregated. :type ConfigurationAggregatorName: string :param ConfigurationAggregatorName: **[REQUIRED]** The name of the configuration aggregator. :type ConfigRuleName: string :param ConfigRuleName: **[REQUIRED]** The name of the AWS Config rule for which you want compliance information. :type AccountId: string :param AccountId: **[REQUIRED]** The 12-digit account ID of the source account. :type AwsRegion: string :param AwsRegion: **[REQUIRED]** The source region from where the data is aggregated. 
:type ComplianceType: string :param ComplianceType: The resource compliance status. .. note:: For the ``GetAggregateComplianceDetailsByConfigRuleRequest`` data type, AWS Config supports only the ``COMPLIANT`` and ``NON_COMPLIANT`` . AWS Config does not support the ``NOT_APPLICABLE`` and ``INSUFFICIENT_DATA`` values. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/config/paginator.py#L986-L1089
from typing import Dict from datetime import datetime from typing import List from botocore.paginate import Paginator class DescribeAggregateComplianceByConfigRules(Paginator): def paginate(self, ConfigurationAggregatorName: str, Filters: Dict = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeAggregationAuthorizations(Paginator): def paginate(self, PaginationConfig: Dict = None) -> Dict: pass class DescribeComplianceByConfigRule(Paginator): def paginate(self, ConfigRuleNames: List = None, ComplianceTypes: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeComplianceByResource(Paginator): def paginate(self, ResourceType: str = None, ResourceId: str = None, ComplianceTypes: List = None, Limit: int = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeConfigRuleEvaluationStatus(Paginator): def paginate(self, ConfigRuleNames: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeConfigRules(Paginator): def paginate(self, ConfigRuleNames: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeConfigurationAggregatorSourcesStatus(Paginator): def paginate(self, ConfigurationAggregatorName: str, UpdateStatus: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeConfigurationAggregators(Paginator): def paginate(self, ConfigurationAggregatorNames: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribePendingAggregationRequests(Paginator): def paginate(self, PaginationConfig: Dict = None) -> Dict: pass class DescribeRemediationExecutionStatus(Paginator): def paginate(self, ConfigRuleName: str, ResourceKeys: List = None, PaginationConfig: Dict = None) -> Dict: pass class DescribeRetentionConfigurations(Paginator): def paginate(self, RetentionConfigurationNames: List = None, PaginationConfig: Dict = None) -> Dict: pass class GetAggregateComplianceDetailsByConfigRule(Paginator):
MIT License
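The paginate stub above only documents shapes; with the real boto3 client the same paginator is obtained by name. A sketch using placeholder aggregator, rule, account, and region values:

import boto3

client = boto3.client('config')
paginator = client.get_paginator(
    'get_aggregate_compliance_details_by_config_rule')
pages = paginator.paginate(
    ConfigurationAggregatorName='my-aggregator',   # placeholder
    ConfigRuleName='required-tags',                # placeholder
    AccountId='123456789012',
    AwsRegion='us-east-1',
    ComplianceType='NON_COMPLIANT',
)
for page in pages:
    for result in page['AggregateEvaluationResults']:
        qualifier = result['EvaluationResultIdentifier']['EvaluationResultQualifier']
        print(result['ComplianceType'], qualifier['ResourceId'])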
mandiant/capa
capa/features/extractors/smda/file.py
extract_file_function_names
python
def extract_file_function_names(smda_report, **kwargs): if False: yield NotImplementedError("SMDA doesn't have library matching") return
extract the names of statically-linked library functions.
https://github.com/mandiant/capa/blob/23a0aec1e6de99363023d327d1746b10968658c1/capa/features/extractors/smda/file.py#L63-L70
import lief import capa.features.extractors.common import capa.features.extractors.helpers import capa.features.extractors.strings from capa.features.file import Export, Import, Section from capa.features.common import String, Characteristic def extract_file_embedded_pe(buf, **kwargs): for offset, _ in capa.features.extractors.helpers.carve_pe(buf, 1): yield Characteristic("embedded pe"), offset def extract_file_export_names(buf, **kwargs): lief_binary = lief.parse(buf) if lief_binary is not None: for function in lief_binary.exported_functions: yield Export(function.name), function.address def extract_file_import_names(smda_report, buf): lief_binary = lief.parse(buf) if not isinstance(lief_binary, lief.PE.Binary): return for imported_library in lief_binary.imports: library_name = imported_library.name.lower() library_name = library_name[:-4] if library_name.endswith(".dll") else library_name for func in imported_library.entries: va = func.iat_address + smda_report.base_addr if func.name: for name in capa.features.extractors.helpers.generate_symbols(library_name, func.name): yield Import(name), va elif func.is_ordinal: for name in capa.features.extractors.helpers.generate_symbols(library_name, "#%s" % func.ordinal): yield Import(name), va def extract_file_section_names(buf, **kwargs): lief_binary = lief.parse(buf) if not isinstance(lief_binary, lief.PE.Binary): return if lief_binary and lief_binary.sections: base_address = lief_binary.optional_header.imagebase for section in lief_binary.sections: yield Section(section.name), base_address + section.virtual_address def extract_file_strings(buf, **kwargs): for s in capa.features.extractors.strings.extract_ascii_strings(buf): yield String(s.s), s.offset for s in capa.features.extractors.strings.extract_unicode_strings(buf): yield String(s.s), s.offset
Apache License 2.0
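A hypothetical driver for the file-feature extractors in the context above; "sample.exe" is a placeholder path, and extract_file_function_names itself yields nothing by design:

with open("sample.exe", "rb") as f:
    buf = f.read()

for feature, va in extract_file_export_names(buf):
    print(hex(va), feature)

# The SMDA backend has no library matching, so this loop body never executes:
for feature, va in extract_file_function_names(smda_report=None):
    print(feature, va)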
pypa/pipenv
pipenv/patched/notpip/_internal/commands/show.py
print_results
python
def print_results(distributions, list_files=False, verbose=False): results_printed = False for i, dist in enumerate(distributions): results_printed = True if i > 0: write_output("---") write_output("Name: %s", dist.get('name', '')) write_output("Version: %s", dist.get('version', '')) write_output("Summary: %s", dist.get('summary', '')) write_output("Home-page: %s", dist.get('home-page', '')) write_output("Author: %s", dist.get('author', '')) write_output("Author-email: %s", dist.get('author-email', '')) write_output("License: %s", dist.get('license', '')) write_output("Location: %s", dist.get('location', '')) write_output("Requires: %s", ', '.join(dist.get('requires', []))) write_output("Required-by: %s", ', '.join(dist.get('required_by', []))) if verbose: write_output("Metadata-Version: %s", dist.get('metadata-version', '')) write_output("Installer: %s", dist.get('installer', '')) write_output("Classifiers:") for classifier in dist.get('classifiers', []): write_output(" %s", classifier) write_output("Entry-points:") for entry in dist.get('entry_points', []): write_output(" %s", entry.strip()) if list_files: write_output("Files:") for line in dist.get('files', []): write_output(" %s", line.strip()) if "files" not in dist: write_output("Cannot locate installed-files.txt") return results_printed
Print the information from installed distributions found.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/patched/notpip/_internal/commands/show.py#L143-L180
from __future__ import absolute_import import logging import os from email.parser import FeedParser from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS from pipenv.patched.notpip._internal.utils.misc import write_output logger = logging.getLogger(__name__) class ShowCommand(Command): usage = """ %prog [options] <package> ...""" ignore_require_venv = True def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of installed files for each package.') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please provide a package name or names.') return ERROR query = args results = search_packages_info(query) if not print_results( results, list_files=options.files, verbose=options.verbose): return ERROR return SUCCESS def search_packages_info(query): installed = {} for p in pkg_resources.working_set: installed[canonicalize_name(p.project_name)] = p query_names = [canonicalize_name(name) for name in query] missing = sorted( [name for name, pkg in zip(query, query_names) if pkg not in installed] ) if missing: logger.warning('Package(s) not found: %s', ', '.join(missing)) def get_requiring_packages(package_name): canonical_name = canonicalize_name(package_name) return [ pkg.project_name for pkg in pkg_resources.working_set if canonical_name in [canonicalize_name(required.name) for required in pkg.requires()] ] for dist in [installed[pkg] for pkg in query_names if pkg in installed]: package = { 'name': dist.project_name, 'version': dist.version, 'location': dist.location, 'requires': [dep.project_name for dep in dist.requires()], 'required_by': get_requiring_packages(dist.project_name) } file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): package['installer'] = line.strip() break feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) classifiers = [] for line in metadata.splitlines(): if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package
MIT License
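A sketch of driving the two helpers directly, mirroring what ShowCommand.run does; these are internal pip functions, so the call is illustrative only:

results = search_packages_info(['requests'])
found = print_results(results, list_files=False, verbose=True)
if not found:
    print('Package not found')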
pylons/pastedeploy
paste/deploy/config.py
DispatchingConfig.push_process_config
python
def push_process_config(self, conf): self._process_configs.append(conf)
Like push_thread_config, but applies the configuration to the entire process.
https://github.com/pylons/pastedeploy/blob/df109cacddc56af8cfe75fc7df39af6e1f2cd92d/paste/deploy/config.py#L90-L95
import threading import re wsgilib = None local = None __all__ = ['DispatchingConfig', 'CONFIG', 'ConfigMiddleware', 'PrefixMiddleware'] def local_dict(): global config_local, local try: return config_local.wsgi_dict except NameError: config_local = threading.local() config_local.wsgi_dict = result = {} return result except AttributeError: config_local.wsgi_dict = result = {} return result class DispatchingConfig(object): _constructor_lock = threading.Lock() def __init__(self): self._constructor_lock.acquire() try: self.dispatching_id = 0 while 1: self._local_key = 'paste.processconfig_%i' % self.dispatching_id if not self._local_key in local_dict(): break self.dispatching_id += 1 finally: self._constructor_lock.release() self._process_configs = [] def push_thread_config(self, conf): local_dict().setdefault(self._local_key, []).append(conf) def pop_thread_config(self, conf=None): self._pop_from(local_dict()[self._local_key], conf) def _pop_from(self, lst, conf): popped = lst.pop() if conf is not None and popped is not conf: raise AssertionError( "The config popped (%s) is not the same as the config " "expected (%s)" % (popped, conf))
MIT License
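A minimal sketch of a process-wide configuration push; pop_process_config is not shown above and is assumed here to mirror pop_thread_config:

dispatching = DispatchingConfig()
conf = {'debug': 'true'}                 # invented configuration values
dispatching.push_process_config(conf)
try:
    pass  # every thread now sees `conf` as a fallback configuration
finally:
    dispatching.pop_process_config(conf)  # assumed counterpart of pop_thread_config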
li-plus/dsnet
src/anchor_based/anchor_helper.py
offset2bbox
python
def offset2bbox(offsets: np.ndarray, anchors: np.ndarray) -> np.ndarray: offsets = offsets.reshape(-1, 2) anchors = anchors.reshape(-1, 2) offset_center, offset_width = offsets[:, 0], offsets[:, 1] anchor_center, anchor_width = anchors[:, 0], anchors[:, 1] bbox_center = offset_center * anchor_width + anchor_center bbox_width = np.exp(offset_width) * anchor_width bbox = np.vstack((bbox_center, bbox_width)).T return bbox
Convert predicted offsets to CW bounding boxes. :param offsets: Predicted offsets. :param anchors: Sequence anchors. :return: Predicted bounding boxes.
https://github.com/li-plus/dsnet/blob/1804176e2e8b57846beb063667448982273fca89/src/anchor_based/anchor_helper.py#L74-L93
from typing import List, Tuple import numpy as np from helpers import bbox_helper def get_anchors(seq_len: int, scales: List[int]) -> np.ndarray: anchors = np.zeros((seq_len, len(scales), 2), dtype=np.int32) for pos in range(seq_len): for scale_idx, scale in enumerate(scales): anchors[pos][scale_idx] = [pos, scale] return anchors def get_pos_label(anchors: np.ndarray, targets: np.ndarray, iou_thresh: float ) -> Tuple[np.ndarray, np.ndarray]: seq_len, num_scales, _ = anchors.shape anchors = np.reshape(anchors, (seq_len * num_scales, 2)) loc_label = np.zeros((seq_len * num_scales, 2)) cls_label = np.zeros(seq_len * num_scales, dtype=np.int32) for target in targets: target = np.tile(target, (seq_len * num_scales, 1)) iou = bbox_helper.iou_cw(anchors, target) pos_idx = np.where(iou > iou_thresh) cls_label[pos_idx] = 1 loc_label[pos_idx] = bbox2offset(target[pos_idx], anchors[pos_idx]) loc_label = loc_label.reshape((seq_len, num_scales, 2)) cls_label = cls_label.reshape((seq_len, num_scales)) return cls_label, loc_label def get_neg_label(cls_label: np.ndarray, num_neg: int) -> np.ndarray: seq_len, num_scales = cls_label.shape cls_label = cls_label.copy().reshape(-1) cls_label[cls_label < 0] = 0 neg_idx, = np.where(cls_label == 0) np.random.shuffle(neg_idx) neg_idx = neg_idx[:num_neg] cls_label[neg_idx] = -1 cls_label = np.reshape(cls_label, (seq_len, num_scales)) return cls_label
MIT License
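A worked toy example for offset2bbox with two anchors; the numbers are invented, but the arithmetic follows the function above:

import numpy as np

anchors = np.array([[10.0, 4.0], [20.0, 8.0]])        # (center, width) pairs
offsets = np.array([[0.5, 0.0], [-0.25, np.log(2.0)]])
offset2bbox(offsets, anchors)
# array([[12.,  4.],
#        [18., 16.]])
# e.g. first box: center = 0.5 * 4 + 10 = 12, width = exp(0) * 4 = 4.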
linode/linode-cli
linodecli/configuration.py
CLIConfig._username_for_token
python
def _username_for_token(self, token): u = self._do_get_request('/profile', token=token, exit_on_error=False) if "errors" in u: print("That token didn't work: {}".format(','.join([c["reason"] for c in u['errors']]))) return None return u['username']
A helper function that returns the username associated with a token by requesting it from the API

https://github.com/linode/linode-cli/blob/abda241415d5805ee17f1697494a013d408d30ca/linodecli/configuration.py#L288-L298
from __future__ import print_function import argparse from datetime import datetime import re from http import server import socket import webbrowser try: import configparser except ImportError: import ConfigParser as configparser import requests import os import sys ENV_TOKEN_NAME='LINODE_CLI_TOKEN' LEGACY_CONFIG_DIR = os.path.expanduser('~') LEGACY_CONFIG_NAME = '.linode-cli' CONFIG_DIR = os.environ.get('XDG_CONFIG_HOME', "{}/{}".format(os.path.expanduser('~'), '.config')) CONFIG_NAME = 'linode-cli' TOKEN_GENERATION_URL='https://cloud.linode.com/profile/tokens' OAUTH_CLIENT_ID = '5823b4627e45411d18e9' KNOWN_GOOD_BROWSERS = set(('chrome', 'firefox', 'mozilla', 'netscape', 'opera', 'safari', 'chromium', 'chromium-browser', 'epiphany')) DEFAULT_LANDING_PAGE = """ <h2>Success</h2><br/><p>You may return to your terminal to continue..</p> <script> // this is gross, sorry let r = new XMLHttpRequest('http://localhost:{port}'); r.open('GET', '/token/'+window.location.hash.substr(1)); r.send(); </script> """ def input_helper(prompt): if sys.version_info[0] == 2: return raw_input(prompt) else: return input(prompt) class CLIConfig: def __init__(self, base_url, username=None, skip_config=False): self.base_url = base_url self.username = username self.config = self._get_config(load=not skip_config) self.running_plugin = None self.used_env_token = False self._configured = False self.configure_with_pat = "--token" in sys.argv if not skip_config and not self.config.has_option('DEFAULT', 'default-user') and self.config.has_option('DEFAULT', 'token'): self._handle_no_default_user() environ_token = os.environ.get(ENV_TOKEN_NAME, None) if (not self.config.has_option('DEFAULT', 'default-user') and not skip_config and not environ_token): self.configure() elif environ_token: self.used_env_token = True def set_user(self, username): if not self.config.has_section(username): print('User {} is not configured!'.format(username)) sys.exit(1) self.username = username def default_username(self): if self.config.has_option('DEFAULT', "default-user"): return self.config.get('DEFAULT', 'default-user') return "" def update_namespace(self, namespace, new_dict): ns_dict = vars(namespace) for k in new_dict: if k.startswith('plugin-'): continue if k in ns_dict and ns_dict[k] is None: ns_dict[k] = new_dict[k] return argparse.Namespace(**ns_dict) def update(self, namespace, allowed_defaults): if self.used_env_token and self.config is None: return username = self.username or self.default_username() if not self.config.has_option(username, 'token') and not os.environ.get(ENV_TOKEN_NAME, None): print("User {} is not configured.".format(username)) sys.exit(1) if self.config.has_section(username) and allowed_defaults: update_dicts = { default_key: self.config.get(username, default_key) for default_key in allowed_defaults if self.config.has_option(username, default_key) } return self.update_namespace(namespace, update_dicts) return namespace def get_token(self): if self.used_env_token: return os.environ.get(ENV_TOKEN_NAME, None) if self.config.has_option(self.username or self.default_username(), "token"): return self.config.get(self.username or self.default_username(), "token") return "" def remove_user(self, username): if self.default_username() == username: print('Cannot remote {} as they are the default user! 
You can change ' 'the default user with: `linode-cli set-user USERNAME`'.format(username)) sys.exit(1) if self.config.has_section(username): self.config.remove_section(username) self.write_config() def print_users(self): print('Configured Users: ') default_user = self.default_username() for sec in self.config.sections(): if sec != 'DEFAULT': print('{} {}'.format('*' if sec == default_user else ' ', sec)) sys.exit(0) def set_default_user(self, username): if not self.config.has_section(username): print('User {} is not configured!'.format(username)) sys.exit(1) self.config.set('DEFAULT', 'default-user', username) self.write_config() def get_value(self, key): username = self.username or self.default_username() if not self.config.has_option(username, key): return None return self.config.get(username, key) def plugin_set_value(self, key, value): if self.running_plugin is None: raise RuntimeError('No running plugin to retrieve configuration for!') username = self.username or self.default_username() self.config.set(username, 'plugin-{}-{}'.format(self.running_plugin, key), value) def plugin_get_value(self, key): if self.running_plugin is None: raise RuntimeError('No running plugin to retrieve configuration for!') username = self.username or self.default_username() full_key = 'plugin-{}-{}'.format(self.running_plugin, key) if not self.config.has_option(username, full_key): return None return self.config.get(username, full_key) def write_config(self, silent=False): if not os.path.exists("{}/{}".format(os.path.expanduser('~'), '.config')): os.makedirs("{}/{}".format(os.path.expanduser('~'), '.config')) with open(self._get_config_path(), 'w') as f: self.config.write(f) if not silent: print("\nConfig written to {}".format(self._get_config_path()))
BSD 3-Clause New or Revised License
spcl/dace
dace/codegen/targets/fpga.py
is_fpga_kernel
python
def is_fpga_kernel(sdfg, state): if ("is_FPGA_kernel" in state.location and state.location["is_FPGA_kernel"] == False): return False data_nodes = state.data_nodes() if len(data_nodes) == 0: return False for n in data_nodes: if n.desc(sdfg).storage not in (dtypes.StorageType.FPGA_Global, dtypes.StorageType.FPGA_Local, dtypes.StorageType.FPGA_Registers, dtypes.StorageType.FPGA_ShiftRegister): return False return True
Returns whether the given state is an FPGA kernel and should be dispatched to the FPGA code generator. :return: True if this is an FPGA kernel, False otherwise.
https://github.com/spcl/dace/blob/4c6695daaa43df22548b987024d1b681e92c7983/dace/codegen/targets/fpga.py#L57-L75
from six import StringIO import collections import enum import functools import itertools import re import warnings import sympy as sp import numpy as np from typing import Dict, Iterable, List, Set, Tuple, Union import copy import dace from dace.codegen.targets import cpp from dace import subsets, data as dt, dtypes, memlet, symbolic from dace.config import Config from dace.frontend import operations from dace.sdfg import SDFG, nodes, utils, dynamic_map_inputs from dace.sdfg import ScopeSubgraphView, find_input_arraynode, find_output_arraynode from dace.codegen import exceptions as cgx from dace.codegen.codeobject import CodeObject from dace.codegen.dispatcher import DefinedType from dace.codegen.prettycode import CodeIOStream from dace.codegen.targets.target import (TargetCodeGenerator, IllegalCopy, make_absolute) from dace.codegen import cppunparse from dace.properties import Property, make_properties, indirect_properties from dace.symbolic import evaluate from dace.transformation.dataflow import MapUnroll from collections import defaultdict _CPU_STORAGE_TYPES = { dtypes.StorageType.CPU_Heap, dtypes.StorageType.CPU_ThreadLocal, dtypes.StorageType.CPU_Pinned } _FPGA_STORAGE_TYPES = { dtypes.StorageType.FPGA_Global, dtypes.StorageType.FPGA_Local, dtypes.StorageType.FPGA_Registers, dtypes.StorageType.FPGA_ShiftRegister } _FPGA_LOCAL_STORAGE_TYPES = { dtypes.StorageType.FPGA_Local, dtypes.StorageType.FPGA_Registers, dtypes.StorageType.FPGA_ShiftRegister } def vector_element_type_of(dtype): if isinstance(dtype, dace.pointer): return vector_element_type_of(dtype.base_type) elif isinstance(dtype, dace.vector): return dtype.base_type return dtype
BSD 3-Clause New or Revised License
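A small sketch of how is_fpga_kernel classifies a state, assuming the usual dace SDFG-building API; the program itself is invented:

import dace

sdfg = dace.SDFG('fpga_check')
sdfg.add_array('A', [16], dace.float32,
               storage=dace.StorageType.FPGA_Global)
state = sdfg.add_state('main')
state.add_read('A')
print(is_fpga_kernel(sdfg, state))   # True: all data nodes use FPGA storage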
peerassets/pypeerassets
pypeerassets/provider/explorer.py
Explorer.getaddress
python
def getaddress(self, address: str) -> dict: return cast(dict, self.ext_fetch('getaddress/' + address))
Returns information for a given address.
https://github.com/peerassets/pypeerassets/blob/d1be8db9b2d0355f33aaf1d958086ca0f5042f69/pypeerassets/provider/explorer.py#L106-L109
from decimal import Decimal from http.client import HTTPResponse import json from typing import Union, cast from urllib.request import urlopen from btcpy.structs.transaction import ScriptSig, Sequence, TxIn from pypeerassets.exceptions import InsufficientFunds, UnsupportedNetwork from pypeerassets.provider.common import Provider class Explorer(Provider): def __init__(self, network: str) -> None: self.net = self._netname(network)['short'] if 'ppc' not in self.net: raise UnsupportedNetwork('This API only supports Peercoin.') def api_fetch(self, command: str) -> Union[dict, int, float, str]: apiurl = 'https://explorer.peercoin.net/api/' if self.is_testnet: apiurl = 'https://testnet-explorer.peercoin.net/api/' response = cast(HTTPResponse, urlopen(apiurl + command)) if response.status != 200: raise Exception(response.reason) r = response.read() try: return json.loads(r.decode()) except json.decoder.JSONDecodeError: return r.decode() def ext_fetch(self, command: str) -> Union[dict, int, float, str]: extapiurl = 'https://explorer.peercoin.net/ext/' if self.is_testnet: extapiurl = 'https://testnet-explorer.peercoin.net/ext/' response = cast(HTTPResponse, urlopen(extapiurl + command)) if response.status != 200: raise Exception(response.reason) try: return json.loads(response.read().decode()) except json.decoder.JSONDecodeError: return response.read().decode() def getdifficulty(self) -> dict: return cast(dict, self.api_fetch('getdifficulty')) def getconnectioncount(self) -> int: return cast(int, self.api_fetch('getconnectioncount')) def getblockcount(self) -> int: return cast(int, self.api_fetch('getblockcount')) def getblockhash(self, index: int) -> str: return cast(str, self.api_fetch('getblockhash?index=' + str(index))) def getblock(self, hash: str) -> dict: return cast(dict, self.api_fetch('getblock?hash=' + hash)) def getrawtransaction(self, txid: str, decrypt: int=0) -> dict: q = 'getrawtransaction?txid={txid}&decrypt={decrypt}'.format(txid=txid, decrypt=decrypt) return cast(dict, self.api_fetch(q)) def getnetworkghps(self) -> float: return cast(float, self.api_fetch('getnetworkghps')) def getmoneysupply(self) -> Decimal: return Decimal(cast(float, self.ext_fetch('getmoneysupply'))) def getdistribution(self) -> dict: return cast(dict, self.ext_fetch('getdistribution'))
BSD 3-Clause New or Revised License
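A minimal sketch of getaddress; the network short name ('tppc' for the Peercoin testnet) and the address string are assumptions made for illustration:

provider = Explorer(network='tppc')                               # assumed testnet name
info = provider.getaddress('mj46gUeZgeDc6wCnFCnoHDv5eVQXqA3mBr')  # placeholder address
print(sorted(info))   # keys of the explorer's address record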
dme722/ayx-blackbird
ayx_blackbird/mixins/anchor_utils_mixin.py
AnchorUtilsMixin.get_input_anchor
python
def get_input_anchor(self, input_anchor_name: str) -> InputAnchor: try: return [ anchor for anchor in self.input_anchors if anchor.name == input_anchor_name ][0] except IndexError: raise AnchorNotFoundException(f"{input_anchor_name} not found.")
Get an input anchor by name.
https://github.com/dme722/ayx-blackbird/blob/16533e22e3cbe36621328d94500a6e58ec0c73ea/ayx_blackbird/mixins/anchor_utils_mixin.py#L26-L35
from typing import List from ..anchors import InputAnchor, OutputAnchor from ..utilities.constants import ConnectionStatus from ..utilities.exceptions import AnchorNotFoundException class AnchorUtilsMixin: def __init__(self) -> None: self.input_anchors: List[InputAnchor] = [] self.output_anchors: List[OutputAnchor] = [] def push_all_metadata(self) -> None: for anchor in self.output_anchors: anchor.push_metadata() def close_output_anchors(self) -> None: for anchor in self.output_anchors: anchor.close()
Apache License 2.0
qiskit/qiskit-ibmq-provider
test/fake_account_client.py
BaseFakeAccountClient.__init__
python
def __init__(self, job_limit=-1, job_class=BaseFakeJob): self._jobs = {} self._results_retrieved = set() self._job_limit = job_limit self._executor = ThreadPoolExecutor() self._job_class = job_class if isinstance(self._job_class, list): self._job_class.reverse()
Initialize a fake account client.
https://github.com/qiskit/qiskit-ibmq-provider/blob/5e89e43eb7d97427946f3462cc6d814860c4a291/test/fake_account_client.py#L185-L193
import time import copy from random import randrange import uuid from concurrent.futures import ThreadPoolExecutor, wait from qiskit.test.mock.backends.poughkeepsie.fake_poughkeepsie import FakePoughkeepsie from qiskit.providers.ibmq.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES from qiskit.providers.ibmq.api.exceptions import RequestsApiError, UserTimeoutExceededError VALID_RESULT_RESPONSE = { 'backend_name': 'ibmqx2', 'backend_version': '1.1.1', 'job_id': 'XC1323XG2', 'qobj_id': 'Experiment1', 'success': True, 'results': [] } VALID_RESULT = { 'header': { 'name': 'Bell state', 'memory_slots': 2, 'creg_sizes': [['c', 2]], 'clbit_labels': [['c', 0], ['c', 1]], 'qubit_labels': [['q', 0], ['q', 1]] }, 'shots': 1024, 'status': 'DONE', 'success': True, 'data': { 'counts': { '0x0': 484, '0x3': 540 } } } class BaseFakeJob: _job_progress = [ ApiJobStatus.CREATING, ApiJobStatus.VALIDATING, ApiJobStatus.RUNNING, ApiJobStatus.COMPLETED ] def __init__(self, executor, job_id, qobj, backend_name, job_tags=None, job_name=None): self._job_id = job_id self._status = ApiJobStatus.CREATING self.qobj = qobj self._future = executor.submit(self._auto_progress) self._result = None self._backend_name = backend_name self._job_tags = job_tags self._job_name = job_name def _auto_progress(self): for status in self._job_progress: time.sleep(0.5) self._status = status if self._status == ApiJobStatus.COMPLETED: new_result = copy.deepcopy(VALID_RESULT_RESPONSE) for _ in range(len(self.qobj['experiments'])): valid_result = copy.deepcopy(VALID_RESULT) counts = randrange(1024) valid_result['data']['counts'] = { '0x0': counts, '0x3': 1024-counts} new_result['results'].append(valid_result) new_result['job_id'] = self._job_id new_result['backend_name'] = self._backend_name self._result = new_result def data(self): data = { 'job_id': self._job_id, 'kind': 'q-object', 'status': self._status.value, 'creation_date': '2019-01-01T13:15:58.425972', '_backend_info': {'name': self._backend_name} } if self._job_tags: data['tags'] = self._job_tags.copy() if self._job_name: data['name'] = self._job_name return data def cancel(self): self._future.cancel() wait([self._future]) self._status = ApiJobStatus.CANCELLED self._result = None def result(self): if not self._result: raise RequestsApiError('Result is not available') return self._result def status(self): return self._status def name(self): return self._job_name class CancelableFakeJob(BaseFakeJob): _job_progress = [ ApiJobStatus.CREATING, ApiJobStatus.VALIDATING, ApiJobStatus.RUNNING ] class NewFieldFakeJob(BaseFakeJob): def data(self): data = super().data() data['new_field'] = 'foo' return data class MissingFieldFakeJob(BaseFakeJob): def data(self): data = super().data() del data['job_id'] return data class FailedFakeJob(BaseFakeJob): _job_progress = [ ApiJobStatus.CREATING, ApiJobStatus.VALIDATING, ApiJobStatus.RUNNING, ApiJobStatus.ERROR_RUNNING_JOB ] def data(self): data = super().data() if self.status() == ApiJobStatus.ERROR_RUNNING_JOB: data['error'] = {'message': 'Job failed.', 'code': 1234} return data class BaseFakeAccountClient:
Apache License 2.0
catalyst-team/catalyst-rl
catalyst_rl/dl/experiment/base.py
BaseExperiment.get_loaders
python
def get_loaders( self, stage: str, epoch: int = None, ) -> "OrderedDict[str, DataLoader]": return self._loaders
Returns the loaders for a given stage
https://github.com/catalyst-team/catalyst-rl/blob/75ffa808e2bbb9071a169a1a9c813deb6a69a797/catalyst_rl/dl/experiment/base.py#L146-L152
from typing import Any, Dict, Iterable, List, Mapping, Union from collections import OrderedDict from torch import nn from torch.utils.data import DataLoader from catalyst_rl.dl import Callback, Experiment, utils from catalyst_rl.utils.tools.typing import Criterion, Model, Optimizer, Scheduler class BaseExperiment(Experiment): def __init__( self, model: Model, loaders: "OrderedDict[str, DataLoader]", callbacks: "Union[OrderedDict[str, Callback], List[Callback]]" = None, logdir: str = None, stage: str = "train", criterion: Criterion = None, optimizer: Optimizer = None, scheduler: Scheduler = None, num_epochs: int = 1, valid_loader: str = "valid", main_metric: str = "loss", minimize_metric: bool = True, verbose: bool = False, check_run: bool = False, state_kwargs: Dict = None, checkpoint_data: Dict = None, distributed_params: Dict = None, monitoring_params: Dict = None, initial_seed: int = 42, ): self._model = model self._loaders = loaders self._callbacks = utils.process_callbacks(callbacks) self._criterion = criterion self._optimizer = optimizer self._scheduler = scheduler self._initial_seed = initial_seed self._logdir = logdir self._stage = stage self._num_epochs = num_epochs self._valid_loader = valid_loader self._main_metric = main_metric self._minimize_metric = minimize_metric self._verbose = verbose self._check_run = check_run self._additional_state_kwargs = state_kwargs or {} self._checkpoint_data = checkpoint_data or {} self._distributed_params = distributed_params or {} self._monitoring_params = monitoring_params or {} @property def initial_seed(self) -> int: return self._initial_seed @property def logdir(self): return self._logdir @property def stages(self) -> Iterable[str]: return [self._stage] @property def distributed_params(self) -> Dict: return self._distributed_params @property def monitoring_params(self) -> Dict: return self._monitoring_params def get_state_params(self, stage: str) -> Mapping[str, Any]: default_params = dict( logdir=self.logdir, num_epochs=self._num_epochs, valid_loader=self._valid_loader, main_metric=self._main_metric, verbose=self._verbose, minimize_metric=self._minimize_metric, checkpoint_data=self._checkpoint_data, ) state_params = {**default_params, **self._additional_state_kwargs} return state_params def get_model(self, stage: str) -> Model: return self._model def get_criterion(self, stage: str) -> Criterion: return self._criterion def get_optimizer(self, stage: str, model: nn.Module) -> Optimizer: return self._optimizer def get_scheduler(self, stage: str, optimizer=None) -> Scheduler: return self._scheduler
Apache License 2.0
yen223/lunisolar
pycalcal/pycalcal.py
seconds
python
def seconds(clock): return clock[2]
Return the seconds of clock time 'clock'.
https://github.com/yen223/lunisolar/blob/c130efe847d2fc9876c1b5458536c565e9d3ea7e/pycalcal/pycalcal.py#L282-L284
from __future__ import division from mpmath import * mp.prec = 50 BOGUS = 'bogus' def quotient(m, n): from operator import floordiv return ifloor(m / n) def ifloor(n): from math import floor return int(floor(n)) def iround(n): from __builtin__ import round return int(round(n)) from operator import mod def amod(x, y): return y + (mod(x, -y)) def next(i, p): return i if p(i) else next(i+1, p) def final(i, p): return i - 1 if not p(i) else final(i+1, p) def summa(f, k, p): return 0 if not p(k) else f(k) + summa(f, k+1, p) def altsumma(f, k, p): if not p(k): return 0 else: S = f(k) C = 0 j = k + 1 while p(j): Y = f(j) - C T = S + Y C = (T - S) - Y S = T j += 1 return S def binary_search(lo, hi, p, e): x = (lo + hi) / 2 if p(lo, hi): return x elif e(x): return binary_search(lo, x, p, e) else: return binary_search(x, hi, p, e) def invert_angular(f, y, a, b, prec=10**-5): return binary_search(a, b, (lambda l, h: ((h - l) <= prec)), (lambda x: mod((f(x) - y), 360) < 180)) def sigma(l, b): return sum(b(*e) for e in zip(*l)) from copy import copy def poly(x, a): n = len(a) - 1 p = a[n] for i in range(1, n+1): p = p * x + a[n-i] return p def epoch(): return 0 def rd(tee): return tee - epoch() SUNDAY = 0 MONDAY = 1 TUESDAY = 2 WEDNESDAY = 3 THURSDAY = 4 FRIDAY = 5 SATURDAY = SUNDAY + 6 DAYS_OF_WEEK_NAMES = { SUNDAY : "Sunday", MONDAY : "Monday", TUESDAY : "Tuesday", WEDNESDAY : "Wednesday", THURSDAY : "Thursday", FRIDAY : "Friday", SATURDAY : "Saturday"} def day_of_week_from_fixed(date): return mod(date - rd(0) - SUNDAY, 7) def standard_month(date): return date[1] def standard_day(date): return date[2] def standard_year(date): return date[0] def time_of_day(hour, minute, second): return [hour, minute, second] def hour(clock): return clock[0] def minute(clock): return clock[1]
MIT License
skorch-dev/skorch
skorch/net.py
NeuralNet.validation_step
python
def validation_step(self, batch, **fit_params): self._set_training(False) Xi, yi = unpack_data(batch) with torch.no_grad(): y_pred = self.infer(Xi, **fit_params) loss = self.get_loss(y_pred, yi, X=Xi, training=False) return { 'loss': loss, 'y_pred': y_pred, }
Perform a forward step using batched data and return the resulting loss. The module is set to be in evaluation mode (e.g. dropout is not applied). Parameters ---------- batch A single batch returned by the data loader. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the ``self.train_split`` call.
https://github.com/skorch-dev/skorch/blob/32577c56b8765cbf2ed8b90c52ac6bc7b97d5c18/skorch/net.py#L844-L869
import fnmatch from functools import partial from itertools import chain from collections import OrderedDict from contextlib import contextmanager import tempfile import warnings import numpy as np from sklearn.base import BaseEstimator import torch from torch.utils.data import DataLoader from skorch.callbacks import EpochTimer from skorch.callbacks import PrintLog from skorch.callbacks import PassthroughScoring from skorch.callbacks.base import _issue_warning_if_on_batch_override from skorch.dataset import Dataset from skorch.dataset import CVSplit from skorch.dataset import get_len from skorch.dataset import unpack_data from skorch.exceptions import DeviceWarning from skorch.exceptions import SkorchAttributeError from skorch.history import History from skorch.setter import optimizer_setter from skorch.utils import _identity from skorch.utils import _infer_predict_nonlinearity from skorch.utils import FirstStepAccumulator from skorch.utils import TeeGenerator from skorch.utils import _check_f_arguments from skorch.utils import check_is_fitted from skorch.utils import duplicate_items from skorch.utils import get_map_location from skorch.utils import is_dataset from skorch.utils import params_for from skorch.utils import to_device from skorch.utils import to_numpy from skorch.utils import to_tensor class NeuralNet: prefixes_ = ['iterator_train', 'iterator_valid', 'callbacks', 'dataset'] cuda_dependent_attributes_ = [] init_context_ = None _modules = [] _criteria = [] _optimizers = [] def __init__( self, module, criterion, optimizer=torch.optim.SGD, lr=0.01, max_epochs=10, batch_size=128, iterator_train=DataLoader, iterator_valid=DataLoader, dataset=Dataset, train_split=CVSplit(5), callbacks=None, predict_nonlinearity='auto', warm_start=False, verbose=1, device='cpu', **kwargs ): self.module = module self.criterion = criterion self.optimizer = optimizer self.lr = lr self.max_epochs = max_epochs self.batch_size = batch_size self.iterator_train = iterator_train self.iterator_valid = iterator_valid self.dataset = dataset self.train_split = train_split self.callbacks = callbacks self.predict_nonlinearity = predict_nonlinearity self.warm_start = warm_start self.verbose = verbose self.device = device self._check_deprecated_params(**kwargs) history = kwargs.pop('history', None) initialized = kwargs.pop('initialized_', False) virtual_params = kwargs.pop('virtual_params_', dict()) self._kwargs = kwargs vars(self).update(kwargs) self.history_ = history self.initialized_ = initialized self.virtual_params_ = virtual_params @property def history(self): return self.history_ @history.setter def history(self, value): self.history_ = value @property def _default_callbacks(self): return [ ('epoch_timer', EpochTimer()), ('train_loss', PassthroughScoring( name='train_loss', on_train=True, )), ('valid_loss', PassthroughScoring( name='valid_loss', )), ('print_log', PrintLog()), ] def get_default_callbacks(self): return self._default_callbacks def notify(self, method_name, **cb_kwargs): if not self.history: _issue_warning_if_on_batch_override(self.callbacks_) getattr(self, method_name)(self, **cb_kwargs) for _, cb in self.callbacks_: getattr(cb, method_name)(self, **cb_kwargs) def on_train_begin(self, net, X=None, y=None, **kwargs): pass def on_train_end(self, net, X=None, y=None, **kwargs): pass def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, **kwargs): self.history.new_epoch() self.history.record('epoch', len(self.history)) def on_epoch_end(self, net, dataset_train=None, dataset_valid=None, 
**kwargs): pass def on_batch_begin(self, net, batch=None, training=False, **kwargs): self.history.new_batch() def on_batch_end(self, net, batch=None, training=False, **kwargs): pass def on_grad_computed( self, net, named_parameters, batch=None, training=False, **kwargs): pass def _yield_callbacks(self): print_logs = [] for item in self.get_default_callbacks() + (self.callbacks or []): if isinstance(item, (tuple, list)): named_by_user = True name, cb = item else: named_by_user = False cb = item if isinstance(cb, type): name = cb.__name__ else: name = cb.__class__.__name__ if isinstance(cb, PrintLog) or (cb == PrintLog): print_logs.append((name, cb, named_by_user)) else: yield name, cb, named_by_user yield from print_logs def _callbacks_grouped_by_name(self): callbacks, names_set_by_user = OrderedDict(), set() for name, cb, named_by_user in self._yield_callbacks(): if named_by_user: names_set_by_user.add(name) callbacks[name] = callbacks.get(name, []) + [cb] return callbacks, names_set_by_user def _uniquely_named_callbacks(self): grouped_cbs, names_set_by_user = self._callbacks_grouped_by_name() for name, cbs in grouped_cbs.items(): if len(cbs) > 1 and name in names_set_by_user: raise ValueError("Found duplicate user-set callback name " "'{}'. Use unique names to correct this." .format(name)) for i, cb in enumerate(cbs): if len(cbs) > 1: unique_name = '{}_{}'.format(name, i+1) if unique_name in grouped_cbs: raise ValueError("Assigning new callback name failed " "since new name '{}' exists already." .format(unique_name)) else: unique_name = name yield unique_name, cb def initialize_callbacks(self): callbacks_ = [] class Dummy: pass for name, cb in self._uniquely_named_callbacks(): param_callback = getattr(self, 'callbacks__' + name, Dummy) if param_callback is not Dummy: cb = param_callback params = self.get_params_for('callbacks__{}'.format(name)) if (cb is None) and params: raise ValueError("Trying to set a parameter for callback {} " "which does not exist.".format(name)) if cb is None: continue if isinstance(cb, type): cb = cb(**params) else: cb.set_params(**params) cb.initialize() callbacks_.append((name, cb)) self.callbacks_ = callbacks_ return self def initialized_instance(self, instance_or_cls, kwargs): is_init = isinstance(instance_or_cls, torch.nn.Module) if is_init and not kwargs: return instance_or_cls if is_init: return type(instance_or_cls)(**kwargs) return instance_or_cls(**kwargs) def initialize_criterion(self): kwargs = self.get_params_for('criterion') criterion = self.initialized_instance(self.criterion, kwargs) self.criterion_ = criterion return self def initialize_module(self): kwargs = self.get_params_for('module') module = self.initialized_instance(self.module, kwargs) self.module_ = module return self def _is_virtual_param(self, key): return any(fnmatch.fnmatch(key, pat) for pat in self.virtual_params_) def _virtual_setattr(self, param, val): setattr(self, param, val) def _register_virtual_param(self, param_patterns, fn=_virtual_setattr): if not isinstance(param_patterns, list): param_patterns = [param_patterns] for pattern in param_patterns: self.virtual_params_[pattern] = fn def _apply_virtual_params(self, virtual_kwargs): for pattern, fn in self.virtual_params_.items(): for key, val in virtual_kwargs.items(): if not fnmatch.fnmatch(key, pattern): continue fn(self, key, val) def initialize_virtual_params(self): self.virtual_params_ = {} def initialize_optimizer(self, triggered_directly=None): if triggered_directly is not None: warnings.warn( "The 'triggered_directly' 
argument to 'initialize_optimizer' is " "deprecated, please don't use it anymore.", DeprecationWarning) named_parameters = self.get_all_learnable_params() args, kwargs = self.get_params_for_optimizer( 'optimizer', named_parameters) self.optimizer_ = self.optimizer(*args, **kwargs) return self def initialize_history(self): self.history_ = History() return self def _format_reinit_msg(self, name, kwargs=None, triggered_directly=True): msg = "Re-initializing {}".format(name) if triggered_directly and kwargs: msg += (" because the following parameters were re-set: {}" .format(', '.join(sorted(kwargs)))) msg += "." return msg @contextmanager def _current_init_context(self, name): try: self.init_context_ = name yield finally: self.init_context_ = None def _initialize_virtual_params(self): with self._current_init_context('virtual_params'): self.initialize_virtual_params() return self def _initialize_callbacks(self): with self._current_init_context('callbacks'): if self.callbacks == "disable": self.callbacks_ = [] return self self.initialize_callbacks() return self def _initialize_criterion(self, reason=None): with self._current_init_context('criterion'): kwargs = {} for criterion_name in self._criteria: kwargs.update(self.get_params_for(criterion_name)) has_init_criterion = any( isinstance(getattr(self, criterion_name + '_', None), torch.nn.Module) for criterion_name in self._criteria) if kwargs or reason or has_init_criterion: if self.initialized_ and self.verbose: if reason: msg = reason else: msg = self._format_reinit_msg("criterion", kwargs) print(msg) self.initialize_criterion() for name in self._criteria: criterion = getattr(self, name + '_') if isinstance(criterion, torch.nn.Module): setattr(self, name + '_', to_device(criterion, self.device)) return self def _initialize_module(self, reason=None): with self._current_init_context('module'): kwargs = {} for module_name in self._modules: kwargs.update(self.get_params_for(module_name)) has_init_module = any( isinstance(getattr(self, module_name + '_', None), torch.nn.Module) for module_name in self._modules) if kwargs or reason or has_init_module: if self.initialized_ and self.verbose: if reason: msg = reason else: msg = self._format_reinit_msg("module", kwargs) print(msg) self.initialize_module() for name in self._modules: module = getattr(self, name + '_') if isinstance(module, torch.nn.Module): setattr(self, name + '_', to_device(module, self.device)) return self def get_all_learnable_params(self): seen = set() for name in self._modules + self._criteria: module = getattr(self, name + '_') named_parameters = getattr(module, 'named_parameters', None) if not named_parameters: continue for param_name, param in named_parameters(): if param in seen: continue seen.add(param) yield param_name, param def _initialize_optimizer(self, reason=None): with self._current_init_context('optimizer'): if self.initialized_ and self.verbose: if reason: msg = reason else: msg = self._format_reinit_msg("optimizer", triggered_directly=False) print(msg) self.initialize_optimizer() for name in self._optimizers: param_pattern = [name + '__param_groups__*__*', name + '__*'] if name == 'optimizer': param_pattern.append('lr') setter = partial( optimizer_setter, optimizer_attr=name + '_', optimizer_name=name, ) self._register_virtual_param(param_pattern, setter) return self def _initialize_history(self): with self._current_init_context('history'): self.initialize_history() return self def initialize(self): self._initialize_virtual_params() self._initialize_callbacks() 
self._initialize_module() self._initialize_criterion() self._initialize_optimizer() self._initialize_history() self._check_kwargs(self._kwargs) self.initialized_ = True return self def check_data(self, X, y=None): pass def _set_training(self, training=True): for module_name in self._modules + self._criteria: module = getattr(self, module_name + '_') if isinstance(module, torch.nn.Module): module.train(training)
BSD 3-Clause New or Revised License
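The context above is skorch's NeuralNet initialization machinery (virtual params, callbacks, module, criterion, optimizer, history). A minimal sketch of how that chain is normally triggered from user code, assuming the standard skorch NeuralNetClassifier entry point and a made-up toy torch module (both are assumptions of this sketch, not part of the record):

import torch
from skorch import NeuralNetClassifier


class MyModule(torch.nn.Module):
    # Toy module invented for the sketch.
    def __init__(self, n_features=20, n_classes=2):
        super().__init__()
        self.dense = torch.nn.Linear(n_features, n_classes)

    def forward(self, X):
        return torch.softmax(self.dense(X), dim=-1)


# Constructing the net builds nothing yet; initialize() runs the
# _initialize_virtual_params / callbacks / module / criterion / optimizer / history
# chain shown in the context above.
net = NeuralNetClassifier(MyModule, max_epochs=3, lr=0.1)
net.initialize()
print(net.module_)  # the torch module instantiated by initialize_module()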
openstack/kuryr-libnetwork
kuryr_libnetwork/port_driver/drivers/veth.py
VethDriver.get_supported_bindings
python
def get_supported_bindings(self): return self.BINDING_DRIVERS
Returns a tuple of supported binding driver names for the driver. :returns: a tuple of strings
https://github.com/openstack/kuryr-libnetwork/blob/e5daefc3c2dae2d4f65ef584eb257e841cd417e7/kuryr_libnetwork/port_driver/drivers/veth.py#L24-L29
from kuryr.lib import binding from kuryr.lib.binding.drivers import utils from kuryr_libnetwork.port_driver import driver class VethDriver(driver.Driver): BINDING_DRIVERS = ('veth',)
Apache License 2.0
amossys/fragscapy
fragscapy/modifications/mod.py
Mod.get_params
python
def get_params(self): return {k: v for k, v in vars(self).items() if k[0] != "_"}
Returns a dictionary of the options defining the mod.
https://github.com/amossys/fragscapy/blob/3ee7f5c73fc6c7eb64858e197c0b8d2b313734e0/fragscapy/modifications/mod.py#L110-L112
import abc class Mod(abc.ABC): name = None doc = None _nb_args = -1 def __init__(self, *args): self.check_args(*args) self.parse_args(*args) def is_deterministic(self): return True def parse_args(self, *args): def check_args(self, *args): if self._nb_args >= 0 and len(args) != self._nb_args: raise ValueError( "Incorrect number of parameters specified. " "Got {}, expected {}.".format(len(args), self._nb_args) ) @abc.abstractmethod def apply(self, pkt_list): raise NotImplementedError @classmethod def usage(cls): if cls.name is None: print(cls.__class__.__name__.lower()) else: print(cls.name) print("==========") if cls.doc is None: print("No usage documented") else: print(" ", cls.doc.replace('\n', '\n '), sep='')
MIT License
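Because get_params simply filters vars(self) for names without a leading underscore, a tiny sketch with an invented Mod subclass makes the behaviour concrete (the Drop class and its fields are illustrations only, not part of fragscapy):

from fragscapy.modifications.mod import Mod


class Drop(Mod):  # hypothetical mod used only to illustrate get_params()
    name = "drop"
    doc = "Drop every n-th packet"
    _nb_args = 1

    def parse_args(self, *args):
        self.n = int(args[0])
        self._internal = "not reported"  # leading underscore -> excluded

    def apply(self, pkt_list):
        return pkt_list[:]  # no-op, just satisfies the abstract method


mod = Drop("3")
print(mod.get_params())  # {'n': 3} -- underscore-prefixed attributes are filtered out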
jubatus/jubakit
jubakit/base.py
BaseSchema.predict
python
def predict(cls, row, typed): raise NotImplementedError()
Predicts a Schema from a dict-like row object.
https://github.com/jubatus/jubakit/blob/f6252ba627ce4e2e42eb9aafaaf05c882bc1c678/jubakit/base.py#L91-L95
from __future__ import absolute_import, division, print_function, unicode_literals import collections import copy import random import math import jubatus from .shell import JubaShell from .compat import * from .logger import get_logger from ._process import _ServiceBackend _logger = get_logger() class BaseLoader(object): def is_infinite(self): return False def preprocess(self, ent): return ent def __iter__(self): for ent in self.rows(): processed = self.preprocess(ent) if processed is not None: yield processed def rows(self): raise NotImplementedError() class BaseSchema(object): IGNORE = '_' AUTO = '.' INFER = '?' def __init__(self, mapping, fallback=None): self._fallback = fallback self._key2type, self._key2name = BaseSchema._normalize_mapping(mapping) def transform(self, row): raise NotImplementedError() @classmethod
MIT License
opensearch-project/opensearch-py
opensearchpy/client/cat.py
CatClient.allocation
python
def allocation(self, node_id=None, params=None, headers=None):
    return self.transport.perform_request(
        "GET",
        _make_path("_cat", "allocation", node_id),
        params=params,
        headers=headers,
    )
Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.

:arg node_id: A comma-separated list of node IDs or names to limit the returned information
:arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information
:arg local: Return local information, do not retrieve the state from master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master node
:arg s: Comma-separated list of column names or column aliases to sort by
:arg v: Verbose mode. Display column headers
https://github.com/opensearch-project/opensearch-py/blob/4281fe0e2c6baefc7abeda115b1f0cb1f746ebba/opensearchpy/client/cat.py#L57-L84
from .utils import NamespacedClient, _make_path, query_params class CatClient(NamespacedClient): @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v") def aliases(self, name=None, params=None, headers=None): return self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers ) @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v")
Apache License 2.0
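A short usage sketch for this cat API, assuming an OpenSearch cluster is reachable on localhost:9200 (the host, port and printed column names are placeholders/assumptions, only cat.allocation itself comes from the record):

from opensearchpy import OpenSearch

# Placeholder connection details -- adjust for your cluster.
client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

# Disk allocation for all data nodes, returned as parsed JSON rows.
rows = client.cat.allocation(format="json", bytes="mb")
for row in rows:
    print(row.get("node"), row.get("disk.used"), row.get("disk.avail"))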
ancasag/ensembleobjectdetection
TestTimeAugmentation/kerasfcos/models/retinanet.py
__build_model_pyramid
python
def __build_model_pyramid(model_name, model, features): return keras.layers.Concatenate(axis=1, name=model_name)([model(f) for f in features])
Applies a single submodel to each FPN level. Args model_name: Name of the submodel. model: The submodel to evaluate. features: The FPN features. [P3, P4, P5, P6, P7] Returns A tensor containing the response from the submodel on the FPN features.
https://github.com/ancasag/ensembleobjectdetection/blob/2c3be846caf31eafab8b5660a3f62a6d88578c03/TestTimeAugmentation/kerasfcos/models/retinanet.py#L240-L252
import sys sys.path.append("../") import keras import keras.backend as K from .. import initializers from .. import layers from ..utils.anchors import AnchorParameters from . import assert_training_model def default_shared_model( pyramid_feature_size=256, classification_feature_size=256, name='shared_submodel' ): options = { 'kernel_size': 3, 'strides': 1, 'padding': 'same', } inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size)) outputs = inputs for i in range(4): outputs = keras.layers.Conv2D( filters=classification_feature_size, activation='relu', name='pyramid_shared_{}'.format(i), kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer='zeros', **options )(outputs) return keras.models.Model(inputs=inputs, outputs=outputs, name=name) def default_classification_model( num_classes, shared_model, pyramid_feature_size=256, prior_probability=0.01, name='classification_submodel' ): options = { 'kernel_size': 3, 'strides': 1, 'padding': 'same', } inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size)) outputs = shared_model(inputs) outputs = keras.layers.Conv2D( filters=num_classes, kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer=initializers.PriorProbability(probability=prior_probability), name='pyramid_classification', **options )(outputs) outputs = keras.layers.Reshape((-1, num_classes), name='pyramid_classification_reshape')(outputs) outputs = keras.layers.Activation('sigmoid', name='pyramid_classification_sigmoid')(outputs) return keras.models.Model(inputs=inputs, outputs=outputs, name=name) def default_centerness_model( shared_model, pyramid_feature_size=256, name='centerness_submodel' ): options = { 'kernel_size': 3, 'strides': 1, 'padding': 'same', } inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size)) outputs = shared_model(inputs) outputs = keras.layers.Conv2D( filters=1, kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer='zeros', name='pyramid_centerness', **options )(outputs) outputs = keras.layers.Reshape((-1, 1), name='pyramid_centerness_reshape')(outputs) outputs = keras.layers.Activation('sigmoid', name='pyramid_centerness_sigmoid')(outputs) return keras.models.Model(inputs=inputs, outputs=outputs, name=name) def default_regression_model(num_values=4, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'): options = { 'kernel_size': 3, 'strides': 1, 'padding': 'same', 'kernel_initializer': keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), 'bias_initializer': 'zeros' } inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size)) outputs = inputs for i in range(4): outputs = keras.layers.Conv2D( filters=regression_feature_size, activation='relu', name='pyramid_regression_{}'.format(i), **options )(outputs) outputs = keras.layers.Conv2D(num_values, name='pyramid_regression', **options)(outputs) outputs = keras.layers.Reshape((-1, num_values), name='pyramid_regression_reshape')(outputs) outputs = keras.layers.Lambda(lambda x: K.exp(x))(outputs) return keras.models.Model(inputs=inputs, outputs=outputs, name=name) def __create_pyramid_features(C3, C4, C5, feature_size=256): P5 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C5_reduced')(C5) P5_upsampled = layers.UpsampleLike(name='P5_upsampled')([P5, C4]) P5 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P5')(P5) P4 = 
keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced')(C4) P4 = keras.layers.Add(name='P4_merged')([P5_upsampled, P4]) P4_upsampled = layers.UpsampleLike(name='P4_upsampled')([P4, C3]) P4 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4')(P4) P3 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced')(C3) P3 = keras.layers.Add(name='P3_merged')([P4_upsampled, P3]) P3 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3')(P3) P6 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P6')(C5) P7 = keras.layers.Activation('relu', name='C6_relu')(P6) P7 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P7')(P7) return [P3, P4, P5, P6, P7] def default_submodels(num_classes): shared_model = default_shared_model(pyramid_feature_size=256, classification_feature_size=256) return [ ('regression', default_regression_model(num_values=4)), ('classification', default_classification_model(num_classes=num_classes, shared_model=shared_model)), ('centerness', default_centerness_model(shared_model=shared_model)) ]
MIT License
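The helper runs one shared submodel over every FPN level and concatenates the per-level predictions along the anchor/location axis. A toy, self-contained sketch of that pattern (tiny sizes and all names below are invented for illustration, not the actual retinanet/FCOS submodels):

import keras

FEATURE_SIZE = 8  # toy channel count, not the real pyramid_feature_size of 256

# A tiny stand-in submodel: per-location 2-value predictions, reshaped to (N, 2).
sub_in = keras.layers.Input(shape=(None, None, FEATURE_SIZE))
sub_out = keras.layers.Conv2D(2, 3, padding='same')(sub_in)
sub_out = keras.layers.Reshape((-1, 2))(sub_out)
submodel = keras.models.Model(inputs=sub_in, outputs=sub_out, name='toy_submodel')

# Three fake pyramid levels standing in for [P3, P4, P5, P6, P7].
features = [keras.layers.Input(shape=(None, None, FEATURE_SIZE), name='P{}'.format(i))
            for i in (3, 4, 5)]

# Same pattern as __build_model_pyramid: apply the submodel to each level and
# concatenate along axis=1 so all levels end up in one prediction tensor.
merged = keras.layers.Concatenate(axis=1, name='toy_pyramid')([submodel(f) for f in features])
pyramid_model = keras.models.Model(inputs=features, outputs=merged)
pyramid_model.summary()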
pyoceans/pocean-core
pocean/dsg/utils.py
get_calculated_attributes
python
def get_calculated_attributes(df, axes=None, history=None):
    axes = get_default_axes(axes)
    attrs = get_geographic_attributes(df, axes)
    attrs = dict_update(attrs, get_vertical_attributes(df, axes))
    attrs = dict_update(attrs, get_temporal_attributes(df, axes))
    attrs = dict_update(attrs, get_creation_attributes(history))
    return attrs
Functions to automate netCDF attribute generation from the data itself. This is a wrapper for the other four functions, which could be called separately.

:param df: data (Pandas DataFrame)
:param axes: keys (x,y,z,t) are associated with actual column names (dictionary)
:param history: text initializing the audit trail for modifications to the original data (optional, string)
:return: dictionary of global attributes
https://github.com/pyoceans/pocean-core/blob/a2434b249dfc7b020f5ecbf509a1af60d01a6294/pocean/dsg/utils.py#L19-L35
from __future__ import division from datetime import datetime import pandas as pd from shapely.validation import explain_validity from shapely.geometry import Point, Polygon, LineString, box from pocean.utils import ( get_default_axes, unique_justseen, dict_update ) from pocean import logger as L
MIT License
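A small sketch of feeding a DataFrame to this wrapper. It assumes the default axis names resolve to the plain column names t/x/y/z (that default comes from pocean's get_default_axes and is an assumption of this sketch, as is the sample data):

import pandas as pd
from pocean.dsg.utils import get_calculated_attributes

# Toy trajectory: time, longitude (x), latitude (y), depth (z).
df = pd.DataFrame({
    't': pd.date_range('2020-01-01', periods=4, freq='H'),
    'x': [-70.10, -70.12, -70.15, -70.18],
    'y': [40.00, 40.02, 40.05, 40.07],
    'z': [0.0, 2.5, 5.0, 7.5],
})

attrs = get_calculated_attributes(df, history='toy example data')
# attrs is a dict of global attributes: geographic bounds, vertical extent,
# temporal coverage, and a creation/history block merged together.
for key in sorted(attrs):
    print(key, '=', attrs[key])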
qiboteam/qibo
src/qibo/models/grover.py
Grover.execute
python
def execute(self, nshots=100, freq=False, logs=False): if (self.num_sol or self.targ_a) and not self.iterative: if self.targ_a: it = int(np.pi * (1/self.targ_a) / 4) else: it = int(np.pi * np.sqrt(self.sup_size / self.num_sol) / 4) circuit = self.circuit(it) result = circuit(nshots=nshots).frequencies(binary=True) if freq: if logs: log.info("Result of sampling Grover's algorihm") log.info(result) self.frequencies = result if logs: log.info(f"Most common states found using Grover's algorithm with {it} iterations:") if self.targ_a: most_common = result.most_common(1) else: most_common = result.most_common(self.num_sol) self.solution = [] self.iterations = it for i in most_common: if logs: log.info(i[0]) self.solution.append(i[0]) if logs: if self.check: if self.check(i[0], *self.check_args): log.info('Solution checked and successful.') else: log.info('Not a solution of the problem. Something went wrong.') else: if not self.check: raise_error(ValueError, "Check function needed for iterative approach.") measured, total_iterations = self.iterative_grover() if logs: log.info('Solution found in an iterative process.') log.info(f'Solution: {measured}') log.info(f'Total Grover iterations taken: {total_iterations}') self.solution = measured self.iterations = total_iterations return self.solution, self.iterations
Execute Grover's algorithm. If the number of solutions is given, the number of Grover iterations is computed directly; otherwise an iterative approach is used.

Args:
    nshots (int): number of shots used to estimate the frequencies.
    freq (bool): print the full frequencies after the exact Grover algorithm.

Returns:
    solution (str): bitstring (or list of bitstrings) measured as the solution of the search.
    iterations (int): number of oracle calls needed to reach a solution.
https://github.com/qiboteam/qibo/blob/d8bd2d3de0d8eb12a428a9125302e318480e982a/src/qibo/models/grover.py#L175-L229
import numpy as np from qibo import gates from qibo.config import log, raise_error from qibo.models.circuit import Circuit class Grover(object): def __init__(self, oracle, superposition_circuit=None, initial_state_circuit=None, superposition_qubits=None, superposition_size=None, number_solutions=None, target_amplitude = None, check=None, check_args=(), iterative=False): self.oracle = oracle self.initial_state_circuit = initial_state_circuit if superposition_circuit: self.superposition = superposition_circuit else: if not superposition_qubits: raise_error(ValueError, "Cannot create Grover model if the " "superposition circuit or number of " "qubits is not specified.") self.superposition = Circuit(superposition_qubits) self.superposition.add([gates.H(i) for i in range(superposition_qubits)]) if superposition_qubits: self.sup_qubits = superposition_qubits else: self.sup_qubits = self.superposition.nqubits if superposition_size: self.sup_size = superposition_size else: self.sup_size = int(2 ** self.sup_qubits) assert oracle.nqubits > self.sup_qubits self.anc_qubits_sup = self.superposition.nqubits - self.sup_qubits self.anc_qubits_ora = self.oracle.nqubits - self.sup_qubits - 1 self.nqubits = self.sup_qubits + max(self.anc_qubits_sup, self.anc_qubits_ora) + 1 self.check = check self.check_args = check_args self.num_sol = number_solutions self.targ_a = target_amplitude self.iterative = iterative self.space_sup = list(range(self.sup_qubits + self.anc_qubits_sup)) self.space_ora = list(range(self.sup_qubits + self.anc_qubits_ora)) + [self.nqubits-1] def initialize(self): c = Circuit(self.nqubits) c.add(gates.X(self.nqubits-1)) c.add(gates.H(self.nqubits-1)) if self.initial_state_circuit: c.add(self.initial_state_circuit.invert().on_qubits(*range(self.initial_state_circuit.nqubits))) c.add(self.superposition.on_qubits(*self.space_sup)) return c def diffusion(self): nqubits = self.superposition.nqubits + 1 c = Circuit(nqubits) c.add(self.superposition.invert().on_qubits(*range(nqubits-1))) if self.initial_state_circuit: c.add(self.initial_state_circuit.invert().on_qubits(*range(self.initial_state_circuit.nqubits))) c.add([gates.X(i) for i in range(self.sup_qubits)]) c.add(gates.X(nqubits-1).controlled_by(*range(self.sup_qubits))) c.add([gates.X(i) for i in range(self.sup_qubits)]) if self.initial_state_circuit: c.add(self.initial_state_circuit.on_qubits(*range(self.initial_state_circuit.nqubits))) c.add(self.superposition.on_qubits(*range(nqubits-1))) return c def step(self): c = Circuit(self.nqubits) c.add(self.oracle.on_qubits(*self.space_ora)) c.add(self.diffusion().on_qubits(*(self.space_sup+[self.nqubits-1]))) return c def circuit(self, iterations): c = Circuit(self.nqubits) c += self.initialize() for _ in range(iterations): c += self.step() c.add(gates.M(*range(self.sup_qubits))) return c def iterative_grover(self, lamda_value=6/5): k = 1 lamda = lamda_value total_iterations = 0 while True: it = np.random.randint(k + 1) if it != 0: total_iterations += it circuit = self.circuit(it) result = circuit(nshots=1) measured = result.frequencies(binary=True).most_common(1)[0][0] if self.check(measured, *self.check_args): return measured, total_iterations k = min(lamda * k, np.sqrt(self.sup_size)) if total_iterations > (9/4) * np.sqrt(self.sup_size): log.warning("Too many total iterations, output might not be solution.") return measured, total_iterations
Apache License 2.0
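A compact usage sketch: a 3-qubit oracle that flips the Grover ancilla when the two superposition qubits are |11>, so the search should return '11'. The oracle construction and the check function are invented for this illustration; the Grover arguments match the constructor shown in the context.

from qibo import gates
from qibo.models.circuit import Circuit
from qibo.models.grover import Grover

# Oracle over 2 search qubits + 1 ancilla (the last qubit is the Grover ancilla).
# It flips the ancilla only for the marked state |11>.
oracle = Circuit(3)
oracle.add(gates.X(2).controlled_by(0, 1))

def check(bitstring):
    return bitstring == "11"

grover = Grover(oracle,
                superposition_qubits=2,
                number_solutions=1,
                check=check)
solution, iterations = grover.execute(nshots=100, logs=True)
print(solution, iterations)  # expected: ['11'] after 1 Grover iteration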
rainingcomputers/pykitml
pykitml/_minimize_model.py
MinimizeModel.train
python
def train(self, training_data, targets, batch_size, epochs, optimizer, testing_data=None, testing_targets=None, testing_freq=1, decay_freq=1): print('Training Model...') self._performance_log = {} self._performance_log['epoch'] = [] self._performance_log['cost_train'] = [] self._performance_log['learning_rate'] = [] if(testing_data is not None): self._performance_log['cost_test'] = [] self._init_train(batch_size) pbar = tqdm.trange(0, epochs, ncols=80, unit='epochs') for epoch in pbar: total_gradient = self._get_batch_grad(epoch, batch_size, training_data, targets) self._mparams = optimizer._optimize(self._mparams, total_gradient) if((epoch+1) % decay_freq == 0): optimizer._decay() if((epoch+1)%testing_freq == 0): self._performance_log['epoch'].append(epoch+1) learning_rate = optimizer._learning_rate self._performance_log['learning_rate'].append(learning_rate) cost_train = self.cost(training_data, targets) pbar.set_postfix(cost=cost_train) self._performance_log['cost_train'].append(cost_train) if(testing_data is None): continue cost_test = self.cost(testing_data, testing_targets) self._performance_log['cost_test'].append(cost_test) pbar.close()
Trains the model on the training data. After training is complete, you can call :py:func:`plot_performance` to plot performance graphs.

Parameters
----------
training_data : numpy.array
    numpy array containing training data.
targets : numpy.array
    numpy array containing training targets, corresponding to the training data.
batch_size : int
    Number of training examples to use in one epoch, or number of training examples to use to estimate the gradient.
epochs : int
    Number of epochs the model should be trained for.
optimizer : any Optimizer object
    See :ref:`optimizers`
testing_data : numpy.array
    numpy array containing testing data.
testing_targets : numpy.array
    numpy array containing testing targets, corresponding to the testing data.
testing_freq : int
    How frequently the model should be tested, i.e. the model will be tested after every :code:`testing_freq` epochs. You may want to increase this to reduce training time.
decay_freq : int
    How frequently the model should decay the learning rate. The learning rate will decay after every :code:`decay_freq` epochs.

Raises
------
ValueError
    If :code:`training_data`, :code:`targets`, :code:`testing_data` or :code:`testing_targets` has invalid dimensions/shape.
https://github.com/rainingcomputers/pykitml/blob/1c3e50cebcdb6c4da63979ef9a812b44d23a4857/pykitml/_minimize_model.py#L13-L93
from abc import ABC, abstractmethod import numpy as np import matplotlib.pyplot as plt import tqdm class MinimizeModel(ABC):
MIT License
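A minimal, hedged sketch of the training loop described above. It assumes pykitml exposes a concrete model called LinearRegression and a GradientDescent optimizer under those names; those two names are assumptions of this sketch, and only the train(...) signature comes from the record.

import numpy as np
import pykitml as pk

# Toy regression data: y = 2*x + 1 with a little noise.
X = np.random.rand(100, 1)
y = 2 * X + 1 + 0.01 * np.random.randn(100, 1)

model = pk.LinearRegression(1, 1)                  # assumed concrete MinimizeModel subclass
optimizer = pk.GradientDescent(learning_rate=0.1)  # assumed optimizer class

model.train(
    training_data=X,
    targets=y,
    batch_size=10,
    epochs=200,
    optimizer=optimizer,
    testing_freq=10,
    decay_freq=50,
)
model.plot_performance()  # plots the cost curves recorded during training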
petl-developers/petl
petl/transform/joins.py
lookupjoin
python
def lookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
               presorted=False, buffersize=None, tempdir=None, cache=True,
               lprefix=None, rprefix=None):
    lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
    return LookupJoinView(left, right, lkey, rkey, presorted=presorted,
                          missing=missing, buffersize=buffersize,
                          tempdir=tempdir, cache=cache, lprefix=lprefix,
                          rprefix=rprefix)
Perform a left join, but where the key is not unique in the right-hand table, arbitrarily choose the first row and ignore others. E.g.::

    >>> import petl as etl
    >>> table1 = [['id', 'color', 'cost'],
    ...           [1, 'blue', 12],
    ...           [2, 'red', 8],
    ...           [3, 'purple', 4]]
    >>> table2 = [['id', 'shape', 'size'],
    ...           [1, 'circle', 'big'],
    ...           [1, 'circle', 'small'],
    ...           [2, 'square', 'tiny'],
    ...           [2, 'square', 'big'],
    ...           [3, 'ellipse', 'small'],
    ...           [3, 'ellipse', 'tiny']]
    >>> table3 = etl.lookupjoin(table1, table2, key='id')
    >>> table3
    +----+----------+------+-----------+---------+
    | id | color    | cost | shape     | size    |
    +====+==========+======+===========+=========+
    |  1 | 'blue'   |   12 | 'circle'  | 'big'   |
    +----+----------+------+-----------+---------+
    |  2 | 'red'    |    8 | 'square'  | 'tiny'  |
    +----+----------+------+-----------+---------+
    |  3 | 'purple' |    4 | 'ellipse' | 'small' |
    +----+----------+------+-----------+---------+

See also :func:`petl.transform.joins.leftjoin`.
https://github.com/petl-developers/petl/blob/da43793a378d4b80f2b9be4ff53bf2f1686b0382/petl/transform/joins.py#L643-L682
from __future__ import absolute_import, print_function, division import itertools import operator from petl.compat import next, text_type from petl.errors import ArgumentError from petl.comparison import comparable_itemgetter, Comparable from petl.util.base import Table, asindices, rowgetter, rowgroupby, header, data from petl.transform.sorts import sort from petl.transform.basics import cut, cutout from petl.transform.dedup import distinct def natural_key(left, right): lhdr = header(left) lflds = list(map(str, lhdr)) rhdr = header(right) rflds = list(map(str, rhdr)) key = [f for f in lflds if f in rflds] assert len(key) > 0, 'no fields in common' if len(key) == 1: key = key[0] return key def keys_from_args(left, right, key, lkey, rkey): if key is lkey is rkey is None: lkey = rkey = natural_key(left, right) elif key is not None and lkey is rkey is None: lkey = rkey = key elif key is None and lkey is not None and rkey is not None: pass else: raise ArgumentError( 'bad key arguments: either specify key, or specify both lkey and ' 'rkey, or provide no key/lkey/rkey arguments at all (natural join)' ) return lkey, rkey def join(left, right, key=None, lkey=None, rkey=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None): lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache, lprefix=lprefix, rprefix=rprefix) Table.join = join class JoinView(Table): def __init__(self, left, right, lkey, rkey, presorted=False, leftouter=False, rightouter=False, missing=None, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None): self.lkey = lkey self.rkey = rkey if presorted: self.left = left self.right = right else: self.left = sort(left, lkey, buffersize=buffersize, tempdir=tempdir, cache=cache) self.right = sort(right, rkey, buffersize=buffersize, tempdir=tempdir, cache=cache) self.leftouter = leftouter self.rightouter = rightouter self.missing = missing self.lprefix = lprefix self.rprefix = rprefix def __iter__(self): return iterjoin(self.left, self.right, self.lkey, self.rkey, leftouter=self.leftouter, rightouter=self.rightouter, missing=self.missing, lprefix=self.lprefix, rprefix=self.rprefix) def leftjoin(left, right, key=None, lkey=None, rkey=None, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None): lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted, leftouter=True, rightouter=False, missing=missing, buffersize=buffersize, tempdir=tempdir, cache=cache, lprefix=lprefix, rprefix=rprefix) Table.leftjoin = leftjoin def rightjoin(left, right, key=None, lkey=None, rkey=None, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None): lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted, leftouter=False, rightouter=True, missing=missing, buffersize=buffersize, tempdir=tempdir, cache=cache, lprefix=lprefix, rprefix=rprefix) Table.rightjoin = rightjoin def outerjoin(left, right, key=None, lkey=None, rkey=None, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None): lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted, leftouter=True, rightouter=True, missing=missing, 
buffersize=buffersize, tempdir=tempdir, cache=cache, lprefix=lprefix, rprefix=rprefix) Table.outerjoin = outerjoin def iterjoin(left, right, lkey, rkey, leftouter=False, rightouter=False, missing=None, lprefix=None, rprefix=None): lit = iter(left) rit = iter(right) lhdr = next(lit) rhdr = next(rit) lkind = asindices(lhdr, lkey) rkind = asindices(rhdr, rkey) lgetk = comparable_itemgetter(*lkind) rgetk = comparable_itemgetter(*rkind) rvind = [i for i in range(len(rhdr)) if i not in rkind] rgetv = rowgetter(*rvind) if lprefix is None: outhdr = list(lhdr) else: outhdr = [(text_type(lprefix) + text_type(f)) for f in lhdr] if rprefix is None: outhdr.extend(rgetv(rhdr)) else: outhdr.extend([(text_type(rprefix) + text_type(f)) for f in rgetv(rhdr)]) yield tuple(outhdr) def joinrows(_lrowgrp, _rrowgrp): if _rrowgrp is None: for lrow in _lrowgrp: outrow = list(lrow) outrow.extend([missing] * len(rvind)) yield tuple(outrow) elif _lrowgrp is None: for rrow in _rrowgrp: outrow = [missing] * len(lhdr) for li, ri in zip(lkind, rkind): outrow[li] = rrow[ri] outrow.extend(rgetv(rrow)) yield tuple(outrow) else: _rrowgrp = list(_rrowgrp) for lrow in _lrowgrp: for rrow in _rrowgrp: outrow = list(lrow) outrow.extend(rgetv(rrow)) yield tuple(outrow) lgit = itertools.groupby(lit, key=lgetk) rgit = itertools.groupby(rit, key=rgetk) lrowgrp = [] rrowgrp = [] lkval, rkval = Comparable(None), Comparable(None) try: lkval, lrowgrp = next(lgit) rkval, rrowgrp = next(rgit) while True: if lkval < rkval: if leftouter: for row in joinrows(lrowgrp, None): yield tuple(row) lkval, lrowgrp = next(lgit) elif lkval > rkval: if rightouter: for row in joinrows(None, rrowgrp): yield tuple(row) rkval, rrowgrp = next(rgit) else: for row in joinrows(lrowgrp, rrowgrp): yield tuple(row) lkval, lrowgrp = next(lgit) rkval, rrowgrp = next(rgit) except StopIteration: pass if leftouter: if lkval > rkval: for row in joinrows(lrowgrp, None): yield tuple(row) for lkval, lrowgrp in lgit: for row in joinrows(lrowgrp, None): yield tuple(row) if rightouter: if lkval < rkval: for row in joinrows(None, rrowgrp): yield tuple(row) for rkval, rrowgrp in rgit: for row in joinrows(None, rrowgrp): yield tuple(row) def crossjoin(*tables, **kwargs): return CrossJoinView(*tables, **kwargs) Table.crossjoin = crossjoin class CrossJoinView(Table): def __init__(self, *sources, **kwargs): self.sources = sources self.prefix = kwargs.get('prefix', False) def __iter__(self): return itercrossjoin(self.sources, self.prefix) def itercrossjoin(sources, prefix): outhdr = list() for i, s in enumerate(sources): if prefix: outhdr.extend([text_type(i+1) + '_' + text_type(f) for f in header(s)]) else: outhdr.extend(header(s)) yield tuple(outhdr) datasrcs = [data(src) for src in sources] for prod in itertools.product(*datasrcs): outrow = list() for row in prod: outrow.extend(row) yield tuple(outrow) def antijoin(left, right, key=None, lkey=None, rkey=None, presorted=False, buffersize=None, tempdir=None, cache=True): lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return AntiJoinView(left=left, right=right, lkey=lkey, rkey=rkey, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache) Table.antijoin = antijoin class AntiJoinView(Table): def __init__(self, left, right, lkey, rkey, presorted=False, buffersize=None, tempdir=None, cache=True): if presorted: self.left = left self.right = right else: self.left = sort(left, lkey, buffersize=buffersize, tempdir=tempdir, cache=cache) self.right = sort(right, rkey, buffersize=buffersize, tempdir=tempdir, 
cache=cache) self.lkey = lkey self.rkey = rkey def __iter__(self): return iterantijoin(self.left, self.right, self.lkey, self.rkey) def iterantijoin(left, right, lkey, rkey): lit = iter(left) rit = iter(right) lhdr = next(lit) rhdr = next(rit) yield tuple(lhdr) lkind = asindices(lhdr, lkey) rkind = asindices(rhdr, rkey) lgetk = comparable_itemgetter(*lkind) rgetk = comparable_itemgetter(*rkind) lgit = itertools.groupby(lit, key=lgetk) rgit = itertools.groupby(rit, key=rgetk) lrowgrp = [] lkval, rkval = Comparable(None), Comparable(None) try: lkval, lrowgrp = next(lgit) rkval, _ = next(rgit) while True: if lkval < rkval: for row in lrowgrp: yield tuple(row) lkval, lrowgrp = next(lgit) elif lkval > rkval: rkval, _ = next(rgit) else: lkval, lrowgrp = next(lgit) rkval, _ = next(rgit) except StopIteration: pass if lkval > rkval: for row in lrowgrp: yield tuple(row) for lkval, lrowgrp in lgit: for row in lrowgrp: yield tuple(row)
MIT License
zhaocq-nlp/njunmt-tf
njunmt/inference/decode.py
evaluate
python
def evaluate(sess, loss_op, eval_data): total_loss = 0. total_weight = 0. for data in eval_data: parallels = data["feed_dict"].pop("parallels") avail = sum(numpy.array(parallels) > 0) loss = _evaluate(sess, data["feed_dict"], loss_op[:avail]) data["feed_dict"]["parallels"] = parallels total_loss += sum([_l[0] for _l in loss]) total_weight += sum([_l[1] for _l in loss]) loss = total_loss / total_weight return loss
Evaluates data by loss. Args: sess: `tf.Session`. loss_op: Tensorflow operation, computing the loss. eval_data: An iterable instance that each element is a packed feeding dictionary for `sess`. Returns: Total loss averaged by number of data samples.
https://github.com/zhaocq-nlp/njunmt-tf/blob/f1440726b3c007bcf19126fc4dee43a91dccc718/njunmt/inference/decode.py#L48-L69
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy import tensorflow as tf from njunmt.inference.attention import postprocess_attention from njunmt.inference.attention import select_attention_sample_by_sample from njunmt.inference.attention import pack_batch_attention_dict from njunmt.inference.attention import dump_attentions from njunmt.tools.tokenizeChinese import to_chinese_char from njunmt.utils.expert_utils import repeat_n_times from njunmt.utils.misc import open_file def _evaluate( sess, feed_dict, eval_op): return sess.run(eval_op, feed_dict=feed_dict)
Apache License 2.0
aboudykreidieh/h-baselines
hbaselines/envs/efficient_hrl/envs.py
UniversalHumanoidMazeEnv.reset
python
def reset(self): try: self.prev_obs = super(UniversalHumanoidMazeEnv, self).reset() except (NotImplementedError, AttributeError): self.prev_obs = np.empty(1) self.step_number = 0 if self.use_contexts: if not self.random_contexts: if isinstance(self.context_range[0], list): self.current_context = random.sample(self.context_range, 1) self.current_context = self.current_context[0] else: self.current_context = self.context_range else: self.current_context = [] for range_i in self.context_range: minval, maxval = range_i self.current_context.append(random.uniform(minval, maxval)) self.current_context = np.asarray(self.current_context) return self.prev_obs
Reset the environment. If the environment is using the contextual setting, a new context is issued. Returns ------- array_like initial observation
https://github.com/aboudykreidieh/h-baselines/blob/47c858584c6de3867a9981dfe974e341188626ed/hbaselines/envs/efficient_hrl/envs.py#L437-L477
import numpy as np import random from gym.spaces import Box from hbaselines.utils.reward_fns import negative_distance from hbaselines.envs.efficient_hrl.ant_maze_env import AntMazeEnv from hbaselines.envs.efficient_hrl.humanoid_maze_env import HumanoidMazeEnv REWARD_SCALE = 0.1 DISTANCE_THRESHOLD = 5 class UniversalAntMazeEnv(AntMazeEnv): def __init__(self, maze_id, contextual_reward, use_contexts=False, random_contexts=False, context_range=None, maze_size_scaling=8, top_down_view=False, image_size=32, horizon=500, ant_fall=False, evaluate=False, num_levels=1): super(UniversalAntMazeEnv, self).__init__( maze_id=maze_id, maze_height=0.5, maze_size_scaling=maze_size_scaling, n_bins=0, sensor_range=3., sensor_span=2 * np.pi, observe_blocks=False, put_spin_near_agent=False, top_down_view=top_down_view, image_size=image_size, manual_collision=False, ant_fall=ant_fall, evaluate=evaluate, num_levels=num_levels, ) self.horizon = horizon self.step_number = 0 self.use_contexts = use_contexts self.random_contexts = random_contexts self.context_range = context_range self.contextual_reward = contextual_reward self.current_context = None self.prev_obs = None if self.use_contexts: if self.random_contexts: assert all(isinstance(i, tuple) for i in self.context_range), "When using random contexts, every element in " "context_range, must be a tuple of (min,max) values." else: assert all(not isinstance(i, tuple) for i in self.context_range), "When not using random contexts, every element in " "context_range, must be a single value or a list of " "values." @property def context_space(self): if self.use_contexts: if self.random_contexts: context_low = [] context_high = [] for context_i in self.context_range: low, high = context_i context_low.append(low) context_high.append(high) return Box(low=np.asarray(context_low), high=np.asarray(context_high), dtype=np.float32) else: if isinstance(self.context_range[0], list): min_val = [] max_val = [] for i in range(len(self.context_range[0])): min_val.append(min(v[i] for v in self.context_range)) max_val.append(max(v[i] for v in self.context_range)) return Box(low=np.array(min_val), high=np.array(max_val)) else: return Box(low=np.asarray(self.context_range), high=np.asarray(self.context_range), dtype=np.float32) else: return None def step(self, action): obs, rew, done, _ = super(UniversalAntMazeEnv, self).step(action) info = {} if self.use_contexts: dist = self.contextual_reward( states=self.prev_obs, next_states=obs, goals=self.current_context, ) info["goal_distance"] = dist / REWARD_SCALE info["is_success"] = abs(dist) < DISTANCE_THRESHOLD * REWARD_SCALE rew = dist self.step_number += 1 done = done or self.step_number == self.horizon return obs, rew, done, info def reset(self): try: self.prev_obs = super(UniversalAntMazeEnv, self).reset() except NotImplementedError: self.prev_obs = np.empty(1) self.step_number = 0 if self.use_contexts: if not self.random_contexts: if isinstance(self.context_range[0], list): self.current_context = random.sample(self.context_range, 1) self.current_context = self.current_context[0] else: self.current_context = self.context_range else: self.current_context = [] for range_i in self.context_range: minval, maxval = range_i self.current_context.append(random.uniform(minval, maxval)) self.current_context = np.asarray(self.current_context) return self.prev_obs class UniversalHumanoidMazeEnv(HumanoidMazeEnv): def __init__(self, maze_id, contextual_reward, use_contexts=False, random_contexts=False, context_range=None, maze_size_scaling=4, 
top_down_view=False, image_size=32, horizon=1000): super(UniversalHumanoidMazeEnv, self).__init__( maze_id=maze_id, maze_height=0.5, maze_size_scaling=maze_size_scaling, n_bins=0, sensor_range=3., sensor_span=2 * np.pi, observe_blocks=False, put_spin_near_agent=False, top_down_view=top_down_view, image_size=image_size, manual_collision=False, ) self.horizon = horizon self.step_number = 0 self.use_contexts = use_contexts self.random_contexts = random_contexts self.context_range = context_range self.contextual_reward = contextual_reward self.current_context = None self.prev_obs = None if self.use_contexts: if self.random_contexts: assert all(isinstance(i, tuple) for i in self.context_range), "When using random contexts, every element in " "context_range, must be a tuple of (min,max) values." else: assert all(not isinstance(i, tuple) for i in self.context_range), "When not using random contexts, every element in " "context_range, must be a single value or a list of " "values." @property def context_space(self): if self.use_contexts: if self.random_contexts: context_low = [] context_high = [] for context_i in self.context_range: low, high = context_i context_low.append(low) context_high.append(high) return Box(low=np.asarray(context_low), high=np.asarray(context_high), dtype=np.float32) else: if isinstance(self.context_range[0], list): min_val = [] max_val = [] for i in range(len(self.context_range[0])): min_val.append(min(v[i] for v in self.context_range)) max_val.append(max(v[i] for v in self.context_range)) return Box(low=np.array(min_val), high=np.array(max_val), dtype=np.float32) else: return Box(low=np.asarray(self.context_range), high=np.asarray(self.context_range), dtype=np.float32) else: return None def step(self, action): obs, rew, done, info = super(UniversalHumanoidMazeEnv, self).step( action) if self.use_contexts: rew = self.contextual_reward( states=self.prev_obs, next_states=obs, goals=self.current_context, ) dist = 7.2 * np.log(rew) info["is_success"] = abs(dist) < DISTANCE_THRESHOLD self.step_number += 1 done = done or self.step_number == self.horizon return obs, rew, done, info
MIT License
alexa/alexa-apis-for-python
ask-sdk-model/ask_sdk_model/services/reminder_management/push_notification.py
PushNotification.__eq__
python
def __eq__(self, other):
    if not isinstance(other, PushNotification):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/alexa/alexa-apis-for-python/blob/bfe5e694daaca71bfb1a4199ca8d2514f1cac6c9/ask-sdk-model/ask_sdk_model/services/reminder_management/push_notification.py#L98-L104
import pprint import re import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime from ask_sdk_model.services.reminder_management.push_notification_status import PushNotificationStatus as PushNotificationStatus_149f8706 class PushNotification(object): deserialized_types = { 'status': 'ask_sdk_model.services.reminder_management.push_notification_status.PushNotificationStatus' } attribute_map = { 'status': 'status' } supports_multiple_types = False def __init__(self, status=None): self.__discriminator_value = None self.status = status def to_dict(self): result = {} for attr, _ in six.iteritems(self.deserialized_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x.value if isinstance(x, Enum) else x, value )) elif isinstance(value, Enum): result[attr] = value.value elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else (item[0], item[1].value) if isinstance(item[1], Enum) else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
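Equality here is just a type check plus a comparison of the instances' __dict__, so two notifications with the same status compare equal. A tiny illustration (the status strings are placeholders standing in for PushNotificationStatus values):

from ask_sdk_model.services.reminder_management.push_notification import PushNotification

a = PushNotification(status='ENABLED')
b = PushNotification(status='ENABLED')
c = PushNotification(status='DISABLED')

print(a == b)          # True  -- same type, identical __dict__
print(a == c)          # False -- different status
print(a == 'ENABLED')  # False -- not a PushNotification instance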
jeeftor/alfredtoday
src/lib/oauth2client/client.py
OAuth2Credentials.access_token_expired
python
def access_token_expired(self):
    if self.invalid:
        return True

    if not self.token_expiry:
        return False

    now = _UTCNOW()
    if now >= self.token_expiry:
        logger.info('access_token is expired. Now: %s, token_expiry: %s',
                    now, self.token_expiry)
        return True
    return False
True if the credential is expired or invalid. If the token_expiry isn't set, we assume the token doesn't expire.
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/oauth2client/client.py#L758-L774
import base64 import collections import copy import datetime import json import logging import os import socket import sys import tempfile import time import shutil import six from six.moves import http_client from six.moves import urllib import httplib2 from oauth2client import GOOGLE_AUTH_URI from oauth2client import GOOGLE_DEVICE_URI from oauth2client import GOOGLE_REVOKE_URI from oauth2client import GOOGLE_TOKEN_URI from oauth2client import GOOGLE_TOKEN_INFO_URI from oauth2client._helpers import _from_bytes from oauth2client._helpers import _to_bytes from oauth2client._helpers import _urlsafe_b64decode from oauth2client import clientsecrets from oauth2client import util __author__ = 'jcgregorio@google.com (Joe Gregorio)' HAS_OPENSSL = False HAS_CRYPTO = False try: from oauth2client import crypt HAS_CRYPTO = True HAS_OPENSSL = crypt.OpenSSLVerifier is not None except ImportError: pass logger = logging.getLogger(__name__) EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ' ID_TOKEN_VERIFICATION_CERTS = 'https://www.googleapis.com/oauth2/v1/certs' ID_TOKEN_VERIFICATON_CERTS = ID_TOKEN_VERIFICATION_CERTS OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob' REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,) AUTHORIZED_USER = 'authorized_user' SERVICE_ACCOUNT = 'service_account' GOOGLE_APPLICATION_CREDENTIALS = 'GOOGLE_APPLICATION_CREDENTIALS' _CLOUDSDK_CONFIG_DIRECTORY = 'gcloud' _CLOUDSDK_CONFIG_ENV_VAR = 'CLOUDSDK_CONFIG' ADC_HELP_MSG = ( 'The Application Default Credentials are not available. They are ' 'available if running in Google Compute Engine. Otherwise, the ' 'environment variable ' + GOOGLE_APPLICATION_CREDENTIALS + ' must be defined pointing to a file defining the credentials. See ' 'https://developers.google.com/accounts/docs/' 'application-default-credentials for more information.') _WELL_KNOWN_CREDENTIALS_FILE = 'application_default_credentials.json' AccessTokenInfo = collections.namedtuple( 'AccessTokenInfo', ['access_token', 'expires_in']) DEFAULT_ENV_NAME = 'UNKNOWN' NO_GCE_CHECK = os.environ.setdefault('NO_GCE_CHECK', 'False') _SERVER_SOFTWARE = 'SERVER_SOFTWARE' _GCE_METADATA_HOST = '169.254.169.254' _METADATA_FLAVOR_HEADER = 'Metadata-Flavor' _DESIRED_METADATA_FLAVOR = 'Google' _UTCNOW = datetime.datetime.utcnow class SETTINGS(object): env_name = None class Error(Exception): class FlowExchangeError(Error): class AccessTokenRefreshError(Error): class HttpAccessTokenRefreshError(AccessTokenRefreshError): def __init__(self, *args, **kwargs): super(HttpAccessTokenRefreshError, self).__init__(*args) self.status = kwargs.get('status') class TokenRevokeError(Error): class UnknownClientSecretsFlowError(Error): class AccessTokenCredentialsError(Error): class VerifyJwtTokenError(Error): class NonAsciiHeaderError(Error): class ApplicationDefaultCredentialsError(Error): class OAuth2DeviceCodeError(Error): class CryptoUnavailableError(Error, NotImplementedError): class MemoryCache(object): def __init__(self): self.cache = {} def get(self, key): return self.cache.get(key) def set(self, key, value): self.cache[key] = value def delete(self, key): self.cache.pop(key, None) def _parse_expiry(expiry): if expiry and isinstance(expiry, datetime.datetime): return expiry.strftime(EXPIRY_FORMAT) else: return None class Credentials(object): NON_SERIALIZED_MEMBERS = frozenset(['store']) def authorize(self, http): raise NotImplementedError def refresh(self, http): raise NotImplementedError def revoke(self, http): raise NotImplementedError def apply(self, headers): raise NotImplementedError def _to_json(self, strip, 
to_serialize=None): curr_type = self.__class__ if to_serialize is None: to_serialize = copy.copy(self.__dict__) else: to_serialize = copy.copy(to_serialize) for member in strip: if member in to_serialize: del to_serialize[member] to_serialize['token_expiry'] = _parse_expiry( to_serialize.get('token_expiry')) to_serialize['_class'] = curr_type.__name__ to_serialize['_module'] = curr_type.__module__ for key, val in to_serialize.items(): if isinstance(val, bytes): to_serialize[key] = val.decode('utf-8') if isinstance(val, set): to_serialize[key] = list(val) return json.dumps(to_serialize) def to_json(self): return self._to_json(self.NON_SERIALIZED_MEMBERS) @classmethod def new_from_json(cls, json_data): json_data_as_unicode = _from_bytes(json_data) data = json.loads(json_data_as_unicode) module_name = data['_module'] try: module_obj = __import__(module_name) except ImportError: module_name = module_name.replace('.googleapiclient', '') module_obj = __import__(module_name) module_obj = __import__(module_name, fromlist=module_name.split('.')[:-1]) kls = getattr(module_obj, data['_class']) return kls.from_json(json_data_as_unicode) @classmethod def from_json(cls, unused_data): return Credentials() class Flow(object): pass class Storage(object): def __init__(self, lock=None): self._lock = lock def acquire_lock(self): if self._lock is not None: self._lock.acquire() def release_lock(self): if self._lock is not None: self._lock.release() def locked_get(self): raise NotImplementedError def locked_put(self, credentials): raise NotImplementedError def locked_delete(self): raise NotImplementedError def get(self): self.acquire_lock() try: return self.locked_get() finally: self.release_lock() def put(self, credentials): self.acquire_lock() try: self.locked_put(credentials) finally: self.release_lock() def delete(self): self.acquire_lock() try: return self.locked_delete() finally: self.release_lock() def clean_headers(headers): clean = {} try: for k, v in six.iteritems(headers): if not isinstance(k, six.binary_type): k = str(k) if not isinstance(v, six.binary_type): v = str(v) clean[_to_bytes(k)] = _to_bytes(v) except UnicodeEncodeError: raise NonAsciiHeaderError(k, ': ', v) return clean def _update_query_params(uri, params): parts = urllib.parse.urlparse(uri) query_params = dict(urllib.parse.parse_qsl(parts.query)) query_params.update(params) new_parts = parts._replace(query=urllib.parse.urlencode(query_params)) return urllib.parse.urlunparse(new_parts) def _initialize_headers(headers): if headers is None: headers = {} else: headers = dict(headers) return headers def _apply_user_agent(headers, user_agent): if user_agent is not None: if 'user-agent' in headers: headers['user-agent'] = (user_agent + ' ' + headers['user-agent']) else: headers['user-agent'] = user_agent return headers class OAuth2Credentials(Credentials): @util.positional(8) def __init__(self, access_token, client_id, client_secret, refresh_token, token_expiry, token_uri, user_agent, revoke_uri=None, id_token=None, token_response=None, scopes=None, token_info_uri=None): self.access_token = access_token self.client_id = client_id self.client_secret = client_secret self.refresh_token = refresh_token self.store = None self.token_expiry = token_expiry self.token_uri = token_uri self.user_agent = user_agent self.revoke_uri = revoke_uri self.id_token = id_token self.token_response = token_response self.scopes = set(util.string_to_scopes(scopes or [])) self.token_info_uri = token_info_uri self.invalid = False def authorize(self, http): request_orig = 
http.request def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): if not self.access_token: logger.info('Attempting refresh to obtain ' 'initial access_token') self._refresh(request_orig) headers = _initialize_headers(headers) self.apply(headers) _apply_user_agent(headers, self.user_agent) body_stream_position = None if all(getattr(body, stream_prop, None) for stream_prop in ('read', 'seek', 'tell')): body_stream_position = body.tell() resp, content = request_orig(uri, method, body, clean_headers(headers), redirections, connection_type) max_refresh_attempts = 2 for refresh_attempt in range(max_refresh_attempts): if resp.status not in REFRESH_STATUS_CODES: break logger.info('Refreshing due to a %s (attempt %s/%s)', resp.status, refresh_attempt + 1, max_refresh_attempts) self._refresh(request_orig) self.apply(headers) if body_stream_position is not None: body.seek(body_stream_position) resp, content = request_orig(uri, method, body, clean_headers(headers), redirections, connection_type) return (resp, content) http.request = new_request setattr(http.request, 'credentials', self) return http def refresh(self, http): self._refresh(http.request) def revoke(self, http): self._revoke(http.request) def apply(self, headers): headers['Authorization'] = 'Bearer ' + self.access_token def has_scopes(self, scopes): scopes = util.string_to_scopes(scopes) return set(scopes).issubset(self.scopes) def retrieve_scopes(self, http): self._retrieve_scopes(http.request) return self.scopes @classmethod def from_json(cls, json_data): data = json.loads(_from_bytes(json_data)) if (data.get('token_expiry') and not isinstance(data['token_expiry'], datetime.datetime)): try: data['token_expiry'] = datetime.datetime.strptime( data['token_expiry'], EXPIRY_FORMAT) except ValueError: data['token_expiry'] = None retval = cls( data['access_token'], data['client_id'], data['client_secret'], data['refresh_token'], data['token_expiry'], data['token_uri'], data['user_agent'], revoke_uri=data.get('revoke_uri', None), id_token=data.get('id_token', None), token_response=data.get('token_response', None), scopes=data.get('scopes', None), token_info_uri=data.get('token_info_uri', None)) retval.invalid = data['invalid'] return retval @property
MIT License
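A small sketch exercising the three branches of the property: an expiry in the future, an expiry in the past, and no expiry at all. The token values and URIs below are dummies.

import datetime
from oauth2client.client import OAuth2Credentials

def make_creds(token_expiry):
    # Dummy values; only token_expiry matters for this illustration.
    return OAuth2Credentials(
        access_token='dummy-token', client_id='dummy-id',
        client_secret='dummy-secret', refresh_token=None,
        token_expiry=token_expiry,
        token_uri='https://example.com/token', user_agent='example/1.0')

fresh = make_creds(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
stale = make_creds(datetime.datetime.utcnow() - datetime.timedelta(hours=1))
no_expiry = make_creds(None)

print(fresh.access_token_expired)      # False -- expiry is in the future
print(stale.access_token_expired)      # True  -- expiry already passed
print(no_expiry.access_token_expired)  # False -- no expiry means "never expires"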
googlecloudplatform/data-pipeline
app/src/csvmatchreplace/transform.py
WriteErrors
python
def WriteErrors(writer, row_value, errors):
    row = {'row_value': row_value,
           'errors': [{'message': err.message,
                       'value': err.value,
                       'index': err.index} for err in errors]}
    writer.write(json.dumps(row) + '\r\n')
Write out row and errors with it for later _badrows table creation. Args: writer: an object we can write to. row_value: the entire row we had a problem with. errors: an array of CellError objects.
https://github.com/googlecloudplatform/data-pipeline/blob/0bdd1664bc9ff5e36928c4609ef6127ef1e1fb3f/app/src/csvmatchreplace/transform.py#L130-L142
import cStringIO as StringIO import csv import json import logging import re from src.clients import bigquery from src.csvmatchreplace import timestamp class TableError(Exception): class CellError(TableError): def __init__(self, message, value=None, index=None): super(CellError, self).__init__(message) self.value = value self.index = index def TransformRow(row, config): transformed_row = [] bad_columns = [] columns = config['columns'] if len(row) != len(columns): bad_columns.append(CellError( 'Invalid number of elements in row. Found %d, expected %d' % (len(row), len(columns)))) for i in range(min(len(row), len(columns))): if columns[i]['wanted']: try: cell_data = TransformCell(row[i], i, columns[i]) transformed_row.append(cell_data) except CellError as err: logging.warning('Transform phase: Bad data @ Column %d = %r', i, err) bad_columns.append(err) transformed_row.append(err.value) return (transformed_row, bad_columns) def TransformCell(cell, index, column): output = cell for pattern in column.get('transformations', []): output = re.sub(pattern['match'], pattern['replace'], output) output = NormalizeCellByType(output, index, column['type']) return output def NormalizeCellByType(cell, index, column_type): if not cell: return '' try: if column_type == bigquery.ColumnTypes.INTEGER: cell = int(cell) elif column_type == bigquery.ColumnTypes.FLOAT: cell = float(cell) elif column_type == bigquery.ColumnTypes.BOOLEAN: if str(cell).lower() in ('true', '1'): cell = 'True' elif str(cell).lower() in ('false', '0'): cell = 'False' else: raise ValueError('invalid value') elif column_type == bigquery.ColumnTypes.TIMESTAMP: cell = timestamp.NormalizeTimeStamp(cell) except ValueError as err: raise CellError('Invalid value %r for column type %s: %r' % (cell, bigquery.ColumnTypes.strings[column_type], err), str(cell), index) return str(cell)
Apache License 2.0
lucas-emery/rocket-league-gym
rlgym/gym.py
Gym.close
python
def close(self):
    self._comm_handler.close_pipe()
    if self._game_process is not None:
        self._game_process.terminate()
Disconnect communication with the Bakkesmod plugin and close the game. This should only be called if you are finished with your current RLGym environment instance.
https://github.com/lucas-emery/rocket-league-gym/blob/7d8e37809126f6702c5f433e7dafdc3aedde4033/rlgym/gym.py#L127-L134
from time import sleep from typing import List, Union, Tuple, Dict, Any import numpy as np from gym import Env from rlgym.gamelaunch import launch_rocket_league, run_injector, page_rocket_league, LaunchPreference from rlgym.communication import CommunicationHandler, Message class Gym(Env): def __init__(self, match, pipe_id=0, launch_preference=LaunchPreference.EPIC, use_injector=False, force_paging=False): super().__init__() self._match = match self.observation_space = match.observation_space self.action_space = match.action_space self._launch_preference = launch_preference self._use_injector = use_injector self._force_paging = force_paging self._comm_handler = CommunicationHandler() self._local_pipe_name = CommunicationHandler.format_pipe_id(pipe_id) self._local_pipe_id = pipe_id self._game_process = None self._open_game() self._setup_plugin_connection() if self._force_paging: self._page_client() self._prev_state = None def _open_game(self): print("Launching Rocket League, make sure bakkesmod is running.") self._game_process = launch_rocket_league(self._local_pipe_name, self._launch_preference) if self._use_injector: sleep(3) run_injector() def _setup_plugin_connection(self): self._comm_handler.open_pipe(self._local_pipe_name) self._comm_handler.send_message(header=Message.RLGYM_CONFIG_MESSAGE_HEADER, body=self._match.get_config()) self._comm_handler.send_message(header=Message.RLGYM_RESET_GAME_STATE_MESSAGE_HEADER, body=self._match.get_reset_state()) def _page_client(self) -> bool: if self._game_process is None: print("Forced paging is only supported for the epic games version") return False else: print("Forcing Rocket League to page unused memory. PID:", self._game_process.pid) return page_rocket_league(rl_pid=self._game_process.pid) def reset(self) -> List: state_str = self._match.get_reset_state() exception = self._comm_handler.send_message(header=Message.RLGYM_RESET_GAME_STATE_MESSAGE_HEADER, body=state_str) if exception is not None: self._attempt_recovery() exception = self._comm_handler.send_message(header=Message.RLGYM_RESET_GAME_STATE_MESSAGE_HEADER, body=state_str) if exception is not None: import sys print("!UNABLE TO RECOVER ROCKET LEAGUE!\nEXITING") sys.exit(-1) state = self._receive_state() self._match.episode_reset(state) self._prev_state = state return self._match.build_observations(state) def step(self, actions: Any) -> Tuple[List, List, bool, Dict]: actions = self._match.parse_actions(actions, self._prev_state) actions_sent = self._send_actions(actions) received_state = self._receive_state() if received_state is None: print("FAILED TO RECEIEVE STATE! FALLING TO",self._prev_state) state = self._prev_state else: state = received_state obs = self._match.build_observations(state) done = self._match.is_done(state) or received_state is None or not actions_sent reward = self._match.get_rewards(state, done) self._prev_state = state info = { 'state': state, 'result': self._match.get_result(state) } return obs, reward, done, info
Apache License 2.0
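A usage sketch for the surrounding Gym class, assuming the standard rlgym.make() entry point (which requires Rocket League and the BakkesMod plugin installed); the episode loop is illustrative only:

import rlgym

env = rlgym.make()  # launches Rocket League and connects the plugin pipe
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
env.close()  # closes the pipe and terminates the game process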
docusign/docusign-python-client
docusign_esign/models/account_signature_definition.py
AccountSignatureDefinition.date_stamp_properties
python
def date_stamp_properties(self, date_stamp_properties): self._date_stamp_properties = date_stamp_properties
Sets the date_stamp_properties of this AccountSignatureDefinition. :param date_stamp_properties: The date_stamp_properties of this AccountSignatureDefinition. # noqa: E501 :type: DateStampProperties
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/account_signature_definition.py#L129-L137
import pprint import re import six from docusign_esign.client.configuration import Configuration class AccountSignatureDefinition(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'date_stamp_properties': 'DateStampProperties', 'disallow_user_resize_stamp': 'str', 'external_id': 'str', 'image_type': 'str', 'is_default': 'str', 'nrds_id': 'str', 'nrds_last_name': 'str', 'phonetic_name': 'str', 'signature_font': 'str', 'signature_groups': 'list[SignatureGroupDef]', 'signature_id': 'str', 'signature_initials': 'str', 'signature_name': 'str', 'signature_type': 'str', 'signature_users': 'list[SignatureUserDef]', 'stamp_format': 'str', 'stamp_size_mm': 'str' } attribute_map = { 'date_stamp_properties': 'dateStampProperties', 'disallow_user_resize_stamp': 'disallowUserResizeStamp', 'external_id': 'externalID', 'image_type': 'imageType', 'is_default': 'isDefault', 'nrds_id': 'nrdsId', 'nrds_last_name': 'nrdsLastName', 'phonetic_name': 'phoneticName', 'signature_font': 'signatureFont', 'signature_groups': 'signatureGroups', 'signature_id': 'signatureId', 'signature_initials': 'signatureInitials', 'signature_name': 'signatureName', 'signature_type': 'signatureType', 'signature_users': 'signatureUsers', 'stamp_format': 'stampFormat', 'stamp_size_mm': 'stampSizeMM' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._date_stamp_properties = None self._disallow_user_resize_stamp = None self._external_id = None self._image_type = None self._is_default = None self._nrds_id = None self._nrds_last_name = None self._phonetic_name = None self._signature_font = None self._signature_groups = None self._signature_id = None self._signature_initials = None self._signature_name = None self._signature_type = None self._signature_users = None self._stamp_format = None self._stamp_size_mm = None self.discriminator = None setattr(self, "_{}".format('date_stamp_properties'), kwargs.get('date_stamp_properties', None)) setattr(self, "_{}".format('disallow_user_resize_stamp'), kwargs.get('disallow_user_resize_stamp', None)) setattr(self, "_{}".format('external_id'), kwargs.get('external_id', None)) setattr(self, "_{}".format('image_type'), kwargs.get('image_type', None)) setattr(self, "_{}".format('is_default'), kwargs.get('is_default', None)) setattr(self, "_{}".format('nrds_id'), kwargs.get('nrds_id', None)) setattr(self, "_{}".format('nrds_last_name'), kwargs.get('nrds_last_name', None)) setattr(self, "_{}".format('phonetic_name'), kwargs.get('phonetic_name', None)) setattr(self, "_{}".format('signature_font'), kwargs.get('signature_font', None)) setattr(self, "_{}".format('signature_groups'), kwargs.get('signature_groups', None)) setattr(self, "_{}".format('signature_id'), kwargs.get('signature_id', None)) setattr(self, "_{}".format('signature_initials'), kwargs.get('signature_initials', None)) setattr(self, "_{}".format('signature_name'), kwargs.get('signature_name', None)) setattr(self, "_{}".format('signature_type'), kwargs.get('signature_type', None)) setattr(self, "_{}".format('signature_users'), kwargs.get('signature_users', None)) setattr(self, "_{}".format('stamp_format'), kwargs.get('stamp_format', None)) setattr(self, "_{}".format('stamp_size_mm'), kwargs.get('stamp_size_mm', None)) @property def date_stamp_properties(self): return 
self._date_stamp_properties @date_stamp_properties.setter
MIT License
colcarroll/carpo
test/util.py
ProjectManager.add_nb_with_input
python
def add_nb_with_input(self, nb_input):
    new_filename = self.filename()
    shutil.copyfile(SAMPLE_NB, new_filename)
    self.set_nb_kernel(new_filename)
    self.set_nb_input(new_filename, nb_input)
    assert self.get_output(new_filename) == '0'
    return new_filename
Add a notebook to the directory with the given input string.
https://github.com/colcarroll/carpo/blob/2f65290f4a77682dfdc6fa3311650583f7d3ca2a/test/util.py#L62-L71
import json import os import shutil import tempfile from git import Repo from jupyter_client.kernelspec import find_kernel_specs DIR = os.path.dirname(os.path.realpath(__file__)) SAMPLE_NB = os.path.join(DIR, 'sample_nb.ipynb') class ProjectManager(object): def __init__(self): self.file_count = 0 self._repo = None self.test_directory = tempfile.mkdtemp() self.db_file = os.path.join(self.test_directory, 'carpo.db') self.kernelspec = self.get_kernelspec() def filename(self): self.file_count += 1 return os.path.join(self.test_directory, 'sample_{}.ipynb'.format(self.file_count)) def get_kernelspec(self): for name in find_kernel_specs(): if name.startswith('python'): return name raise RuntimeError('No python kernel found to run notebooks!') def exists(self): return os.path.isdir(self.test_directory) def make_test_directory(self): if not self.exists(): os.mkdir(self.test_directory) def delete_test_directory(self): if self.exists(): shutil.rmtree(self.test_directory) @property def repo(self): if self._repo is None: self._repo = Repo.init(self.test_directory) return self._repo def commit(self, *files): to_add = self.repo.untracked_files[:] to_add.extend(files) if to_add: self.repo.index.add(to_add) return self.repo.index.commit('WIP').hexsha
MIT License
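Example (not from the source repository): a hedged usage sketch for the test helper above; the import path, the cell input "0", and the availability of a local Python Jupyter kernel and git are assumptions.

# Hedged sketch: exercise ProjectManager end to end in a temp directory.
from test.util import ProjectManager   # assumed import path

pm = ProjectManager()
pm.make_test_directory()
nb_path = pm.add_nb_with_input("0")   # copies sample_nb.ipynb, sets kernel and input
sha = pm.commit(nb_path)              # stages and commits the notebook in the temp repo
print(nb_path, sha)
pm.delete_test_directory()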
hoburg/pyxfoil
genpolar.py
sweep
python
def sweep(airfoils, res, min_alfa=4, write_file=True, plots_on=False, panels=200):
    os.chdir(cwd)
    sessionlog.comment("Beginning sweep with minimum alfa of " + str(min_alfa))
    xf = pyxfoil.session(logfile='sweep', plots=plots_on, force_zero=True)
    xf.naca('0010')
    xf.set_panels(panels)
    timeouts = 0
    start_time = time.time()
    last_time = start_time
    airfoils, res = list(airfoils), list(res)
    for naca in airfoils:
        xf.naca(naca)
        for re in res:
            percentage = 100*round((airfoils.index(naca)*len(res)+res.index(re)+1.)/(len(airfoils)*len(res)), 5)
            polarname = "NACA" + naca + "_Re" + str(int(round(re/1000))).zfill(8) + "k.pol"
            if polarname in get_existing():
                print "NACA " + naca + " Re " + (str(int(re/1000)) + 'k').rjust(8) + " has already been run: skipping (" + str(percentage) + "%)"
                continue
            xf.set_re(re)
            try:
                xf.generate_polar(filename=polarname, min_alfa=min_alfa, writefile=write_file)
                sessionlog.comment("NACA " + naca + ", re=" + str(re) + " simulation complete.")
                this_time = time.time()
                print str(percentage) + "% complete, " + str(round(this_time-last_time, 3)) + " seconds"
                last_time = this_time
            except pexpect.TIMEOUT:
                xf.force_quit()
                print "XFOIL timed out at NACA=" + naca + " Re=" + str(re)
                sessionlog.timeout(naca, re)
                timeouts += 1
                print "Attempting to restarting at current set."
                xf = pyxfoil.session(airfoil=naca, re=re, logfile='sweep', plots=plots_on, force_zero=True)
                try:
                    xf.generate_polar(filename=polarname, min_alfa=min_alfa, writefile=write_file)
                    sessionlog.comment("NACA " + naca + ", Re=" + str(re) + " recovered on second try.")
                    this_time = time.time()
                    print str(percentage) + "% complete, " + str(round(this_time-last_time, 3)) + " seconds"
                    last_time = this_time
                except pexpect.TIMEOUT:
                    xf.force_quit()
                    sessionlog.comment("NACA " + naca + ", Re=" + str(re) + " failed to recover on second try. Continuing at next set.")
                    print "NACA " + naca + ", Re=" + str(re) + " failed to recover on second try. Continuing at next set."
                    xf = pyxfoil.session(logfile='sweep', plots=plots_on, force_zero=True, airfoil=naca)
    xf.quit()
    total_seconds = time.time()-start_time
    average_time = round(total_seconds/(len(res)*len(airfoils)), 3)
    m, s = divmod(total_seconds, 60)
    h, m = divmod(m, 60)
    timeout_count = "Number of xfoil timeouts: " + str(timeouts)
    completion_time = "Time to complete: " + str(h) + " hours " + str(m) + " minutes " + str(round(s, 3)) + " seconds."
    simulation_count = "Number of simulations: " + str(len(airfoils) * len(res))
    average_time = "Average simulation length: " + str(average_time) + ' seconds.'
    sessionlog.comment(timeout_count)
    sessionlog.comment(completion_time)
    sessionlog.comment(simulation_count)
    sessionlog.comment(average_time)
    sessionlog.sweep_param(airfoils, res)
    print timeout_count + '\n' + completion_time + '\n' + simulation_count + '\n' + average_time
    os.chdir(cwd)
Runs a large sweep over airfoil and re range

@param airfoils iterable of NACA numbers to sweep over
@param res iterable reynolds numbers to sweep over
@param write_file boolean indicating whether or not to create polars
@param plots_on boolean indicating whether or not to simulate with plots on
@param panels included as per original genpolar file
https://github.com/hoburg/pyxfoil/blob/aa089c3bbd0c0911400b1d525d544b796dc1253d/genpolar.py#L65-L138
import sys, os from decimal import * import pexpect, time, shutil, code, numpy package_files = ['pyxfoil', 'sorter', 'div_sort', '__init__', 'runlog', 'plotter'] input_args = sys.argv[1:] homedir = os.getcwd() + '/' if len(input_args) == 0: print "No directory specified:" run_dir = raw_input("Enter a run directory >> " ).rstrip('/') else: run_dir = input_args[0].rstrip('/') try: os.chdir(run_dir) cwd = os.getcwd() + '/' runlogfile = file(cwd + 'logs/sessionlog.txt', 'a') print "Existing run loaded: " + run_dir except OSError: try: os.mkdir(run_dir) os.chdir(run_dir) os.mkdir('src') os.mkdir('logs') os.mkdir('mergedump') os.mkdir('savedpolars') cwd = os.getcwd() + '/' for filename in package_files: shutil.copyfile(homedir + 'pyxfoil/' + filename + '.py', cwd + 'src/' + filename + '.py') runlogfile = file(cwd + 'logs/sessionlog.txt', 'w') print "New run created: " + run_dir except OSError: print "Fatal error: directory " + run_dir + " not found and could not be created." sys.exit(0) finally: sys.path.append(cwd) from src import pyxfoil, sorter, runlog, plotter sessionlog = runlog.runlog(runlogfile) def get_existing(dir='savedpolars'): existing_polars = dict() try: os.chdir(cwd + dir) existing_polars = [p for p in os.listdir(os.getcwd()) if '.pol' in p] os.chdir(cwd) except OSError: pass return existing_polars
MIT License
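Example (not from the source repository): a hedged sketch of how `sweep` might be invoked; the NACA numbers and Reynolds values are placeholders, and the run-directory setup in the module context must have executed first so that `cwd`, `pyxfoil`, and `sessionlog` exist.

# Hedged sketch: a small airfoil / Reynolds-number sweep with placeholder values.
airfoils = ['0012', '2412', '4412']
res = [100000, 200000, 500000]
sweep(airfoils, res, min_alfa=0, write_file=True, plots_on=False, panels=200)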
square/connect-python-sdk
squareconnect/models/employee.py
Employee.first_name
python
def first_name(self):
    return self._first_name
Gets the first_name of this Employee.
Given (first) name of the employee.

:return: The first_name of this Employee.
:rtype: str
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/employee.py#L97-L105
from pprint import pformat from six import iteritems import re class Employee(object): def __init__(self, id=None, first_name=None, last_name=None, email=None, phone_number=None, location_ids=None, status=None, created_at=None, updated_at=None): self.swagger_types = { 'id': 'str', 'first_name': 'str', 'last_name': 'str', 'email': 'str', 'phone_number': 'str', 'location_ids': 'list[str]', 'status': 'str', 'created_at': 'str', 'updated_at': 'str' } self.attribute_map = { 'id': 'id', 'first_name': 'first_name', 'last_name': 'last_name', 'email': 'email', 'phone_number': 'phone_number', 'location_ids': 'location_ids', 'status': 'status', 'created_at': 'created_at', 'updated_at': 'updated_at' } self._id = id self._first_name = first_name self._last_name = last_name self._email = email self._phone_number = phone_number self._location_ids = location_ids self._status = status self._created_at = created_at self._updated_at = updated_at @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property
Apache License 2.0
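Example (not from the source repository): a short usage sketch for the property above, assuming the constructor shown in the context; the import path follows the module path of the record.

# Hedged sketch: construct an Employee and read the first_name property.
from squareconnect.models.employee import Employee

employee = Employee(first_name='Ada', last_name='Lovelace')
print(employee.first_name)   # -> 'Ada'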
sony/nnabla
python/src/nnabla/utils/audio_utils/__init__.py
auresize
python
def auresize(audio_arr, size, channel_first=False, **kwargs):
    return backend_manager.module.auresize(audio_arr, size,
                                           channel_first=channel_first, **kwargs)
Resize ''audio_arr'' to ''size''.
As default, the shape of input audio has to be (samples, channels).

Args:
    audio_arr (numpy.ndarray): Input audio.
    size (tuple of int): Output shape. The order is (samples, channels).
    channel_first (bool): This argument specifies whether the shape of the audio is
        (samples, channels) or (channels, samples). Default value is False, which
        means the audio shape is (samples, channels).

Returns:
    numpy.ndarray (same sample-channel order as audio_arr)
https://github.com/sony/nnabla/blob/fef9b6bca02a002de880a13f3196df14369445f4/python/src/nnabla/utils/audio_utils/__init__.py#L146-L161
from __future__ import absolute_import import numpy as np from .backend_manager import backend_manager from .common import rescale_intensity, _auto_scale_before def set_backend(backend): backend_manager.backend = backend def get_backend(): return backend_manager.backend def get_available_backends(): return backend_manager.get_available_backends() def minmax_auto_scale(audio_arr, out_datatype): _auto_scale_before(audio_arr, out_datatype) if str(out_datatype).find('int') > -1: output_high = np.iinfo(out_datatype).max output_low = np.iinfo(out_datatype).min else: output_high = np.finfo(out_datatype).max output_low = np.finfo(out_datatype).min output_type = out_datatype return rescale_intensity(audio_arr, input_low=audio_arr.min(), input_high=audio_arr.max(), output_low=output_low, output_high=output_high, output_type=output_type) def auread(path, channel_first=False, raw_format_param=None, **kwargs): from nnabla.utils.data_source_loader import ResourceFileReader source = ResourceFileReader(path) return backend_manager.module.auread(source, channel_first=channel_first, raw_format_param=None, **kwargs) def ausave(path, audio_arr, channel_first=False, frame_rate=48000, **kwargs): backend_manager.module.ausave( path, audio_arr, channel_first=channel_first, frame_rate=frame_rate, **kwargs)
Apache License 2.0
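Example (not from the source repository): a hedged sketch of resizing a stereo buffer; it assumes an audio backend is available (see set_backend / get_available_backends in the module context).

# Hedged sketch: resize a (samples, channels) buffer from 48000 to 24000 samples.
import numpy as np
from nnabla.utils.audio_utils import auresize

audio = np.zeros((48000, 2), dtype=np.int16)   # (samples, channels)
resized = auresize(audio, (24000, 2))
print(resized.shape)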
ros-industrial/robodk_postprocessors
KUKA_KRC2_CamRob.py
RobotPost.setTool
python
def setTool(self, pose, tool_id, tool_name):
    if tool_id is not None and tool_id >= 0:
        self.TOOL_ID = tool_id
    if self.ProgCSV < 0:
        self.addline('$TOOL = {FRAME: ' + pose_2_str(pose) + '}')
    else:
        self.addlog('Warning: Can not set the tool frame inside CSV file')
Change the robot TCP
https://github.com/ros-industrial/robodk_postprocessors/blob/d7e6c1c07758d67d2906cfd638049bdff88cca72/KUKA_KRC2_CamRob.py#L230-L238
from robodk import * HEADER = ''' ;FOLD INI BAS (#INITMOV,0 ) ;ENDFOLD (INI) ;FOLD STARTPOS $BWDSTART = FALSE PDAT_ACT = PDEFAULT BAS(#PTP_DAT) FDAT_ACT = {TOOL_NO 0,BASE_NO 0,IPO_FRAME #BASE} BAS(#FRAMES) ;ENDFOLD ;FOLD SET DEFAULT SPEED $VEL.CP=0.2 BAS(#VEL_PTP,100) BAS(#TOOL,0) BAS(#BASE,0) ;ENDFOLD PTP $AXIS_ACT ; skip BCO quickly ''' def pose_2_str(pose): [x,y,z,r,p,w] = pose_2_xyzrpw(pose) return ('X %.3f,Y %.3f,Z %.3f,A %.3f,B %.3f,C %.3f' % (x,y,z,w,p,r)) def pose_2_str_ext(pose,joints): njoints = len(joints) if njoints <= 6: return pose_2_str(pose) else: extaxes = '' for i in range(njoints-6): extaxes = extaxes + (',E%i %.5f' % (i+1, joints[i+6])) return pose_2_str(pose) + extaxes def angles_2_str(angles): str = '' data = ['A1','A2','A3','A4','A5','A6','E1','E2','E3','E4','E5','E6'] for i in range(len(angles)): str = str + ('%s %.5f,' % (data[i], angles[i])) str = str[:-1] return str class RobotPost(object): PROG_EXT = 'src' ROBOT_POST = '' ROBOT_NAME = '' PROG_FILES = [] PROG = '' PROG_CSV = [] LOG = '' nAxes = 6 E01 = 0 nProgs = 0 nLineCSV = 0 ProgCSV = -1 SPEED_MMS = 150 SPEED_MMS_MAX = 2000 ACCEL_MMSS = 2000 TOOL_ID = 0 FRAME_ID = 0 LAST_POSE_CSV = None PROG_NAME_CSV = None C_DIS = '' C_PTP = '' def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs): self.ROBOT_POST = robotpost self.ROBOT_NAME = robotname self.PROG = '' self.LOG = '' self.nAxes = robot_axes def ProgStart(self, progname): self.nProgs = self.nProgs + 1 self.PROG_NAME_CSV = progname self.addline('DEF %s ( )' % progname) self.PROG = self.PROG + HEADER self.addline('$ACC.CP = %.5f' % (self.ACCEL_MMSS/1000.0)) def ProgFinish(self, progname): self.addline('END') def ProgSave(self, folder, progname, ask_user = False, show_result = False): progname = progname + '.' 
+ self.PROG_EXT progname_csv = self.PROG_NAME_CSV + '.csv' if ask_user or not DirExists(folder): filesave = getSaveFile(folder, progname, 'Save program as...') if filesave is not None: filesave = filesave.name filesave_csv = getFileDir(filesave) + '/' + progname_csv else: return else: filesave = folder + '/' + progname filesave_csv = folder + '/' + progname_csv fid = open(filesave, "w") fid.write(self.PROG) fid.close() fidcsv = open(filesave_csv, "w") for line in self.PROG_CSV: fidcsv.write(line) fidcsv.write('\n') fidcsv.close() print('SAVED: %s\n' % filesave) self.PROG_FILES = filesave if show_result: if type(show_result) is str: import subprocess p = subprocess.Popen([show_result, filesave, filesave_csv]) elif type(show_result) is list: import subprocess p = subprocess.Popen(show_result + [filesave]) else: import os os.startfile(filesave) if len(self.LOG) > 0: mbox('Program generation LOG:\n\n' + self.LOG) def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass): UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass) def MoveJ(self, pose, joints, conf_RLF=None): self.addline('PTP {' + angles_2_str(joints) + '}' + self.C_PTP) def MoveL(self, pose, joints, conf_RLF=None): if self.nAxes > 6: self.E01 = joints[6] if self.ProgCSV == self.nProgs: self.addline_csv(pose) elif self.ProgCSV < 0: self.ProgCSV = self.nProgs self.addline('CR_PROCESS_FILE("%s.csv", 1, -1, "OK" )' % (self.PROG_NAME_CSV)); self.addline_csv(pose) else: self.addline('LIN {' + pose_2_str_ext(pose,joints) + '}' + self.C_DIS) def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None): if self.ProgCSV < 0: self.addline('CIRC {' + pose_2_str_ext(pose1,joints1) + '},{' + pose_2_str_ext(pose2,joints2) + '}' + self.C_DIS) else: self.addlog('Warning: Can not move circular inside a CSV file') def setFrame(self, pose, frame_id=None, frame_name=None): if frame_id is not None and frame_id >= 0: self.FRAME_ID = frame_id if self.ProgCSV < 0: self.addline('$BASE = {FRAME: ' + pose_2_str(pose) + '}') else: self.addlog('Warning: Can not set the reference frame inside CSV file')
Apache License 2.0
getsentry/sentry-python
sentry_sdk/scope.py
Scope.span
python
def span(self):
    return self._span
Get/set current tracing span or transaction.
https://github.com/getsentry/sentry-python/blob/cad2f65316bab4ee5792b1b788c32c57293eea5e/sentry_sdk/scope.py#L194-L197
from copy import copy from collections import deque from itertools import chain from sentry_sdk._functools import wraps from sentry_sdk._types import MYPY from sentry_sdk.utils import logger, capture_internal_exceptions from sentry_sdk.tracing import Transaction from sentry_sdk.attachments import Attachment if MYPY: from typing import Any from typing import Dict from typing import Optional from typing import Deque from typing import List from typing import Callable from typing import TypeVar from sentry_sdk._types import ( Breadcrumb, Event, EventProcessor, ErrorProcessor, ExcInfo, Hint, Type, ) from sentry_sdk.tracing import Span from sentry_sdk.session import Session F = TypeVar("F", bound=Callable[..., Any]) T = TypeVar("T") global_event_processors = [] def add_global_event_processor(processor): global_event_processors.append(processor) def _attr_setter(fn): return property(fset=fn, doc=fn.__doc__) def _disable_capture(fn): @wraps(fn) def wrapper(self, *args, **kwargs): if not self._should_capture: return try: self._should_capture = False return fn(self, *args, **kwargs) finally: self._should_capture = True return wrapper class Scope(object): __slots__ = ( "_level", "_name", "_fingerprint", "_transaction", "_user", "_tags", "_contexts", "_extras", "_breadcrumbs", "_event_processors", "_error_processors", "_should_capture", "_span", "_session", "_attachments", "_force_auto_session_tracking", ) def __init__(self): self._event_processors = [] self._error_processors = [] self._name = None self.clear() def clear(self): self._level = None self._fingerprint = None self._transaction = None self._user = None self._tags = {} self._contexts = {} self._extras = {} self._attachments = [] self.clear_breadcrumbs() self._should_capture = True self._span = None self._session = None self._force_auto_session_tracking = None @_attr_setter def level(self, value): self._level = value def set_level(self, value): self._level = value @_attr_setter def fingerprint(self, value): self._fingerprint = value @property def transaction(self): if self._span is None: return None if self._span.containing_transaction is None: return None return self._span.containing_transaction @transaction.setter def transaction(self, value): self._transaction = value span = self._span if span and isinstance(span, Transaction): span.name = value @_attr_setter def user(self, value): self.set_user(value) def set_user(self, value): self._user = value if self._session is not None: self._session.update(user=value) @property
BSD 2-Clause Simplified License
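Example (not from the source repository): a hedged sketch of reading the span property off the active scope; the empty DSN simply keeps the client from sending events.

# Hedged sketch: the span is None until tracing attaches a span or transaction.
import sentry_sdk

sentry_sdk.init(dsn="")
with sentry_sdk.configure_scope() as scope:
    print(scope.span)   # -> None until a span is set on the scope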
mozilla/remo
remo/profiles/api/api_v1.py
RepResource.apply_filters
python
def apply_filters(self, request, applicable_filters):
    base_object_list = (super(RepResource, self).
                        apply_filters(request, applicable_filters))
    query = request.GET.get('query', None)
    if query:
        query = unquote(query)
        qset = Q()
        for term in query.split():
            for key in ('first_name__istartswith', 'last_name__istartswith'):
                qset |= Q(**{key: term})
        qset |= (Q(userprofile__display_name__istartswith=query) |
                 Q(userprofile__local_name__istartswith=query) |
                 Q(userprofile__irc_name__istartswith=query) |
                 Q(email__istartswith=query) |
                 Q(userprofile__private_email__istartswith=query) |
                 Q(userprofile__country__istartswith=query) |
                 Q(userprofile__region__istartswith=query) |
                 Q(userprofile__city__istartswith=query) |
                 Q(userprofile__mozillians_profile_url__icontains=query) |
                 Q(userprofile__functional_areas__name__istartswith=query) |
                 Q(userprofile__mobilising_skills__name__istartswith=query) |
                 Q(userprofile__mobilising_interests__name__istartswith=query))
        base_object_list = base_object_list.filter(qset).distinct()
    group = request.GET.get('group', None)
    if group:
        if group == 'mentor':
            base_object_list = base_object_list.filter(groups__name='Mentor')
        elif group == 'council':
            base_object_list = base_object_list.filter(groups__name='Council')
        elif group == 'rep':
            base_object_list = base_object_list.filter(groups__name='Rep')
        elif group == 'review':
            base_object_list = base_object_list.filter(groups__name='Review')
        elif group == 'peers':
            base_object_list = base_object_list.filter(groups__name='Peers')
        elif group == 'resources':
            base_object_list = base_object_list.filter(groups__name='Resources')
        elif group == 'onboarding':
            base_object_list = base_object_list.filter(groups__name='Onboarding')
        elif group == 'newsletter':
            base_object_list = base_object_list.filter(groups__name='Newsletter')
    return base_object_list
Add special 'query' parameter to filter Reps.

When the 'query' parameter is present, the Rep list is filtered by last name,
first name, display name and local name with a case-insensitive matching method.

The 'query' parameter exists in parallel with the 'filter' parameters as defined
by tastypie and the RepResource schema.
https://github.com/mozilla/remo/blob/751c266a09ea560220029d95eb54359564f93d5e/remo/profiles/api/api_v1.py#L183-L240
from datetime import timedelta from urllib import unquote from django.conf import settings from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.db.models import Q from django.http import QueryDict from django.utils.timezone import now from tastypie import fields from tastypie.authentication import Authentication from tastypie.authorization import ReadOnlyAuthorization from tastypie.constants import ALL, ALL_WITH_RELATIONS from tastypie.resources import ModelResource from remo.api import HttpCache, RemoAPIThrottle, RemoThrottleMixin from remo.base.serializers import CSVSerializer from remo.profiles.templatetags.helpers import get_avatar_url from remo.profiles.models import UserProfile, FunctionalArea, MobilisingInterest, MobilisingSkill from remo.reports.models import NGReport from remo.reports.utils import get_last_report class FunctionalAreasResource(RemoThrottleMixin, ModelResource): class Meta: queryset = FunctionalArea.active_objects.all() resource_name = 'functionalareas' authentication = Authentication() authorization = ReadOnlyAuthorization() include_resource_uri = False include_absolute_url = False allowed_methods = ['get'] fields = ['name'] filtering = {'name': ALL} throttle = RemoAPIThrottle() class MobilisingSkillsResource(RemoThrottleMixin, ModelResource): class Meta: queryset = MobilisingSkill.active_objects.all() resource_name = 'mobilisingskills' authentication = Authentication() authorization = ReadOnlyAuthorization() include_resource_uri = False include_absolute_url = False allowed_methods = ['get'] fields = ['name'] filtering = {'name': ALL} throttle = RemoAPIThrottle() class MobilisingInterestsResource(RemoThrottleMixin, ModelResource): class Meta: queryset = MobilisingInterest.active_objects.all() resource_name = 'mobilisinginterests' authentication = Authentication() authorization = ReadOnlyAuthorization() include_resource_uri = False include_absolute_url = False allowed_methods = ['get'] fields = ['name'] filtering = {'name': ALL} throttle = RemoAPIThrottle() class ProfileResource(RemoThrottleMixin, ModelResource): profile_url = fields.CharField() avatar_url = fields.CharField() is_mentor = fields.BooleanField() is_council = fields.BooleanField() functional_areas = fields.ToManyField(FunctionalAreasResource, attribute='functional_areas', full=True, null=True) mobilising_skills = fields.ToManyField(MobilisingSkillsResource, attribute='mobilising_skills', full=True, null=True) mobilising_interests = fields.ToManyField(MobilisingInterestsResource, attribute='mobilising_interests', full=True, null=True) mentor = fields.ToOneField('remo.profiles.api.api_v1.RepResource', attribute='mentor', null=True) last_report_date = fields.DateField() class Meta: queryset = UserProfile.objects.filter(registration_complete=True) resource_name = 'profile' authentication = Authentication() authorization = ReadOnlyAuthorization() include_resource_uri = False include_absolute_url = False allowed_methods = ['get'] fields = ['city', 'region', 'country', 'display_name', 'local_name', 'lon', 'lat', 'mozillians_profile_url', 'twitter_account', 'facebook_url', 'diaspora_url', 'personal_blog_feed', 'irc_name'] ordering = ['country'] filtering = {'display_name': ALL, 'local_name': ALL, 'irc_name': ALL, 'country': ALL, 'region': ALL, 'city': ALL, 'functional_areas': ALL_WITH_RELATIONS, 'mobilising_skills': ALL_WITH_RELATIONS, 'mobilising_interests': ALL_WITH_RELATIONS} max_limit = 40 throttle = RemoAPIThrottle() def dehydrate(self, bundle): if 
bundle.request.method == 'GET': req = bundle.request.GET if req.get('format') == 'csv': bundle.data.pop('functional_areas', None) bundle.data.pop('mobilising_skills', None) bundle.data.pop('mobilising_interests', None) bundle.data.pop('personal_blog_feed', None) bundle.data.pop('profile_url', None) return bundle def dehydrate_profile_url(self, bundle): return (settings.SITE_URL + reverse('profiles_view_profile', kwargs={'display_name': bundle.obj.display_name})) def dehydrate_avatar_url(self, bundle): return get_avatar_url(bundle.obj.user, -1) def dehydrate_is_mentor(self, bundle): return bundle.obj.user.groups.filter(name='Mentor').count() == 1 def dehydrate_is_council(self, bundle): return bundle.obj.user.groups.filter(name='Council').count() == 1 def dehydrate_last_report_date(self, bundle): start_period = now().date() - timedelta(weeks=4) end_period = now().date() + timedelta(weeks=4) reports = bundle.obj.user.ng_reports.filter( report_date__range=(start_period, end_period)) try: return reports.latest('report_date').report_date except NGReport.DoesNotExist: report = get_last_report(bundle.obj.user) if report: return report.report_date return None class RepResource(RemoThrottleMixin, ModelResource): fullname = fields.CharField(attribute='get_full_name') profile = fields.ToOneField(ProfileResource, attribute='userprofile', full=True, null=True) class Meta: cache = HttpCache(control={'max_age': 3600, 'public': True, 's-maxage': 3600}) queryset = User.objects.filter(userprofile__registration_complete=True, groups__name='Rep') resource_name = 'rep' authentication = Authentication() authorization = ReadOnlyAuthorization() serializer = CSVSerializer(formats=['json', 'jsonp', 'csv']) allowed_methods = ['get'] fields = ['first_name', 'last_name'] ordering = ['profile', 'first_name', 'last_name'] filtering = {'first_name': ALL, 'last_name': ALL, 'profile': ALL_WITH_RELATIONS} throttle = RemoAPIThrottle()
BSD 3-Clause New or Revised License
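Example (not from the source repository): a hedged sketch of how the extra 'query' and 'group' parameters might be passed to the Rep endpoint; the host and URL prefix are assumptions, only the parameter names come from the resource code above.

# Hedged sketch: query the Rep API with the custom parameters.
import requests

resp = requests.get(
    "https://reps.mozilla.org/api/v1/rep/",   # hypothetical base URL and path
    params={"query": "athens", "group": "mentor", "format": "json"},
)
print(resp.status_code)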
zeaphoo/postmodel
postmodel/models/model.py
Model.get_or_create
python
async def get_or_create(cls, defaults=None, **kwargs):
    if not defaults:
        defaults = {}
    instance = await cls.filter(**kwargs).first()
    if instance:
        return instance, False
    return await cls.create(**defaults, **kwargs), True
Fetches the object if it exists (filtering on the provided parameters),
else creates an instance with any unspecified parameters as default values.
https://github.com/zeaphoo/postmodel/blob/727a0a4f12d313fc83488ebecf6368847c6c97a6/postmodel/models/model.py#L397-L407
from copy import copy, deepcopy from postmodel.exceptions import ConfigurationError, OperationalError, StaleObjectError from postmodel.main import Postmodel from collections import OrderedDict from .query import QuerySet, FilterBuilder from .fields import Field, DataVersionField import re import datetime import uuid import json _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)') _underscorer2 = re.compile('([a-z0-9])([A-Z])') def camel_to_snake(s): subbed = _underscorer1.sub(r'\1_\2', s) return _underscorer2.sub(r'\1_\2', subbed).lower() class MetaInfo: __slots__ = ( "abstract", "table", "db_name", "fields", "db_fields", "fields_db_projection", "fields_db_projection_reverse", "fields_map", "auto_fields", "dataversion_field", "unique_together", "indexes", "pk_attr", "table_description", "pk", "db_pk_field", "filters" ) def __init__(self, meta) -> None: self.abstract = getattr(meta, "abstract", False) self.table = getattr(meta, "table", ) self.db_name = getattr(meta, "db_name", 'default') self.unique_together = self._get_together(meta, "unique_together") self.indexes = self._get_together(meta, "indexes") self.fields = set() self.db_fields = set() self.fields_db_projection = OrderedDict() self.fields_db_projection_reverse = OrderedDict() self.fields_map = OrderedDict() self.auto_fields = [] self.dataversion_field = "" self.pk_attr = getattr(meta, "pk_attr", "") self.table_description = getattr(meta, "table_description", "") self.pk = None self.db_pk_field = "" self.filters = {} def _get_together(self, meta, together: str): _together = getattr(meta, together, ()) if isinstance(_together, (list, tuple)): if _together and isinstance(_together[0], str): _together = (_together,) return _together def finalise_pk(self) -> None: self.pk = self.fields_map[self.pk_attr] self.db_pk_field = self.pk.db_field or self.pk_attr def finalize_filters(self): for field_name, db_field in self.fields_db_projection.items(): filters = FilterBuilder.get_filters_for_field(self.fields_map[field_name], field_name, db_field) self.filters[field_name] = filters def get_filter(self, key: str) -> dict: return self.filters.get(key, None) def finalise_model(self) -> None: if not self.abstract and not self.pk_attr: raise Exception('model must have pk or be abstract.') if self.pk_attr: self.finalise_pk() self.finalize_filters() self.finalise_fields() def finalise_fields(self) -> None: self.db_fields = set(self.fields_db_projection.values()) self.fields = set(self.fields_map.keys()) self.fields_db_projection_reverse = { value: key for key, value in self.fields_db_projection.items() } for field_name in self.fields: field = self.fields_map[field_name] if hasattr(field, 'auto_value'): self.auto_fields.append(field) if isinstance(field, DataVersionField): if not self.dataversion_field: self.dataversion_field = field_name else: raise Exception('model class can only have one DataVersionField.') class ModelMeta(type): __slots__ = () def __new__(mcs, name: str, bases, attrs: dict, *args, **kwargs): fields_db_projection = OrderedDict() fields_map = OrderedDict() meta_class = attrs.pop("Meta", type("Meta", (), {})) if not hasattr(meta_class, "table"): setattr(meta_class, "table", camel_to_snake(name)) meta = MetaInfo(meta_class) fields_map = {} fields_db_projection = {} pk_attr = None for key, value in attrs.items(): if isinstance(value, Field): fields_map[key] = value value.model_field_name = key if value.pk: if pk_attr != None: raise Exception('duplicated pk not allowed.') pk_attr = key fields_db_projection[key] = value.db_field or key for 
key in fields_map.keys(): attrs.pop(key) for base in bases: _meta = getattr(base, "_meta", None) if not _meta: continue fields_map.update(deepcopy(_meta.fields_map)) fields_db_projection.update(deepcopy(_meta.fields_db_projection)) if _meta.pk_attr: if pk_attr != None: raise Exception('duplicated pk not allowed.') else: pk_attr = _meta.pk_attr meta.fields_map = fields_map meta.fields_db_projection = fields_db_projection if not fields_map: meta.abstract = True meta.pk_attr = pk_attr or "" attrs["_meta"] = meta new_class = super().__new__(mcs, name, bases, attrs) meta.finalise_model() new_class.check() return new_class class Model(metaclass=ModelMeta): _meta = None class Meta: pass def __init__(self, *args, load_from_db=False, **kwargs) -> None: meta = self._meta self._saved_in_db = load_from_db passed_fields = {*kwargs.keys()} for key, value in kwargs.items(): if key in meta.fields_db_projection: field = meta.fields_map[key] if value is None and not field.null: raise ValueError(f"{key} is non nullable field, but null was passed") setattr(self, key, field.to_python_value(value)) for key in meta.fields.difference(passed_fields): field = meta.fields_map[key] if callable(field.default): setattr(self, key, field.default()) else: setattr(self, key, field.default) self._snapshot_data = {} if self._saved_in_db: self.make_snapshot() @classmethod def _init_from_db(cls, **kwargs): instance = cls(load_from_db=True, **kwargs) return instance def make_snapshot(self): new_data = dict() for key in self._meta.fields_db_projection.keys(): new_data[key] = deepcopy(getattr(self, key)) self._snapshot_data = new_data def to_dict(self): data = dict() for key in self._meta.fields_db_projection.keys(): data[key] = deepcopy(getattr(self, key)) return data def to_jsondict(self): json_data = dict() for key in self._meta.fields_db_projection.keys(): value = deepcopy(getattr(self, key)) if isinstance(value, (datetime.date, datetime.datetime)): json_data[key] = value.isoformat() elif isinstance(value, uuid.UUID): json_data[key] = str(value) else: json_data[key] = value return json_data def to_json(self): return json.dumps(self.to_jsondict()) def changed(self): now_data = dict() for key in self._meta.fields_db_projection.keys(): now_data[key] = getattr(self, key) diff = self.dict_diff(now_data, self._snapshot_data) return diff.keys() def dict_diff(self, first, second): diff = {} for key in first.keys(): if key not in second: diff[key] = (first[key], None) elif (first[key] != second[key]): diff[key] = (first[key], second[key]) return diff def __str__(self) -> str: return "<{}>".format(self.__class__.__name__) def __repr__(self) -> str: if self.pk: return "<{}: {}>".format(self.__class__.__name__, self.pk) return "<{}>".format(self.__class__.__name__) def __hash__(self) -> int: if not self.pk: raise TypeError("Model instances without id are unhashable") return hash(self.pk) def __eq__(self, other) -> bool: if type(self) == type(other) and self.pk == other.pk: return True return False @property def pk(self): return getattr(self, self._meta.pk_attr) @pk.setter def pk(self, value): setattr(self, self._meta.pk_attr, value) def _auto_values(self): for field in self._meta.auto_fields: field.auto_value(self) @classmethod def first(cls): return QuerySet(cls).first() @classmethod def filter(cls, *args, **kwargs): return QuerySet(cls).filter(*args, **kwargs) @classmethod def exclude(cls, *args, **kwargs): return QuerySet(cls).exclude(*args, **kwargs) @classmethod def all(cls): return QuerySet(cls) @classmethod def get(cls, *args, 
**kwargs): return QuerySet(cls).get(*args, **kwargs) @classmethod def get_or_none(cls, *args, **kwargs): return QuerySet(cls).filter(*args, **kwargs).first() async def save(self, update_fields = None, force=False) -> int: changed = self.changed() if len(changed) == 0: return condition_fields = [] dataver_field_name = self._meta.dataversion_field if not force: self._auto_values() if dataver_field_name and dataver_field_name in self._snapshot_data: condition_fields.append((dataver_field_name, self._snapshot_data[dataver_field_name])) fileds = list(set(update_fields or ()) | set(self.changed())) mapper = self.get_mapper() if self._saved_in_db: ret = await mapper.update(self, update_fields=fileds, condition_fields=condition_fields) if ret == 0: raise StaleObjectError('model save failed.') else: ret = await mapper.insert(self) if ret == 0: raise StaleObjectError('model insert failed.') self._saved_in_db = True self.make_snapshot() async def delete(self) -> int: if not self._saved_in_db: raise OperationalError("Can't delete unpersisted record") mapper = self.get_mapper() return await mapper.delete(self) @classmethod
MIT License
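Example (not from the source repository): a hedged sketch of how get_or_create is typically awaited; the `User` model, its fields, and a previously initialised database connection are assumptions, not taken from the snippet above.

# Hedged sketch: look up a row by its natural key, creating it on first use.
import asyncio

async def main():
    # assumes a User model subclassing postmodel.models.Model and an initialised database
    user, created = await User.get_or_create(name="alice", defaults={"email": "alice@example.com"})
    if created:
        print("created new user", user.pk)
    else:
        print("found existing user", user.pk)

asyncio.run(main())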
azure/autorest.python
test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models.py
AccessPolicy.__init__
python
def __init__(self, **kwargs):
    super(AccessPolicy, self).__init__(**kwargs)
    self.start = kwargs["start"]
    self.expiry = kwargs["expiry"]
    self.permission = kwargs["permission"]
:keyword start: Required. the date-time the policy is active.
:paramtype start: ~datetime.datetime
:keyword expiry: Required. the date-time the policy expires.
:paramtype expiry: ~datetime.datetime
:keyword permission: Required. the permissions for the acl policy.
:paramtype permission: str
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models.py#L38-L50
from azure.core.exceptions import HttpResponseError
import msrest.serialization


class AccessPolicy(msrest.serialization.Model):

    _validation = {
        "start": {"required": True},
        "expiry": {"required": True},
        "permission": {"required": True},
    }

    _attribute_map = {
        "start": {"key": "Start", "type": "iso-8601"},
        "expiry": {"key": "Expiry", "type": "iso-8601"},
        "permission": {"key": "Permission", "type": "str"},
    }
MIT License
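Example (not from the source repository): a hedged sketch of constructing the generated model; the import path is an assumption based on the record's module path, and all three keyword arguments are required.

# Hedged sketch: build an AccessPolicy with the required keyword arguments.
from datetime import datetime, timedelta
from xmlservice.models import AccessPolicy   # assumed import path

policy = AccessPolicy(
    start=datetime.utcnow(),
    expiry=datetime.utcnow() + timedelta(hours=1),
    permission="rwd",
)
print(policy.permission)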
mitodl/mitx-grading-library
mitxgraders/helpers/calc/robust_pow.py
robust_pow
python
def robust_pow(base, exponent):
    try:
        return base ** exponent
    except ValueError:
        return np.lib.scimath.power(base, exponent)
Calculates __pow__, and tries other approaches if that doesn't work.

Usage:
======

>>> robust_pow(5, 2)
25
>>> robust_pow(0.5, -1)
2.0

If base is negative and power is fractional, complex results are returned:

>>> almost_j = robust_pow(-1, 0.5)
>>> np.allclose(almost_j, 1j)
True
https://github.com/mitodl/mitx-grading-library/blob/5ce94addd91dbd3826b97ae44de6aa146018d455/mitxgraders/helpers/calc/robust_pow.py#L9-L31
from __future__ import print_function, division, absolute_import, unicode_literals

import numpy as np
BSD 3-Clause New or Revised License
graviti-ai/tensorbay-python-sdk
tensorbay/utility/attr.py
attr
python
def attr(
    *,
    is_dynamic: bool = False,
    key: Union[str, None, _KeyConverter] = lambda x: x,
    default: Any = ...,
    error_message: Optional[str] = None,
    loader: Optional[Callable[[Any], Any]] = None,
    dumper: Optional[Callable[[Any], Any]] = None,
) -> Any:
    if is_dynamic and default is not ...:
        raise AttrError()
    return Field(
        is_dynamic=is_dynamic,
        key=key,
        default=default,
        error_message=error_message,
        loader=loader,
        dumper=dumper,
    )
Return an instance to identify attr fields.

Arguments:
    is_dynamic: Determine if this is a dynamic attr.
    key: Display value of the attr in contents.
    default: Default value of the attr.
    error_message: The custom error message of the attr.
    loader: The custom loader of the attr.
    dumper: The custom dumper of the attr.

Raises:
    AttrError: Dynamic attr cannot have default value.

Returns:
    A :class:`Field` instance containing all attr fields.
https://github.com/graviti-ai/tensorbay-python-sdk/blob/db60d259869d6a528ee1ad84103d2b9bab1bd72e/tensorbay/utility/attr.py#L220-L256
from sys import version_info from typing import ( Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, no_type_check, ) from typing_extensions import NoReturn, Protocol from tensorbay.exception import AttrError _T = TypeVar("_T") _Callable = Callable[[Any], Any] _KeyConverter = Callable[[str], str] _BUILTINS = {"builtins", None, "typing"} _DEFAULT_ERROR_MESSAGE = "'{class_name}' object has no attribute '{attr_name}'" _ATTRS_BASE = "_attrs_base" class _A(Protocol): def dumps(self) -> Any: class Field: def __init__( self, *, is_dynamic: bool, key: Union[str, None, _KeyConverter], default: Any, error_message: Optional[str], loader: Optional[_Callable], dumper: Optional[_Callable], ) -> None: if loader: self.loader = loader if dumper: self.dumper = dumper self.is_dynamic = is_dynamic self.default = default if callable(key): self.key_converter = key else: self.key = key if error_message: self.error_message = error_message def __getattr__(self, name: str) -> NoReturn: raise AttributeError( _DEFAULT_ERROR_MESSAGE.format(class_name=self.__class__.__name__, attr_name=name) ) class BaseField: def __init__(self, key: Optional[str]) -> None: self.loader: _Callable self.dumper: _Callable self.key = key class AttrsMixin: _attrs_fields: Dict[str, Field] _attrs_base: Any def __init_subclass__(cls) -> None: type_ = cls.__annotations__.pop(_ATTRS_BASE, None) if type_: cls._attrs_base.loader = type_._loads cls._attrs_base.dumper = getattr(type_, "_dumps", type_.dumps) attrs_fields = {} for base_class in cls.__bases__: base_fields = getattr(base_class, "_attrs_fields", None) if base_fields: attrs_fields.update(base_fields) for name, type_ in getattr(cls, "__annotations__", {}).items(): field = getattr(cls, name, None) if isinstance(field, Field): need_loader = not hasattr(field, "loader") need_dumper = not hasattr(field, "dumper") if need_loader or need_dumper: loader, dumper = _get_operators(type_) if need_loader: field.loader = loader if need_dumper: field.dumper = dumper if hasattr(field, "key_converter"): field.key = field.key_converter(name) attrs_fields[name] = field delattr(cls, name) cls._attrs_fields = attrs_fields def __eq__(self, other: object) -> bool: if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ @no_type_check def __getattr__(self, name: str) -> NoReturn: error_message = getattr( self._attrs_fields.get(name), "error_message", _DEFAULT_ERROR_MESSAGE ) raise AttributeError( error_message.format(class_name=self.__class__.__name__, attr_name=name) ) def _loads(self, contents: Any) -> None: base = getattr(self, _ATTRS_BASE, None) if base: value = contents if base.key is None else contents[base.key] base.loader(self, value) for name, field in self._attrs_fields.items(): if field.is_dynamic and field.key not in contents: continue if field.key is None: value = contents elif field.default is not ...: value = contents.get(field.key, field.default) else: value = contents[field.key] setattr(self, name, field.loader(value)) def _dumps(self) -> Dict[str, Any]: contents: Dict[str, Any] = {} base = getattr(self, _ATTRS_BASE, None) if base: _key_dumper(base.key, contents, base.dumper(self)) for name, field in self._attrs_fields.items(): if field.is_dynamic and not hasattr(self, name): continue value = getattr(self, name) if value == field.default: continue _key_dumper(field.key, contents, field.dumper(value)) return contents
MIT License
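Example (not from the source repository): a hedged sketch of declaring attr fields on an AttrsMixin subclass; the class and field names are made up, and only attr()'s keyword arguments come from the snippet above.

# Hedged sketch: attr() fields are picked up from the class annotations.
from tensorbay.utility.attr import AttrsMixin, attr

class Box(AttrsMixin):
    width: float = attr(key="w", default=0.0)
    height: float = attr(key="h", default=0.0)
    label: str = attr(is_dynamic=True, key="label")   # dynamic attrs take no default

box = Box()
box._loads({"w": 3.0, "h": 4.0})       # load from a contents dict
print(box.width, box.height, box._dumps())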
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/haveibeenpwned.py
setup_platform
python
def setup_platform(hass, config, add_devices, discovery_info=None):
    emails = config.get(CONF_EMAIL)
    data = HaveIBeenPwnedData(emails)
    devices = []
    for email in emails:
        devices.append(HaveIBeenPwnedSensor(data, hass, email))
    add_devices(devices)
    for sensor in devices:
        sensor.update_nothrottle()
Set up the HaveIBeenPwned sensor.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/haveibeenpwned.py#L39-L53
from datetime import timedelta
import logging

from aiohttp.hdrs import USER_AGENT
import requests
import voluptuous as vol

from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_EMAIL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util

_LOGGER = logging.getLogger(__name__)

DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
HA_USER_AGENT = "Home Assistant HaveIBeenPwned Sensor Component"

MIN_TIME_BETWEEN_FORCED_UPDATES = timedelta(seconds=5)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)

URL = 'https://haveibeenpwned.com/api/v2/breachedaccount/'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_EMAIL): vol.All(cv.ensure_list, [cv.string]),
})
MIT License
mgear-dev/mgear_core
scripts/mgear/core/log.py
matrix4
python
def matrix4(m, msg="matrix4"):
    s = (msg + " : \n"
         + "| %s , %s , %s , %s |\n" % (m[0][0], m[0][1], m[0][2], m[0][3])
         + "| %s , %s , %s , %s |\n" % (m[1][0], m[1][1], m[1][2], m[1][3])
         + "| %s , %s , %s , %s |\n" % (m[2][0], m[2][1], m[2][2], m[2][3])
         + "| %s , %s , %s , %s |" % (m[3][0], m[3][1], m[3][2], m[3][3]))
    print(s)
Print matrix 4x4 data.

Arguments:
    m (matrix): 4x4 Matrix
    msg (str): Message in front of the data print.
https://github.com/mgear-dev/mgear_core/blob/bb450fda44ff79c57f5f73d5a58c97a6b5c5d848/scripts/mgear/core/log.py#L4-L17
MIT License
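Example (not from the source repository): a hedged usage sketch for the logging helper above; the import path assumes the `scripts` directory is on the Python path, as is usual inside Maya.

# Hedged sketch: print an identity transform as a 4x4 block.
from mgear.core import log

identity = [[1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1]]
log.matrix4(identity, msg="worldMatrix")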
opentoontowntools/openleveleditor
toontown/toon/Toon.py
unloadSellbotHQAnims
python
def unloadSellbotHQAnims():
    loadPhaseAnims("phase_9", 0)
Unload the sellbot hq specific Toon anims
https://github.com/opentoontowntools/openleveleditor/blob/250c0c9b8e4dde406a7a9b921db59deb7b960ca9/toontown/toon/Toon.py#L454-L458
from otp.avatar import Avatar from . import ToonDNA from direct.task.Task import Task from direct.actor import Actor import string from .ToonHead import * from panda3d.core import * from direct.interval.IntervalGlobal import * from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from otp.otpbase import OTPLocalizer from toontown.toonbase import TTLocalizer import random from . import TTEmote from otp.avatar import Emote from . import Motion from otp.otpbase import OTPGlobals from direct.showbase.PythonUtil import Functor import types SLEEP_STRING = TTLocalizer.ToonSleepString DogDialogueArray = [] CatDialogueArray = [] HorseDialogueArray = [] RabbitDialogueArray = [] MouseDialogueArray = [] DuckDialogueArray = [] MonkeyDialogueArray = [] BearDialogueArray = [] PigDialogueArray = [] LegsAnimDict = {} TorsoAnimDict = {} HeadAnimDict = {} Preloaded = [] Phase3AnimList = ( ("neutral", "neutral"), ("run", "run"), ) Phase3_5AnimList = ( ("walk", "walk"), ("teleport", "teleport"), ("book", "book"), ("jump", "jump"), ("running-jump", "running-jump"), ("jump-squat", "jump-zstart"), ("jump-idle", "jump-zhang"), ("jump-land", "jump-zend"), ("running-jump-squat", "leap_zstart"), ("running-jump-idle", "leap_zhang"), ("running-jump-land", "leap_zend"), ("pushbutton", "press-button"), ("throw", "pie-throw"), ("victory", "victory-dance"), ("sidestep-left", "sidestep-left"), ("conked", "conked"), ("cringe", "cringe"), ("wave", "wave"), ("shrug", "shrug"), ("angry", "angry"), ("tutorial-neutral", "tutorial-neutral"), ("left-point", "left-point"), ("right-point", "right-point"), ("right-point-start", "right-point-start"), ("give-props", "give-props"), ("give-props-start", "give-props-start"), ("right-hand", "right-hand"), ("right-hand-start", "right-hand-start"), ("duck", "duck"), ("sidestep-right", "jump-back-right"), ("periscope", "periscope"), ) Phase4AnimList = ( ("sit", "sit"), ("sit-start", "intoSit"), ("swim", "swim"), ("tug-o-war", "tug-o-war"), ("sad-walk", "losewalk"), ("sad-neutral", "sad-neutral"), ("up", "up"), ("down", "down"), ("left", "left"), ("right", "right"), ("applause", "applause"), ("confused", "confused"), ("bow", "bow"), ("curtsy", "curtsy"), ("bored", "bored"), ("think", "think"), ("battlecast", "fish"), ("cast", "cast"), ("castlong", "castlong"), ("fish-end", "fishEND"), ("fish-neutral", "fishneutral"), ("fish-again", "fishAGAIN"), ("reel", "reel"), ("reel-H", "reelH"), ("reel-neutral", "reelneutral"), ("pole", "pole"), ("pole-neutral", "poleneutral"), ("slip-forward", "slip-forward"), ("slip-backward", "slip-backward"), ("catch-neutral", "gameneutral"), ("catch-run", "gamerun"), ("catch-eatneutral", "eat_neutral"), ("catch-eatnrun", "eatnrun"), ("catch-intro-throw", "gameThrow"), ('swing', 'swing'), ("pet-start", "petin"), ("pet-loop", "petloop"), ("pet-end", "petend"), ("scientistJealous", "scientistJealous"), ("scientistEmcee", "scientistEmcee"), ("scientistWork", "scientistWork"), ("scientistGame", "scientistGame"), ) Phase5AnimList = ( ("water-gun", "water-gun"), ("hold-bottle", "hold-bottle"), ("firehose", "firehose"), ("spit", "spit"), ("tickle", "tickle"), ("smooch", "smooch"), ("happy-dance", "happy-dance"), ("sprinkle-dust", "sprinkle-dust"), ("juggle", "juggle"), ("climb", "climb"), ("sound", "shout"), ("toss", "toss"), ("hold-magnet", "hold-magnet"), ("hypnotize", "hypnotize"), ("struggle", "struggle"), ("lose", "lose"), ("melt", "melt"), ) Phase5_5AnimList = ( ("takePhone", "takePhone"), ("phoneNeutral", "phoneNeutral"), 
("phoneBack", "phoneBack"), ("bank", "jellybeanJar"), ("callPet", "callPet"), ("feedPet", "feedPet"), ("start-dig", "into_dig"), ("loop-dig", "loop_dig"), ("water", "water"), ) Phase6AnimList = ( ("headdown-putt","headdown-putt"), ("into-putt","into-putt"), ("loop-putt","loop-putt"), ("rotateL-putt","rotateL-putt"), ("rotateR-putt","rotateR-putt"), ("swing-putt","swing-putt"), ("look-putt","look-putt"), ("lookloop-putt","lookloop-putt"), ("bad-putt","bad-putt"), ("badloop-putt","badloop-putt"), ("good-putt","good-putt"), ) Phase9AnimList = ( ("push", "push"), ) Phase10AnimList = ( ("leverReach", "leverReach"), ("leverPull", "leverPull"), ("leverNeutral", "leverNeutral"), ) Phase12AnimList = ( ) if not base.config.GetBool('want-new-anims', 1): LegDict = { "s":"/models/char/dogSS_Shorts-legs-", "m":"/models/char/dogMM_Shorts-legs-", "l":"/models/char/dogLL_Shorts-legs-" } TorsoDict = { "s":"/models/char/dogSS_Naked-torso-", "m":"/models/char/dogMM_Naked-torso-", "l":"/models/char/dogLL_Naked-torso-", "ss":"/models/char/dogSS_Shorts-torso-", "ms":"/models/char/dogMM_Shorts-torso-", "ls":"/models/char/dogLL_Shorts-torso-", "sd":"/models/char/dogSS_Skirt-torso-", "md":"/models/char/dogMM_Skirt-torso-", "ld":"/models/char/dogLL_Skirt-torso-" } else: LegDict = { "s":"/models/char/tt_a_chr_dgs_shorts_legs_", "m":"/models/char/tt_a_chr_dgm_shorts_legs_", "l":"/models/char/tt_a_chr_dgl_shorts_legs_" } TorsoDict = { "s":"/models/char/dogSS_Naked-torso-", "m":"/models/char/dogMM_Naked-torso-", "l":"/models/char/dogLL_Naked-torso-", "ss":"/models/char/tt_a_chr_dgs_shorts_torso_", "ms":"/models/char/tt_a_chr_dgm_shorts_torso_", "ls":"/models/char/tt_a_chr_dgl_shorts_torso_", "sd":"/models/char/tt_a_chr_dgs_skirt_torso_", "md":"/models/char/tt_a_chr_dgm_skirt_torso_", "ld":"/models/char/tt_a_chr_dgl_skirt_torso_" } def loadModels(): preloadAvatars = base.config.GetBool("preload-avatars", 0) if preloadAvatars: global Preloaded def loadTex(path): tex = loader.loadTexture(path) tex.setMinfilter(Texture.FTLinearMipmapLinear) tex.setMagfilter(Texture.FTLinear) Preloaded.append(tex) for shirt in ToonDNA.Shirts: loadTex(shirt) for sleeve in ToonDNA.Sleeves: loadTex(sleeve) for short in ToonDNA.BoyShorts: loadTex(short) for bottom in ToonDNA.GirlBottoms: loadTex(bottom[0]) for key in list(LegDict.keys()): fileRoot = LegDict[key] model = loader.loadModelNode("phase_3" + fileRoot + "1000") Preloaded.append(model) model = loader.loadModelNode("phase_3" + fileRoot + "500") Preloaded.append(model) model = loader.loadModelNode("phase_3" + fileRoot + "250") Preloaded.append(model) for key in list(TorsoDict.keys()): fileRoot = TorsoDict[key] model = loader.loadModelNode("phase_3" + fileRoot + "1000") Preloaded.append(model) if (len(key) > 1): model = loader.loadModelNode("phase_3" + fileRoot + "500") Preloaded.append(model) model = loader.loadModelNode("phase_3" + fileRoot + "250") Preloaded.append(model) for key in list(HeadDict.keys()): fileRoot = HeadDict[key] model = loader.loadModelNode("phase_3" + fileRoot + "1000") Preloaded.append(model) model = loader.loadModelNode("phase_3" + fileRoot + "500") Preloaded.append(model) model = loader.loadModelNode("phase_3" + fileRoot + "250") Preloaded.append(model) def loadBasicAnims(): loadPhaseAnims() def unloadBasicAnims(): loadPhaseAnims(0) def loadTutorialBattleAnims(): loadPhaseAnims("phase_3.5") def unloadTutorialBattleAnims(): loadPhaseAnims("phase_3.5", 0) def loadMinigameAnims(): loadPhaseAnims("phase_4") def unloadMinigameAnims(): loadPhaseAnims("phase_4", 0) def 
loadBattleAnims(): loadPhaseAnims("phase_5") def unloadBattleAnims(): loadPhaseAnims("phase_5", 0) def loadSellbotHQAnims(): loadPhaseAnims("phase_9")
MIT License