repository_name: stringlengths (7 to 107)
function_path: stringlengths (4 to 190)
function_identifier: stringlengths (1 to 236)
language: stringclasses (1 value)
function: stringlengths (9 to 647k)
docstring: stringlengths (5 to 488k)
function_url: stringlengths (71 to 285)
context: stringlengths (0 to 2.51M)
license: stringclasses (5 values)
napalm-automation/napalm-yang
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py
state._get_virtual_link_local
python
def _get_virtual_link_local(self):
    return self.__virtual_link_local
Getter method for virtual_link_local, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/state/virtual_link_local (inet:ip-address).

YANG Description: For VRRP on IPv6 interfaces, sets the virtual link local address.
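A minimal usage sketch for this generated getter, assuming the pyangbind-generated module is importable at the path shown in function_path above; pyangbind also exposes the leaf through a read-only property of the same name, which is the more usual way to read it:

# Hypothetical sketch; the deep import path mirrors function_path above.
from napalm_yang.models.openconfig.interfaces.interface.subinterfaces.subinterface.ipv6.addresses.address.vrrp.vrrp_group.state import state

vrrp_state = state()                          # empty generated container
print(vrrp_state._get_virtual_link_local())   # unset YANGDynClass leaf until populated
print(vrrp_state.virtual_link_local)          # equivalent read via the generated property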
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py#L928-L935
from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): __slots__ = ( "_path_helper", "_extmethods", "__virtual_router_id", "__virtual_address", "__priority", "__preempt", "__preempt_delay", "__accept_mode", "__advertisement_interval", "__current_priority", "__virtual_link_local", ) _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__accept_mode = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__advertisement_interval = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__current_priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__virtual_link_local = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" 
}, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" }, ), ], is_leaf=True, yang_name="virtual-link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "interfaces", "interface", "subinterfaces", "subinterface", "ipv6", "addresses", "address", "vrrp", "vrrp-group", "state", ] def _get_virtual_router_id(self): return self.__virtual_router_id def _set_virtual_router_id(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_router_id must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__virtual_router_id = t if hasattr(self, "_set"): self._set() def _unset_virtual_router_id(self): self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_virtual_address(self): return self.__virtual_address def _set_virtual_address(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": 
"(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" }, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_address must be of a type compatible with inet:ip-address""", "defined-type": "inet:ip-address", "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ip-address', is_config=False)""", } ) self.__virtual_address = t if hasattr(self, "_set"): self._set() def _unset_virtual_address(self): self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) def _get_priority(self): return self.__priority def _set_priority(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _unset_priority(self): self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_preempt(self): return self.__preempt def _set_preempt(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__preempt = t if hasattr(self, "_set"): self._set() def _unset_preempt(self): self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) def _get_preempt_delay(self): return self.__preempt_delay def _set_preempt_delay(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16, ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt_delay must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..3600']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(0), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""", } ) self.__preempt_delay = t if hasattr(self, "_set"): self._set() def _unset_preempt_delay(self): self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) def _get_accept_mode(self): return self.__accept_mode def _set_accept_mode(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """accept_mode must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__accept_mode = t if hasattr(self, "_set"): self._set() def _unset_accept_mode(self): self.__accept_mode = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) def _get_advertisement_interval(self): return self.__advertisement_interval def _set_advertisement_interval(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16, ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """advertisement_interval must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4095']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(100), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""", } ) self.__advertisement_interval = t if hasattr(self, "_set"): self._set() def _unset_advertisement_interval(self): self.__advertisement_interval = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) def _get_current_priority(self): return self.__current_priority def _set_current_priority(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """current_priority must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__current_priority = t if hasattr(self, "_set"): self._set() def _unset_current_priority(self): 
self.__current_priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, )
Apache License 2.0
hiwonjoon/icml2019-trex
atari/agc_demos.py
StackFrames
python
def StackFrames(frames):
    import copy
    stacked = []
    stacked_obs = np.zeros((84, 84, 4))
    for i in range(len(frames)):
        # The first three frames are skipped so that every observation has a full 4-frame history.
        if i >= 3:
            stacked_obs[:, :, 0] = frames[i - 3]
            stacked_obs[:, :, 1] = frames[i - 2]
            stacked_obs[:, :, 2] = frames[i - 1]
            stacked_obs[:, :, 3] = frames[i]
            stacked.append(np.expand_dims(copy.deepcopy(stacked_obs), 0))
    return stacked
Stack every four frames to make an observation of shape (84, 84, 4).
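A small usage sketch with dummy data (not from the repository): StackFrames takes the list of 84x84 preprocessed frames produced by MaxSkipAndWarpFrames and returns one (1, 84, 84, 4) observation per frame from index 3 onward.

import numpy as np

frames = [np.random.randint(0, 255, (84, 84)).astype(np.float64) for _ in range(10)]  # dummy frames
stacked = StackFrames(frames)
print(len(stacked))       # 7: the first three frames have no full 4-frame history
print(stacked[0].shape)   # (1, 84, 84, 4)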
https://github.com/hiwonjoon/icml2019-trex/blob/44f92b61ca6c79ac22d468382d4f2fbee164fb7a/atari/agc_demos.py#L45-L57
import agc.dataset as ds
import agc.util as util
import numpy as np
from os import path, listdir
import cv2
cv2.ocl.setUseOpenCL(False)
import argparse
from baselines.common.trex_utils import preprocess


def GrayScaleWarpImage(image):
    width = 84
    height = 84
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
    return frame


def MaxSkipAndWarpFrames(trajectory_dir):
    num_frames = len(listdir(trajectory_dir))
    skip = 4
    sample_pic = np.random.choice(listdir(trajectory_dir))
    image_path = path.join(trajectory_dir, sample_pic)
    pic = cv2.imread(image_path)
    obs_buffer = np.zeros((2,) + pic.shape, dtype=np.uint8)
    max_frames = []
    for i in range(num_frames):
        if i % skip == skip - 2:
            obs = cv2.imread(path.join(trajectory_dir, str(i) + ".png"))
            obs_buffer[0] = obs
        if i % skip == skip - 1:
            obs = cv2.imread(path.join(trajectory_dir, str(i) + ".png"))
            obs_buffer[1] = obs
            image = obs_buffer.max(axis=0)
            warped = GrayScaleWarpImage(image)
            max_frames.append(warped)
    return max_frames
MIT License
sphinx-toolbox/sphinx-toolbox
sphinx_toolbox/github/repos_and_users.py
user_role
python
def user_role(
    name: str,
    rawtext: str,
    text: str,
    lineno: int,
    inliner: Inliner,
    options: Dict[str, Any] = {},
    content: List[str] = []
) -> Tuple[List[nodes.reference], List[system_message]]:
    has_t, text, username = split_explicit_title(text)
    username = nodes.unescape(username)
    messages: List[system_message] = []

    if has_t:
        refnode = nodes.reference(
            text,
            text,
            refuri=str(GITHUB_COM / username),
        )
    else:
        refnode = GitHubObjectLinkNode(
            name=f"@{username}",
            refuri=GITHUB_COM / username,
        )

    return [refnode], messages
Adds a link to the given user / organization on GitHub.

:param name: The local name of the interpreted role, the role name actually used in the document.
:param rawtext: A string containing the entire interpreted text input, including the role and markup.
:param text: The interpreted text content.
:param lineno: The line number where the interpreted text begins.
:param inliner: The :class:`docutils.parsers.rst.states.Inliner` object that called :func:`~.user_role`. It contains several attributes useful for error reporting and document tree access.
:param options: A dictionary of directive options for customization (from the ``role`` directive), to be interpreted by the function. Used for additional attributes for the generated elements and other functionality.
:param content: A list of strings, the directive content for customization (from the ``role`` directive). To be interpreted by the function.

:return: A list containing the created node, and a list containing any messages generated during the function.

.. clearpage::
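A hedged usage sketch: in sphinx-toolbox the role is registered by the extension's setup code (not shown in this excerpt), but the same callable can be registered manually with docutils under a hypothetical role name:

from docutils.parsers.rst import roles

# Hypothetical local registration; the real extension registers its own role name.
roles.register_local_role("github-user", user_role)

# In reStructuredText the role would then be used as:
#   :github-user:`octocat`
#   :github-user:`the Octocat <octocat>`   (explicit-title form handled by split_explicit_title)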
https://github.com/sphinx-toolbox/sphinx-toolbox/blob/cee88c6bceac20a9ae0e381ada2fb2453ca3fc0b/sphinx_toolbox/github/repos_and_users.py#L145-L192
from typing import Any, Dict, List, Tuple, Union from apeye.url import URL from bs4 import BeautifulSoup from docutils import nodes from docutils.nodes import system_message from docutils.parsers.rst.states import Inliner from sphinx.util.nodes import split_explicit_title from sphinx.writers.html import HTMLTranslator from sphinx.writers.latex import LaTeXTranslator from sphinx_toolbox.utils import GITHUB_COM, make_github_url __all__ = [ "GitHubObjectLinkNode", "repository_role", "user_role", "visit_github_object_link_node", "depart_github_object_link_node", ] class GitHubObjectLinkNode(nodes.reference): name: str url: str def __init__( self, name: str, refuri: Union[str, URL], **kwargs, ): self.name = str(name) self.url = str(refuri) super().__init__(self.name, self.name, refuri=self.url) def copy(self) -> "GitHubObjectLinkNode": obj = self.__class__(self.name, self.url) obj.document = self.document obj.source = self.source obj.line = self.line return obj def repository_role( name: str, rawtext: str, text: str, lineno: int, inliner: Inliner, options: Dict[str, Any] = {}, content: List[str] = [] ) -> Tuple[List[nodes.reference], List[system_message]]: has_t, text, repo_name = split_explicit_title(text) repo_name = nodes.unescape(repo_name) repository_parts = nodes.unescape(repo_name).split('/') if len(repository_parts) != 2: return [], [inliner.document.reporter.warning(f"Invalid repository '{repo_name}'.")] if has_t: refnode = nodes.reference( text, text, refuri=str(make_github_url(*repository_parts)), ) else: refnode = GitHubObjectLinkNode( name=repo_name, refuri=make_github_url(*repository_parts), ) return [refnode], []
MIT License
ha0y/xiaomi_miot_raw
custom_components/xiaomi_miot_raw/light.py
MiotSubLight.brightness
python
def brightness(self):
    try:
        return self.convert_value(
            self.device_state_attributes[self._did_prefix + 'brightness'],
            "brightness",
            False,
            self._ctrl_params['brightness']['value_range'],
        )
    except:
        return None
Return the brightness of the light.
https://github.com/ha0y/xiaomi_miot_raw/blob/6fe412b3fbb7fca11f0faf518c654c84228412cf/custom_components/xiaomi_miot_raw/light.py#L235-L240
import asyncio import logging from functools import partial from datetime import timedelta import json from collections import OrderedDict import homeassistant.helpers.config_validation as cv import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, LightEntity) from homeassistant.const import * from homeassistant.exceptions import PlatformNotReady from homeassistant.util import color from miio.exceptions import DeviceException from .deps.miio_new import MiotDevice from .basic_dev_class import ( GenericMiotDevice, ToggleableMiotDevice, MiotSubDevice, MiotSubToggleableDevice, MiotIRDevice, ) from . import async_generic_setup_platform from .deps.const import ( DOMAIN, CONF_UPDATE_INSTANT, CONF_MAPPING, CONF_CONTROL_PARAMS, CONF_CLOUD, CONF_MODEL, ATTR_STATE_VALUE, ATTR_MODEL, ATTR_FIRMWARE_VERSION, ATTR_HARDWARE_VERSION, SCHEMA, MAP, DUMMY_IP, DUMMY_TOKEN, ) import copy TYPE = 'light' _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=10) DEFAULT_NAME = "Generic MIoT " + TYPE DATA_KEY = TYPE + '.' + DOMAIN PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( SCHEMA ) @asyncio.coroutine async def async_setup_platform(hass, config, async_add_devices, discovery_info=None): await async_generic_setup_platform( hass, config, async_add_devices, discovery_info, TYPE, {'default': MiotLight, '_ir_light': MiotIRLight}, {'default': MiotSubLight} ) async def async_setup_entry(hass, config_entry, async_add_entities): config = copy.copy(hass.data[DOMAIN]['configs'].get(config_entry.entry_id, dict(config_entry.data))) await async_setup_platform(hass, config, async_add_entities) class MiotLight(ToggleableMiotDevice, LightEntity): def __init__(self, device, config, device_info, hass, main_mi_type): ToggleableMiotDevice.__init__(self, device, config, device_info, hass, main_mi_type) self._brightness = None self._color = None self._color_temp = None self._effect = None hass.async_add_job(self.create_sub_entities) @property def supported_features(self): s = 0 if self._did_prefix + 'brightness' in self._mapping: s |= SUPPORT_BRIGHTNESS if self._did_prefix + 'color_temperature' in self._mapping: s |= SUPPORT_COLOR_TEMP if self._did_prefix + 'mode' in self._mapping: s |= SUPPORT_EFFECT if self._did_prefix + 'color' in self._mapping: s |= SUPPORT_COLOR return s @property def brightness(self): return self._brightness async def async_turn_on(self, **kwargs): parameters = [] if 'switch_status' in self._ctrl_params: parameters.append({**{'did': self._did_prefix + "switch_status", 'value': self._ctrl_params['switch_status']['power_on']},**(self._mapping[self._did_prefix + 'switch_status'])}) elif 'brightness' in self._ctrl_params and ATTR_BRIGHTNESS not in kwargs: parameters.append({**{'did': self._did_prefix + "brightness", 'value': self._ctrl_params['brightness']['value_range'][-2]}, **(self._mapping[self._did_prefix + 'brightness'])}) if ATTR_EFFECT in kwargs: modes = self._ctrl_params['mode'] parameters.append({**{'did': self._did_prefix + "mode", 'value': self._ctrl_params['mode'].get(kwargs[ATTR_EFFECT])}, **(self._mapping[self._did_prefix + 'mode'])}) if ATTR_BRIGHTNESS in kwargs: self._effect = None parameters.append({**{'did': self._did_prefix + "brightness", 'value': self.convert_value(kwargs[ATTR_BRIGHTNESS],"brightness", True, self._ctrl_params['brightness']['value_range'])}, **(self._mapping[self._did_prefix + 'brightness'])}) if 
ATTR_COLOR_TEMP in kwargs: self._effect = None valuerange = self._ctrl_params['color_temperature']['value_range'] ct = self.convert_value(kwargs[ATTR_COLOR_TEMP], "color_temperature") ct = valuerange[0] if ct < valuerange[0] else valuerange[1] if ct > valuerange[1] else ct parameters.append({**{'did': self._did_prefix + "color_temperature", 'value': ct}, **(self._mapping[self._did_prefix + 'color_temperature'])}) if ATTR_HS_COLOR in kwargs: self._effect = None intcolor = self.convert_value(kwargs[ATTR_HS_COLOR],'color') parameters.append({**{'did': self._did_prefix + "color", 'value': intcolor}, **(self._mapping[self._did_prefix + 'color'])}) result = await self.set_property_new(multiparams = parameters) if result: self._state = True self.async_write_ha_state() async def async_turn_off(self, **kwargs): if 'switch_status' in self._ctrl_params: prm = self._ctrl_params['switch_status']['power_off'] result = await self.set_property_new(self._did_prefix + "switch_status",prm) elif 'brightness' in self._ctrl_params: prm = self._ctrl_params['brightness']['value_range'][0] result = await self.set_property_new(self._did_prefix + "brightness",prm) else: raise NotImplementedError() if result: self._state = False self.async_write_ha_state() @property def color_temp(self): return self._color_temp @property def min_mireds(self): try: return self.convert_value(self._ctrl_params['color_temperature']['value_range'][1], "color_temperature") or 1 except KeyError: return None @property def max_mireds(self): try: return self.convert_value(self._ctrl_params['color_temperature']['value_range'][0], "color_temperature") or 100 except KeyError: return None @property def effect_list(self): return list(self._ctrl_params['mode'].keys()) @property def effect(self): return self._effect @property def hs_color(self): return self._color def _handle_platform_specific_attrs(self): super()._handle_platform_specific_attrs() try: self._brightness = self.convert_value(self._state_attrs[self._did_prefix + 'brightness'],"brightness",False,self._ctrl_params['brightness']['value_range']) except KeyError: pass try: self._color = self.convert_value(self._state_attrs[self._did_prefix + 'color'],"color",False) except KeyError: pass try: self._color_temp = self.convert_value(self._state_attrs[self._did_prefix + 'color_temperature'], "color_temperature") or 100 except KeyError: pass try: self._state_attrs.update({'color_temperature': self._state_attrs[self._did_prefix + 'color_temperature']}) except KeyError: pass try: self._state_attrs.update({'mode': self._state_attrs['mode']}) except KeyError: pass try: self._effect = self.get_key_by_value(self._ctrl_params['mode'],self._state_attrs[self._did_prefix + 'mode']) except KeyError: self._effect = None class MiotSubLight(MiotSubToggleableDevice, LightEntity): def __init__(self, parent_device, mapping, params, mitype): super().__init__(parent_device, mapping, params, mitype) self._brightness = None self._color = None self._color_temp = None self._effect = None @property def supported_features(self): s = 0 if 'brightness' in self._mapping: s |= SUPPORT_BRIGHTNESS if 'color_temperature' in self._mapping: s |= SUPPORT_COLOR_TEMP if 'mode' in self._mapping: s |= SUPPORT_EFFECT if 'color' in self._mapping: s |= SUPPORT_COLOR return s @property
Apache License 2.0
dirty-cat/dirty_cat
dirty_cat/datasets/fetching.py
fetch_midwest_survey
python
def fetch_midwest_survey(load_dataframe: bool = True) -> namedtuple:
    return fetch_dataset_as_namedtuple(
        dataset_id=MIDWEST_SURVEY_ID,
        target='Census_Region',
        read_csv_kwargs={
            'quotechar': "'",
            'escapechar': '\\',
        },
        load_dataframe=load_dataframe,
    )
Fetches the midwest survey dataset.

See Also
--------
dirty_cat.datasets.fetch_dataset_as_namedtuple : additional information
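A short usage sketch, assuming the function is exported from dirty_cat.datasets as in released versions and that OpenML is reachable on first call:

from dirty_cat.datasets import fetch_midwest_survey

dataset = fetch_midwest_survey()        # downloads and caches OpenML dataset 42805 on first use
print(dataset.source)                   # https://www.openml.org/d/42805
print(dataset.X.shape, dataset.y.name)  # feature DataFrame and the 'Census_Region' target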
https://github.com/dirty-cat/dirty_cat/blob/3aeb866a11b869a360155043ede6868c7dfb8306/dirty_cat/datasets/fetching.py#L440-L455
import gzip import json import sklearn import warnings import pandas as pd from pathlib import Path from collections import namedtuple from distutils.version import LooseVersion from dirty_cat.datasets.utils import get_data_dir Details = namedtuple("Details", ["name", "file_id", "description"]) Features = namedtuple("Features", ["names"]) DatasetAll = namedtuple('Dataset', ('description', 'X', 'y', 'source', 'path')) DatasetInfoOnly = namedtuple('Dataset', ('description', 'source', 'path', 'read_csv_kwargs')) DETAILS_DIRECTORY = "openml/openml.org/api/v1/json/data/" FEATURES_DIRECTORY = "openml/openml.org/api/v1/json/data/features/" DATA_DIRECTORY = "openml/openml.org/data/v1/download/" openml_url = "https://www.openml.org/d/{ID}" ROAD_SAFETY_ID = 42803 OPEN_PAYMENTS_ID = 42738 MIDWEST_SURVEY_ID = 42805 MEDICAL_CHARGE_ID = 42720 EMPLOYEE_SALARIES_ID = 42125 TRAFFIC_VIOLATIONS_ID = 42132 DRUG_DIRECTORY_ID = 43044 def fetch_openml_dataset(dataset_id: int, data_directory: Path = get_data_dir()) -> dict: data_directory = data_directory.resolve() details_gz_path = data_directory / DETAILS_DIRECTORY / f'{dataset_id}.gz' features_gz_path = data_directory / FEATURES_DIRECTORY / f'{dataset_id}.gz' if not details_gz_path.is_file() or not features_gz_path.is_file(): warnings.warn( f"Could not find the dataset {dataset_id} locally. " "Downloading it from OpenML; this might take a while... " "If it is interrupted, some files might be invalid/incomplete: " "if on the following run, the fetching raises errors, you can try " f"fixing this issue by deleting the directory {data_directory!r}." ) _download_and_write_openml_dataset(dataset_id=dataset_id, data_directory=data_directory) details = _get_details(details_gz_path) file_id = details.file_id csv_path = data_directory / f'{details.name}.csv' data_gz_path = data_directory / DATA_DIRECTORY / f'{file_id}.gz' if not data_gz_path.is_file(): _download_and_write_openml_dataset(dataset_id=dataset_id, data_directory=data_directory) if not csv_path.is_file(): features = _get_features(features_gz_path) _export_gz_data_to_csv(data_gz_path, csv_path, features) url = openml_url.format(ID=dataset_id) return { "description": details.description, "source": url, "path": csv_path.resolve() } def _download_and_write_openml_dataset(dataset_id: int, data_directory: Path) -> None: from sklearn.datasets import fetch_openml fetch_kwargs = {} if LooseVersion(sklearn.__version__) >= LooseVersion('0.22'): fetch_kwargs.update({'as_frame': True}) fetch_openml( data_id=dataset_id, data_home=str(data_directory), **fetch_kwargs ) def _read_json_from_gz(compressed_dir_path: Path) -> dict: if not compressed_dir_path.is_file(): raise FileNotFoundError(f"Couldn't find file {compressed_dir_path!s}") with gzip.open(compressed_dir_path, mode='rt') as gz: content = gz.read() details_json = json.JSONDecoder().decode(content) return details_json def _get_details(compressed_dir_path: Path) -> Details: details = _read_json_from_gz(compressed_dir_path)["data_set_description"] f_details = { "name": details["name"], "file_id": details["file_id"], "description": details["description"], } return Details(*f_details.values()) def _get_features(compressed_dir_path: Path) -> Features: raw_features = _read_json_from_gz(compressed_dir_path)["data_features"] features = { "names": [column["name"] for column in raw_features["feature"]] } return Features(*features.values()) def _export_gz_data_to_csv(compressed_dir_path: Path, destination_file: Path, features: Features) -> None: atdata_found = False with 
destination_file.open(mode="w", encoding='utf8') as csv: with gzip.open(compressed_dir_path, mode="rt", encoding='utf8') as gz: csv.write(_features_to_csv_format(features)) csv.write("\n") for line in gz.readlines(): if not atdata_found: if line.lower().startswith("@data"): atdata_found = True else: csv.write(line) def _features_to_csv_format(features: Features) -> str: return ",".join(features.names) def fetch_dataset_as_namedtuple(dataset_id: int, target: str, read_csv_kwargs: dict, load_dataframe: bool) -> namedtuple: info = fetch_openml_dataset(dataset_id) if load_dataframe: df = pd.read_csv(info['path'], **read_csv_kwargs) y = df[target] X = df.drop(target, axis='columns') dataset = DatasetAll( description=info['description'], X=X, y=y, source=info['source'], path=info['path'], ) else: dataset = DatasetInfoOnly( description=info['description'], source=info['source'], path=info['path'], read_csv_kwargs=read_csv_kwargs, ) return dataset def fetch_employee_salaries(load_dataframe: bool = True, drop_linked: bool = True, drop_irrelevant: bool = True) -> namedtuple: dataset = fetch_dataset_as_namedtuple( dataset_id=EMPLOYEE_SALARIES_ID, target='current_annual_salary', read_csv_kwargs={ 'quotechar': "'", 'escapechar': '\\', 'na_values': ['?'], }, load_dataframe=load_dataframe, ) if load_dataframe: if drop_linked: dataset.X.drop(["2016_gross_pay_received", "2016_overtime_pay"], axis=1, inplace=True) if drop_irrelevant: dataset.X.drop(["full_name"], axis=1, inplace=True) return dataset def fetch_road_safety(load_dataframe: bool = True) -> namedtuple: return fetch_dataset_as_namedtuple( dataset_id=ROAD_SAFETY_ID, target='Sex_of_Driver', read_csv_kwargs={ 'na_values': ['?'], }, load_dataframe=load_dataframe, ) def fetch_medical_charge(load_dataframe: bool = True) -> namedtuple: return fetch_dataset_as_namedtuple( dataset_id=MEDICAL_CHARGE_ID, target='Average_Total_Payments', read_csv_kwargs={ 'quotechar': "'", 'escapechar': '\\', }, load_dataframe=load_dataframe, )
BSD 3-Clause New or Revised License
jyveapp/django-action-framework
daf/urls.py
get_url_patterns
python
def get_url_patterns(
    interfaces, *, path_prefix='', name_prefix='', interface_kwargs=None
):
    interface_kwargs = interface_kwargs or {}
    return [
        urls.path(
            f'{path_prefix}{interface.url_path}/',
            interface.as_interface(**interface_kwargs),
            name=f'{name_prefix}{interface.url_name}',
        )
        for interface in interfaces
    ]
Generate URL patterns for interfaces.
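A minimal sketch of wiring this into a project's urls.py; MyActionInterface and the prefixes are hypothetical placeholders:

import daf.urls
from myapp.interfaces import MyActionInterface  # hypothetical interface class

urlpatterns = daf.urls.get_url_patterns(
    [MyActionInterface],
    path_prefix='actions/',
    name_prefix='daf-',
)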
https://github.com/jyveapp/django-action-framework/blob/c3d32064a32444c74238ded7a47c5e70a13f7278/daf/urls.py#L4-L19
from django import urls
BSD 3-Clause New or Revised License
alterway/anonymization
anonymization/Anonymization.py
AnonymizerChain.pseudonymize
python
def pseudonymize(self, text: str):
    # Returns a (clean_text, patch) tuple rather than a plain string.
    dmp = diff_match_patch()
    clean = self.anonymize(text)
    diff = dmp.diff_main(clean, text)
    patch = dmp.patch_make(clean, text, diff)
    return clean, patch
Run all registered anonymizers on a text and also return the diff patch.
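A usage sketch, assuming an EmailAnonymizer is exported by the package (it is not part of this excerpt); pseudonymize hands back both the cleaned text and a diff-match-patch patch that can later restore the original:

from anonymization import Anonymization, AnonymizerChain, EmailAnonymizer  # EmailAnonymizer assumed

anonymizer = AnonymizerChain(Anonymization('en_US'))
anonymizer.add_anonymizers(EmailAnonymizer)

clean, patch = anonymizer.pseudonymize("Contact me at jane.doe@example.com")
# 'clean' holds the faked text; 'patch' lets diff_match_patch recover the original later.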
https://github.com/alterway/anonymization/blob/57e6c20f8c97e902f3513b5adfdbc211791aaef0/anonymization/Anonymization.py#L99-L108
from collections import defaultdict from typing import Iterable, Pattern, Callable, List, Any import re from faker import Factory from .lib.diff_match_patch import diff_match_patch class Anonymization: def __init__(self, locale: str): self.locale = locale self.faker = Factory.create(locale) self.anonDicts = {} def getFake(self, provider: str, match: str) -> str: if not provider in self.anonDicts: self.anonDicts[provider] = defaultdict(getattr(self.faker, provider)) return self.anonDicts[provider][match] def replace_all(self, text: str, matchs: Iterable[str], provider: str) -> str: for match in matchs: text = text.replace(match, self.getFake(provider, match)) return text def regex_anonymizer(self, text: str, regex: Pattern, provider: str) -> str: matchs = re.findall(regex, text) return self.replace_all(text, matchs, provider) def add_provider(self, provider): return self.faker.add_provider(provider) class AnonymizerChain: def __init__(self, anonymization: Anonymization): self.anonymization = anonymization self._anonymizers = [] def add_anonymizers(self, *args: Iterable[Callable[[Anonymization], Any]]) -> None: for arg in args: self._anonymizers.append(arg(self.anonymization)) def clear_anonymizers(self) -> None: self._anonymizers = [] def anonymize(self, text: str) -> str: for anonymizer in self._anonymizers: text = anonymizer.anonymize(text) return text def anonymize_all(self, texts: Iterable[str]) -> List[str]: return [self.anonymize(text) for text in texts] def evaluate(self, text: str) -> str: result = [] for anonymizer in self._anonymizers: result += anonymizer.evaluate(text) return result
MIT License
dhermes/bezier
docs/make_images.py
triangle_constructor
python
def triangle_constructor(triangle):
    if NO_IMAGES:
        return

    ax = triangle.plot(256, color=BLUE, with_nodes=True)
    line = ax.lines[0]
    nodes = triangle._nodes
    add_patch(ax, nodes[:, (0, 1, 2, 5)], line.get_color())
    delta = 1.0 / 32.0
    ax.text(
        nodes[0, 0],
        nodes[1, 0],
        r"$v_0$",
        fontsize=20,
        verticalalignment="top",
        horizontalalignment="right",
    )
    ax.text(
        nodes[0, 1],
        nodes[1, 1],
        r"$v_1$",
        fontsize=20,
        verticalalignment="top",
        horizontalalignment="center",
    )
    ax.text(
        nodes[0, 2],
        nodes[1, 2],
        r"$v_2$",
        fontsize=20,
        verticalalignment="top",
        horizontalalignment="left",
    )
    ax.text(
        nodes[0, 3] - delta,
        nodes[1, 3],
        r"$v_3$",
        fontsize=20,
        verticalalignment="center",
        horizontalalignment="right",
    )
    ax.text(
        nodes[0, 4] + delta,
        nodes[1, 4],
        r"$v_4$",
        fontsize=20,
        verticalalignment="center",
        horizontalalignment="left",
    )
    ax.text(
        nodes[0, 5],
        nodes[1, 5] + delta,
        r"$v_5$",
        fontsize=20,
        verticalalignment="bottom",
        horizontalalignment="center",
    )
    ax.axis("scaled")
    ax.set_xlim(-0.125, 1.125)
    ax.set_ylim(-0.125, 1.125)
    save_image(ax.figure, "triangle_constructor.png")
Image for :class:`.Triangle` docstring.
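A hedged sketch of invoking the helper when regenerating documentation images; the quadratic node layout is illustrative only, and GENERATE_IMAGES must be set in the environment or the function returns immediately:

import numpy as np
import bezier

nodes = np.asfortranarray([           # illustrative degree-2 triangle nodes v_0 .. v_5
    [0.0, 0.5, 1.0, 0.1, 0.6, 0.2],
    [0.0, 0.0, 0.25, 0.5, 0.5, 1.0],
])
triangle = bezier.Triangle(nodes, degree=2)
triangle_constructor(triangle)        # writes docs/images/triangle_constructor.png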
https://github.com/dhermes/bezier/blob/4b54ac978158870122579c6fb7dbf070ac20a2d6/docs/make_images.py#L406-L467
import os try: from matplotlib import patches from matplotlib import path as _path_mod import matplotlib.pyplot as plt except ImportError: patches = None _path_mod = None plt = None import numpy as np try: import seaborn except ImportError: seaborn = None import bezier from bezier import _geometric_intersection from bezier import _helpers from bezier import _plot_helpers from bezier.hazmat import clipping from bezier.hazmat import geometric_intersection as _py_geometric_intersection BLUE = "blue" GREEN = "green" RED = "red" if seaborn is not None: seaborn.set() _COLORS = seaborn.color_palette(palette="deep", n_colors=6) BLUE = _COLORS[0] GREEN = _COLORS[2] RED = _COLORS[3] del _COLORS _DOCS_DIR = os.path.abspath(os.path.dirname(__file__)) IMAGES_DIR = os.path.join(_DOCS_DIR, "images") NO_IMAGES = "GENERATE_IMAGES" not in os.environ def save_image(figure, filename): path = os.path.join(IMAGES_DIR, filename) figure.savefig(path, bbox_inches="tight") plt.close(figure) def stack1d(*points): result = np.empty((2, len(points)), order="F") for index, point in enumerate(points): result[:, index] = point return result def linearization_error(nodes): if NO_IMAGES: return curve = bezier.Curve.from_nodes(nodes) line = bezier.Curve.from_nodes(nodes[:, (0, -1)]) midpoints = np.hstack([curve.evaluate(0.5), line.evaluate(0.5)]) ax = curve.plot(256, color=BLUE) line.plot(256, ax=ax, color=GREEN) ax.plot( midpoints[0, :], midpoints[1, :], color="black", linestyle="dashed" ) ax.axis("scaled") save_image(ax.figure, "linearization_error.png") def newton_refine1(s, new_s, curve1, t, new_t, curve2): if NO_IMAGES: return points = np.hstack([curve1.evaluate(s), curve2.evaluate(t)]) points_new = np.hstack([curve1.evaluate(new_s), curve2.evaluate(new_t)]) ax = curve1.plot(256, color=BLUE) curve2.plot(256, ax=ax, color=GREEN) ax.plot( points[0, :], points[1, :], color="black", linestyle="None", marker="o", markeredgewidth=1, markerfacecolor="None", ) ax.plot( points_new[0, :], points_new[1, :], color="black", linestyle="None", marker="o", ) ax.axis("scaled") save_image(ax.figure, "newton_refine1.png") def newton_refine2(s_vals, curve1, curve2): if NO_IMAGES: return ax = curve1.plot(256, color=BLUE) ax.lines[-1].zorder = 1 curve2.plot(256, ax=ax, color=GREEN) ax.lines[-1].zorder = 1 points = curve1.evaluate_multi(np.asfortranarray(s_vals)) colors = seaborn.dark_palette("blue", 5) ax.scatter( points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2 ) ax.axis("scaled") ax.set_xlim(0.0, 1.0) ax.set_ylim(0.0, 1.0) save_image(ax.figure, "newton_refine2.png") def newton_refine3(s_vals, curve1, curve2): if NO_IMAGES: return ax = curve1.plot(256, color=BLUE) ax.lines[-1].zorder = 1 curve2.plot(256, ax=ax, color=GREEN) ax.lines[-1].zorder = 1 points = curve1.evaluate_multi(np.asfortranarray(s_vals)) colors = seaborn.dark_palette("blue", 6) ax.scatter( points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2 ) ax.axis("scaled") ax.set_xlim(0.0, 1.0) ax.set_ylim(0.0, 0.5625) save_image(ax.figure, "newton_refine3.png") def segment_intersection1(start0, end0, start1, end1, s): if NO_IMAGES: return line0 = bezier.Curve.from_nodes(stack1d(start0, end0)) line1 = bezier.Curve.from_nodes(stack1d(start1, end1)) ax = line0.plot(2, color=BLUE) line1.plot(256, ax=ax, color=GREEN) (x_val,), (y_val,) = line0.evaluate(s) ax.plot([x_val], [y_val], color="black", marker="o") ax.axis("scaled") save_image(ax.figure, "segment_intersection1.png") def segment_intersection2(start0, end0, start1, end1): if NO_IMAGES: return line0 = 
bezier.Curve.from_nodes(stack1d(start0, end0)) line1 = bezier.Curve.from_nodes(stack1d(start1, end1)) ax = line0.plot(2, color=BLUE) line1.plot(2, ax=ax, color=GREEN) ax.axis("scaled") save_image(ax.figure, "segment_intersection2.png") def helper_parallel_lines(start0, end0, start1, end1, filename): if NO_IMAGES: return figure = plt.figure() ax = figure.gca() points = stack1d(start0, end0, start1, end1) ax.plot(points[0, :2], points[1, :2], marker="o", color=BLUE) ax.plot(points[0, 2:], points[1, 2:], marker="o", color=GREEN) ax.axis("scaled") _plot_helpers.add_plot_boundary(ax) save_image(figure, filename) def add_patch( ax, nodes, color, with_nodes=True, alpha=0.625, node_color="black" ): path = _path_mod.Path(nodes.T) patch = patches.PathPatch( path, edgecolor=color, facecolor=color, alpha=alpha ) ax.add_patch(patch) if with_nodes: ax.plot( nodes[0, :], nodes[1, :], color=node_color, linestyle="None", marker="o", ) def curve_constructor(curve): if NO_IMAGES: return ax = curve.plot(256, color=BLUE) line = ax.lines[0] nodes = curve._nodes ax.plot( nodes[0, :], nodes[1, :], color="black", linestyle="None", marker="o" ) add_patch(ax, nodes, line.get_color()) ax.axis("scaled") ax.set_xlim(-0.125, 1.125) ax.set_ylim(-0.0625, 0.5625) save_image(ax.figure, "curve_constructor.png") def curve_evaluate(curve): if NO_IMAGES: return ax = curve.plot(256, color=BLUE) points = curve.evaluate_multi(np.asfortranarray([0.75])) ax.plot( points[0, :], points[1, :], color="black", linestyle="None", marker="o" ) ax.axis("scaled") ax.set_xlim(-0.125, 1.125) ax.set_ylim(-0.0625, 0.5625) save_image(ax.figure, "curve_evaluate.png") def curve_evaluate_hodograph(curve, s): if NO_IMAGES: return ax = curve.plot(256, color=BLUE) points = curve.evaluate_multi(np.asfortranarray([s])) if points.shape != (2, 1): raise ValueError("Unexpected shape", points) point = points[:, 0] tangents = curve.evaluate_hodograph(s) if tangents.shape != (2, 1): raise ValueError("Unexpected shape", tangents) tangent = tangents[:, 0] ax.plot( [point[0] - 2 * tangent[0], point[0] + 2 * tangent[0]], [point[1] - 2 * tangent[1], point[1] + 2 * tangent[1]], color=BLUE, alpha=0.5, ) ax.plot( [point[0], point[0] + tangent[0]], [point[1], point[1] + tangent[1]], color="black", linestyle="dashed", marker="o", markersize=5, ) ax.axis("scaled") ax.set_xlim(-0.125, 1.75) ax.set_ylim(-0.0625, 0.75) save_image(ax.figure, "curve_evaluate_hodograph.png") def curve_subdivide(curve, left, right): if NO_IMAGES: return figure = plt.figure() ax = figure.gca() add_patch(ax, curve._nodes, "gray") ax = left.plot(256, ax=ax, color=BLUE) line = ax.lines[-1] add_patch(ax, left._nodes, line.get_color()) right.plot(256, ax=ax, color=GREEN) line = ax.lines[-1] add_patch(ax, right._nodes, line.get_color()) ax.axis("scaled") ax.set_xlim(-0.125, 2.125) ax.set_ylim(-0.125, 3.125) save_image(ax.figure, "curve_subdivide.png") def curve_intersect(curve1, curve2, s_vals): if NO_IMAGES: return ax = curve1.plot(256, color=BLUE) curve2.plot(256, ax=ax, color=GREEN) intersections = curve1.evaluate_multi(s_vals) ax.plot( intersections[0, :], intersections[1, :], color="black", linestyle="None", marker="o", ) ax.axis("scaled") ax.set_xlim(0.0, 0.75) ax.set_ylim(0.0, 0.75) save_image(ax.figure, "curve_intersect.png") def curve_self_intersect2(curve, self_intersections): if NO_IMAGES: return ax = curve.plot(256, color=BLUE) if self_intersections.shape != (2, 1): raise ValueError("Unexpected shape", self_intersections) s1_val = self_intersections[0, 0] intersection_xy = 
curve.evaluate(s1_val) ax.plot( intersection_xy[0, :], intersection_xy[1, :], color="black", linestyle="None", marker="o", ) ax.axis("scaled") ax.set_xlim(-0.8125, 0.0625) ax.set_ylim(0.75, 2.125) save_image(ax.figure, "curve_self_intersect2.png") def curve_self_intersect3(curve, self_intersections): if NO_IMAGES: return ax = curve.plot(256, color=BLUE) if self_intersections.shape != (2, 2): raise ValueError("Unexpected shape", self_intersections) s1_vals = np.asfortranarray(self_intersections[0, :]) intersection_xy = curve.evaluate_multi(s1_vals) ax.plot( intersection_xy[0, :], intersection_xy[1, :], color="black", linestyle="None", marker="o", ) ax.axis("scaled") ax.set_xlim(-330.0, 330.0) ax.set_ylim(0.125, 266.0) save_image(ax.figure, "curve_self_intersect3.png")
Apache License 2.0
nastools/homeassistant
homeassistant/components/media_player/panasonic_viera.py
PanasonicVieraTVDevice.turn_off
python
def turn_off(self):
    self.send_key('NRC_POWER-ONOFF')
Turn off media player.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/media_player/panasonic_viera.py#L128-L130
import logging import voluptuous as vol from homeassistant.components.media_player import ( SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.const import ( CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_PORT) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['panasonic_viera==0.2'] _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'Panasonic Viera TV' DEFAULT_PORT = 55000 SUPPORT_VIERATV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }) def setup_platform(hass, config, add_devices, discovery_info=None): from panasonic_viera import RemoteControl name = config.get(CONF_NAME) port = config.get(CONF_PORT) if discovery_info: _LOGGER.debug('%s', discovery_info) vals = discovery_info.split(':') if len(vals) > 1: port = vals[1] host = vals[0] remote = RemoteControl(host, port) add_devices([PanasonicVieraTVDevice(name, remote)]) return True host = config.get(CONF_HOST) remote = RemoteControl(host, port) try: remote.get_mute() except OSError as error: _LOGGER.error('Panasonic Viera TV is not available at %s:%d: %s', host, port, error) return False add_devices([PanasonicVieraTVDevice(name, remote)]) return True class PanasonicVieraTVDevice(MediaPlayerDevice): def __init__(self, name, remote): self._name = name self._muted = False self._playing = True self._state = STATE_UNKNOWN self._remote = remote self._volume = 0 def update(self): try: self._muted = self._remote.get_mute() self._volume = self._remote.get_volume() / 100 self._state = STATE_ON except OSError: self._state = STATE_OFF def send_key(self, key): try: self._remote.send_key(key) self._state = STATE_ON except OSError: self._state = STATE_OFF return False return True @property def name(self): return self._name @property def state(self): return self._state @property def volume_level(self): return self._volume @property def is_volume_muted(self): return self._muted @property def supported_media_commands(self): return SUPPORT_VIERATV
MIT License
tducret/amazon-scraper-python
amazonscraper/client.py
Client._get_title
python
def _get_title(self, product):
    title_css_selectors = [
        'h5 span',
        "a.s-access-detail-page > h2",
        "div div.sg-row h5 > span",
    ]
    for selector in title_css_selectors:
        title = _css_select(product, selector)
        if title:
            break
    if not title:
        print(' Failed to extract title!')
    return title
Given the HTML of a `product`, extract the title
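A hedged usage sketch of the extractor above. The markup is illustrative, and it assumes the module-level `_css_select` helper (used by the function but not shown here) returns the text of the first matching element:

from bs4 import BeautifulSoup

# Illustrative product markup; in the scraper, `product` is one result node of a search page.
product = BeautifulSoup("<div><h5><span>Example product</span></h5></div>", "html.parser")
client = Client()
print(client._get_title(product))  # expected: "Example product", matched by the 'h5 span' selector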
https://github.com/tducret/amazon-scraper-python/blob/30f812f0d2f2e7dd2f2af7ec1ad23626a0a0cabd/amazonscraper/client.py#L222-L241
import requests import re from urllib.parse import urljoin from bs4 import BeautifulSoup import time _BASE_URL = "https://www.amazon.com/" _DEFAULT_BEAUTIFULSOUP_PARSER = "html.parser" _DEFAULT_USER_AGENT = 'Mozilla/5.0 (Linux; Android 7.0; \ SM-A520F Build/NRD90M; wv) AppleWebKit/537.36 \ (KHTML, like Gecko) Version/4.0 \ Chrome/65.0.3325.109 Mobile Safari/537.36' _CHROME_DESKTOP_USER_AGENT = 'Mozilla/5.0 (Macintosh; \ Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) \ Chrome/67.0.3396.79 Safari/537.36' _USER_AGENT_LIST = [ _DEFAULT_USER_AGENT, _CHROME_DESKTOP_USER_AGENT, ] _CSS_SELECTORS_MOBILE = { "product": "#resultItems > li", "title": "a > div > div.sx-table-detail > h5 > span", "rating": "a > div > div.sx-table-detail > \ div.a-icon-row.a-size-small > i > span", "review_nb": "a > div > div.sx-table-detail > \ div.a-icon-row.a-size-small > span", "url": "a[href]", "img": "img[src]", "next_page_url": "ul.a-pagination > li.a-last > a[href]", } _CSS_SELECTORS_MOBILE_GRID = { "product": "#grid-atf-content > li > div.s-item-container", "title": "a > div > h5.sx-title > span", "rating": "a > div > div.a-icon-row.a-size-mini > i > span", "review_nb": "a > div > div.a-icon-row.a-size-mini > span", "url": "a[href]", "img": "img[src]", "next_page_url": "ul.a-pagination > li.a-last > a[href]", } _CSS_SELECTORS_DESKTOP = { "product": "ul > li.s-result-item > div.s-item-container", "title": "a.s-access-detail-page > h2", "rating": "i.a-icon-star > span", "review_nb": "div.a-column.a-span5.a-span-last > \ div.a-row.a-spacing-mini > \ a.a-size-small.a-link-normal.a-text-normal", "url": "div.a-row.a-spacing-small > div.a-row.a-spacing-none > a[href]", "img": "div.a-column.a-span12.a-text-center > a.a-link-normal.a-text-normal > img[src]", "next_page_url": "a#pagnNextLink", } _CSS_SELECTORS_DESKTOP_2 = { "product": "div.s-result-list.sg-row > div.s-result-item", "title": "div div.sg-row h5 > span", "rating": "div div.sg-row .a-spacing-top-mini i span", "review_nb": "div div.sg-row .a-spacing-top-mini span.a-size-small", "url": "div div a.a-link-normal", "img": "img[src]", "next_page_url": "li.a-last > a[href]", } _CSS_SELECTOR_LIST = [ _CSS_SELECTORS_MOBILE, _CSS_SELECTORS_MOBILE_GRID, _CSS_SELECTORS_DESKTOP, _CSS_SELECTORS_DESKTOP_2, ] _MAX_TRIAL_REQUESTS = 5 _WAIT_TIME_BETWEEN_REQUESTS = 1 class Client(object): def __init__(self): self.session = requests.session() self.current_user_agent_index = 0 self.headers = { 'Host': 'www.amazon.com', 'User-Agent': _USER_AGENT_LIST[0], 'Accept': 'text/html,application/xhtml+xml,\ application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', } self.product_dict_list = [] self.html_pages = [] def _change_user_agent(self): index = (self.current_user_agent_index + 1) % len(_USER_AGENT_LIST) self.headers['User-Agent'] = _USER_AGENT_LIST[index] self.current_user_agent_index = index def _get(self, url): ret = self.session.get(url, headers=self.headers) if ret.status_code != 200: raise ConnectionError( 'Status code {status} for url {url}\n{content}'.format( status=ret.status_code, url=url, content=ret.text)) return ret def _update_headers(self, search_url): self.base_url = "https://" + search_url.split("://")[1].split("/")[0] + "/" self.headers['Host'] = self.base_url.split("://")[1].split("/")[0] def _get_search_url(self, keywords): search_url = urljoin(_BASE_URL, ("s?k=%s" % (keywords))) return search_url def _check_page(self, html_content): if "Sign in for the best experience" in html_content: valid_page = False elif "The request could not be satisfied." 
in html_content: valid_page = False elif "Robot Check" in html_content: valid_page = False else: valid_page = True return valid_page def _get_page_html(self, search_url): trials = 0 res = None while trials < _MAX_TRIAL_REQUESTS: print('Trying user agent: {}'.format(self.headers['User-Agent'])) trials += 1 try: res = self._get(search_url) valid_page = self._check_page(res.text) except requests.exceptions.SSLError: valid_page = False except ConnectionError: valid_page = False if valid_page: break self._change_user_agent() time.sleep(_WAIT_TIME_BETWEEN_REQUESTS) if not valid_page: raise ValueError('No valid pages found! Perhaps the page returned is a CAPTCHA? Check products.last_html_page') return res.text def _get_n_ratings(self, product): n_ratings_css_selectors = [ "div.a-row.a-size-small span.a-size-base", "div div.sg-row .a-spacing-top-mini span.a-size-small", "div.a-column.a-span5.a-span-last > div.a-row.a-spacing-mini > a.a-size-small.a-link-normal.a-text-normal", ] for selector in n_ratings_css_selectors: n_ratings = _css_select(product, selector) try: n_ratings = int(n_ratings.replace(',', '')) break except ValueError: pass if not n_ratings: print(f' Failed to extract number of ratings!') return float('nan') return n_ratings
MIT License
xuru/pyvisdk
pyvisdk/mo/distributed_virtual_switch.py
DistributedVirtualSwitch.MergeDvs_Task
python
def MergeDvs_Task(self, dvs):
    return self.delegate("MergeDvs_Task")(dvs)
Merge an existing DistributedVirtualSwitch (source) to this switch (destination). The host members and the connected entity of the source switch will be transferred to the destination switch. This operation disconnects the entities from the source switch, tears down its host proxy VirtualSwitches, creates new ones for the destination switch, and reconnects the entities to the destination switch.

:param dvs: The switch (source) to be merged
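A minimal call sketch, assuming `destination_dvs` and `source_dvs` are `DistributedVirtualSwitch` instances already obtained from a pyvisdk connection (both variable names are hypothetical):

# The call is delegated to the vSphere API and returns a task object, per MergeDvs_Task above.
task = destination_dvs.MergeDvs_Task(dvs=source_dvs)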
https://github.com/xuru/pyvisdk/blob/de24eb4426eb76233dc2e57640d3274ffd304eb3/pyvisdk/mo/distributed_virtual_switch.py#L91-L112
from pyvisdk.base.managed_object_types import ManagedObjectTypes from pyvisdk.mo.managed_entity import ManagedEntity import logging log = logging.getLogger(__name__) class DistributedVirtualSwitch(ManagedEntity): def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.DistributedVirtualSwitch): super(DistributedVirtualSwitch, self).__init__(core, name=name, ref=ref, type=type) @property def capability(self): return self.update('capability') @property def config(self): return self.update('config') @property def networkResourcePool(self): return self.update('networkResourcePool') @property def portgroup(self): return self.update('portgroup') @property def summary(self): return self.update('summary') @property def uuid(self): return self.update('uuid') def AddDVPortgroup_Task(self, spec): return self.delegate("AddDVPortgroup_Task")(spec) def AddNetworkResourcePool(self, configSpec): return self.delegate("AddNetworkResourcePool")(configSpec) def EnableNetworkResourceManagement(self, enable): return self.delegate("EnableNetworkResourceManagement")(enable) def FetchDVPortKeys(self, criteria=None): return self.delegate("FetchDVPortKeys")(criteria) def FetchDVPorts(self, criteria=None): return self.delegate("FetchDVPorts")(criteria)
MIT License
tcalmant/ipopo
pelix/shell/parser.py
_find_assignment
python
def _find_assignment(arg_token):
    idx = arg_token.find("=")
    while idx != -1:
        if idx != 0 and arg_token[idx - 1] != "\\":
            return idx
        idx = arg_token.find("=", idx + 1)
    return -1
Find the first non-escaped assignment in the given argument token. Returns -1 if no assignment was found.

:param arg_token: The argument token
:return: The index of the first assignment, or -1
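A short usage sketch of the helper above; the tokens are illustrative:

_find_assignment("key=value")        # 3: index of the first '='
_find_assignment(r"key\=value=1")    # 10: the escaped '=' at index 4 is skipped
_find_assignment("no.assignment")    # -1: nothing to find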
https://github.com/tcalmant/ipopo/blob/1d4b81207e67890dfccc8f562336c7104f194c17/pelix/shell/parser.py#L62-L79
import collections import inspect import logging import shlex import string import sys from pelix.utilities import to_str, get_method_arguments import pelix.shell.beans as beans from pelix.shell.completion.decorators import ATTR_COMPLETERS, CompletionInfo __version_info__ = (1, 0, 1) __version__ = ".".join(str(x) for x in __version_info__) __docformat__ = "restructuredtext en" DEFAULT_NAMESPACE = "default"
Apache License 2.0
sql-machine-learning/models
sqlflow_models/simple_dnn_generator.py
_SimpleDNNBuilder.__init__
python
def __init__(self, feature_columns, optimizer, layer_size, num_layers,
             learn_mixture_weights, seed):
    self._optimizer = optimizer
    self._layer_size = layer_size
    self._num_layers = num_layers
    self._learn_mixture_weights = learn_mixture_weights
    self._feature_columns = feature_columns
    self._seed = seed
Initializes a `_DNNBuilder`.

Args:
  optimizer: An `Optimizer` instance for training both the subnetwork and the mixture weights.
  layer_size: The number of nodes to output at each hidden layer.
  num_layers: The number of hidden layers.
  learn_mixture_weights: Whether to solve a learning problem to find the best mixture weights, or use their default value according to the mixture weight type. When `False`, the subnetworks will return a no_op for the mixture weight train op.
  seed: A random seed.

Returns:
  An instance of `_SimpleDNNBuilder`.
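A hedged construction sketch; the feature column, optimizer, and hyperparameter values are illustrative only and assume a TF1-style setup like the surrounding module:

import tensorflow as tf

builder = _SimpleDNNBuilder(
    feature_columns=[tf.feature_column.numeric_column("x")],
    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    layer_size=64,
    num_layers=2,
    learn_mixture_weights=False,
    seed=42,
)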
https://github.com/sql-machine-learning/models/blob/5dc6421f562ea447e501fa355a48a6ee89856a1d/sqlflow_models/simple_dnn_generator.py#L16-L40
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import adanet import tensorflow as tf _NUM_LAYERS_KEY = "num_layers" class _SimpleDNNBuilder(adanet.subnetwork.Builder):
Apache License 2.0
neovim/pynvim
pynvim/api/common.py
Remote.__repr__
python
def __repr__(self):
    return '<%s(handle=%r)>' % (
        self.__class__.__name__,
        self.handle,
    )
Get text representation of the object.
https://github.com/neovim/pynvim/blob/71102c03efdcd6e7a1db1057a68478fc2249734d/pynvim/api/common.py#L40-L45
import functools from msgpack import unpackb from pynvim.compat import unicode_errors_default __all__ = () class NvimError(Exception): pass class Remote(object): def __init__(self, session, code_data): self._session = session self.code_data = code_data self.handle = unpackb(code_data[1]) self.api = RemoteApi(self, self._api_prefix) self.vars = RemoteMap(self, self._api_prefix + 'get_var', self._api_prefix + 'set_var', self._api_prefix + 'del_var') self.options = RemoteMap(self, self._api_prefix + 'get_option', self._api_prefix + 'set_option')
Apache License 2.0
xknx/xknx
xknx/devices/devices.py
Devices.__getitem__
python
def __getitem__(self, key: str | int) -> Device:
    for device in self.__devices:
        if device.name == key:
            return device
    if isinstance(key, int):
        return self.__devices[key]
    raise KeyError
Return device by name or by index.
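A quick access sketch, assuming `devices` is a populated `Devices` container; the device name is illustrative:

light = devices["Livingroom.Outlet_2"]  # lookup by device name
first = devices[0]                       # fallback lookup by list index for int keys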
https://github.com/xknx/xknx/blob/87666cc9bd9da64a84305baeff84486097346111/xknx/devices/devices.py#L48-L55
from __future__ import annotations from typing import Awaitable, Callable, Iterator from xknx.telegram import Telegram from xknx.telegram.address import DeviceGroupAddress, GroupAddress, InternalGroupAddress from .device import Device DeviceCallbackType = Callable[[Device], Awaitable[None]] class Devices: def __init__(self) -> None: self.__devices: list[Device] = [] self.device_updated_cbs: list[DeviceCallbackType] = [] def register_device_updated_cb(self, device_updated_cb: DeviceCallbackType) -> None: self.device_updated_cbs.append(device_updated_cb) def unregister_device_updated_cb( self, device_updated_cb: DeviceCallbackType ) -> None: self.device_updated_cbs.remove(device_updated_cb) def __iter__(self) -> Iterator[Device]: yield from self.__devices def devices_by_group_address( self, group_address: DeviceGroupAddress ) -> Iterator[Device]: for device in self.__devices: if device.has_group_address(group_address): yield device
MIT License
monzop/bioblender
bin/pdb2pqr-1.6/querystatus.py
mainCGI
python
def mainCGI(): logopts = {} print "Content-type: text/html\n\n" calctype = form["calctype"].value if form["jobid"].value == 'False': print printheader("%s Job Status Page" % calctype.upper()) progress = "version_mismatch" runtime = 0 else: progress = None if have_opal: if calctype=="pdb2pqr": opal_url = PDB2PQR_OPAL_URL elif calctype=="apbs": opal_url = APBS_OPAL_URL appLocator = AppServiceLocator() appServicePort = appLocator.getAppServicePort(opal_url) else: appServicePort = None if calctype=="pdb2pqr": apbsInputFile = open('%s%s%s/apbs_input' % (INSTALLDIR, TMPDIR, form["jobid"].value)) apbs_input = apbsInputFile.read() apbsInputFile.close() if apbs_input=="True": apbs_input = True else: apbs_input = False typemapInputFile = open('%s%s%s/typemap' % (INSTALLDIR, TMPDIR, form["jobid"].value)) typemap = typemapInputFile.read() typemapInputFile.close() if typemap=="True": typemap = True else: typemap = False if have_opal and progress == None: if form["calctype"].value=="pdb2pqr": pdb2pqrJobIDFile = open('%s%s%s/pdb2pqr_opal_job_id' % (INSTALLDIR, TMPDIR, form["jobid"].value)) jobid = pdb2pqrJobIDFile.read() pdb2pqrJobIDFile.close() elif form["calctype"].value=="apbs": apbsJobIDFile = open('%s%s%s/apbs_opal_job_id' % (INSTALLDIR, TMPDIR, form["jobid"].value)) jobid = apbsJobIDFile.read() apbsJobIDFile.close() else: jobid = form["jobid"].value if progress == None: cp = checkprogress(jobid,appServicePort,calctype) progress = cp[0] if progress == "running" or progress == "complete": timefile = open('%s%s%s/%s_start_time' % (INSTALLDIR, TMPDIR, form["jobid"].value, form["calctype"].value)) starttime = float(timefile.read()) timefile.close() if progress == "running" or (have_opal and progress != "version_mismatch"): runtime = time.time()-starttime elif progress == "complete": endtimefile = open('%s%s%s/%s_end_time' % (INSTALLDIR, TMPDIR, form["jobid"].value, form["calctype"].value)) runtime = float(endtimefile.read())-starttime if progress == "running": if calctype=="pdb2pqr": resultsurl = '%squerystatus.cgi?jobid=%s&apbsinput=%s&calctype=pdb2pqr' % (WEBSITE, form["jobid"].value, apbs_input) else: resultsurl = '%squerystatus.cgi?jobid=%s&calctype=apbs' % (WEBSITE, form["jobid"].value) if progress == "complete": print printheader("%s Job Status Page" % calctype.upper()) elif progress == "error": print printheader("%s Job Status Page - Error" % calctype.upper(),0) elif progress == "running": print printheader("%s Job Status Page" % calctype.upper(), refresh) print "<BODY>\n<P>" print "<h3>Status" print "</h3>" print "Message: %s<br />" % progress print "Run time: %s seconds<br />" % int(runtime) print "Current time: %s<br />" % time.asctime() print "</P>\n<HR>\n<P>" if progress == "complete": if calctype=="pdb2pqr": nexturl = 'apbs_cgi.cgi?jobid=%s' % form["jobid"].value else: nexturl = 'visualize.cgi?jobid=%s' % form["jobid"].value if have_opal: resp = appServicePort.getOutputs(getOutputsRequest(jobid)) filelist = resp._outputFile print "Here are the results:<ul>" print "<li>Input files<ul>" if calctype=="pdb2pqr": if have_opal: for i in range(0,len(filelist)): if len(filelist[i]._name) == 4: print "<li><a href=%s>%s</a></li>" % (filelist[i]._url, filelist[i]._name) else: print "<li><a href=%s%s%s/%s.pdb>%s.pdb</a></li>" % (WEBSITE, TMPDIR, jobid, jobid, jobid) elif calctype=="apbs": if have_opal: for i in range(0,len(filelist)): if filelist[i]._name == "apbsinput.in" or filelist[i]._name[-4:] == ".pqr": print "<li><a href=%s>%s</a></li>" % (filelist[i]._url, filelist[i]._name) else: print "<li><a 
href=%s%s%s/apbsinput.in>apbsinput.in</a></li>" % (WEBSITE, TMPDIR, jobid) print "<li><a href=%s%s%s/%s.pqr>%s.pqr</a></li>" % (WEBSITE, TMPDIR, jobid, jobid, jobid) print "</ul></li>" print "<li>Output files<ul>" if calctype=="pdb2pqr": if have_opal: if os.path.isfile('%s%s%s/pdb2pqr_opal_log' % (INSTALLDIR, TMPDIR, form["jobid"].value)): pdb2pqrOpalLogFile=open('%s%s%s/pdb2pqr_opal_log' % (INSTALLDIR, TMPDIR, form["jobid"].value), 'r') logstr=pdb2pqrOpalLogFile.read().split('\n') logopts = eval(logstr[0]) logff = logstr[1] REMOTE_ADDR = logstr[2] pdb2pqrOpalLogFile.close() for i in range(0,len(filelist)): if filelist[i]._name[-7:]==".propka" or (filelist[i]._name[-13:]=="-typemap.html" and typemap == True) or filelist[i]._name[-4:]==".pqr" or filelist[i]._name[-3:]==".in": if filelist[i]._name[-4:]==".pqr": f=urllib.urlopen(filelist[i]._url) pqrOpalFileLength = len(f.readlines()) f.close() print "<li><a href=%s>%s</a></li>" % (filelist[i]._url, filelist[i]._name) logRun(logopts, runtime, pqrOpalFileLength, logff, REMOTE_ADDR) else: outputfilelist = glob.glob('%s%s%s/*.propka' % (INSTALLDIR, TMPDIR, jobid)) for i in range(0,len(outputfilelist)): outputfilelist[i] = os.path.basename(outputfilelist[i]) for extension in ["-typemap.html", ".pqr", ".in"]: if extension != ".in" or apbs_input != False: if extension == "-typemap.html" and typemap == False: continue outputfilelist.append('%s%s' % (jobid, extension)) for outputfile in outputfilelist: print "<li><a href=%s%s%s/%s>%s</a></li>" % (WEBSITE, TMPDIR, jobid, outputfile, outputfile) elif calctype=="apbs": if have_opal: for i in range(0,len(filelist)): if filelist[i]._name[-3:]==".dx": currentpath = os.getcwd() zipjobid = filelist[i]._name.split("-")[0] urllib.urlretrieve(filelist[i]._url, '%s%s%s/%s' % (INSTALLDIR, TMPDIR, zipjobid, filelist[i]._name)) os.chdir('%s%s%s' % (INSTALLDIR, TMPDIR, zipjobid)) syscommand = 'zip -9 ' + filelist[i]._name + '.zip ' + filelist[i]._name os.system(syscommand) os.chdir(currentpath) outputfilezip = filelist[i]._name + '.zip' print "<li><a href=%s%s%s/%s>%s</a></li>" % (WEBSITE, TMPDIR, zipjobid, outputfilezip, outputfilezip) else: outputfilelist = glob.glob('%s%s%s/%s-*.dx' % (INSTALLDIR, TMPDIR, jobid, jobid)) for outputfile in outputfilelist: currentpath = os.getcwd() workingpath = os.path.dirname(outputfile) os.chdir(workingpath) syscommand = 'zip -9 ' + os.path.basename(outputfile) + '.zip ' + os.path.basename(outputfile) os.system(syscommand) os.chdir(currentpath) outputfilezip = outputfile+".zip" print "<li><a href=%s%s%s/%s>%s</a></li>" % (WEBSITE, TMPDIR, jobid, os.path.basename(outputfilezip), os.path.basename(outputfilezip)) print "</ul></li>" print "<li>Runtime and debugging information<ul>" if have_opal: stdouturl = resp._stdOut stderrurl = resp._stdErr else: stdouturl = "%s%s%s/%s_stdout.txt" % (WEBSITE, TMPDIR, jobid, calctype) stderrurl = "%s%s%s/%s_stderr.txt" % (WEBSITE, TMPDIR, jobid, calctype) print "<li><a href=%s>Program output (stdout)</a></li>" % stdouturl print "<li><a href=%s>Program errors and warnings (stderr)</a></li>" % stderrurl print "</ul></li></ul>" if calctype=="pdb2pqr" and apbs_input and HAVE_APBS!="": print "</ul></p><hr><p><a href=%s>Click here</a> to run APBS with your results.</p>" % nexturl elif calctype=="apbs": print "</ul></p><hr><p><a href=%s>Click here</a> to visualize your results.</p>" % nexturl elif progress == "error": print "There was an error with your query request. This page will not refresh." 
elif progress == "running": print "Page will refresh in %d seconds<br />" % refresh print "<HR>" print "<small>Your results will appear at <a href=%s>this page</a>. If you want, you can bookmark it and come back later (note: results are only stored for approximately 12-24 hours).</small>" % resultsurl elif progress == "version_mismatch": print "The versions of APBS on the local server and on the Opal server do not match, so the calculation could not be completed" print "</P>" print "<script type=\"text/javascript\">" print "var gaJsHost = ((\"https:\" == document.location.protocol) ? \"https://ssl.\" : \"http://www.\");" print "document.write(unescape(\"%3Cscript src=\'\" + gaJsHost + \"google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E\"));" print "</script>" print "<script type=\"text/javascript\">" print "try {" print "var pageTracker = _gat._getTracker(\"UA-11026338-3\");" if logopts != {}: for key in logopts: print "pageTracker._trackPageview(\"/main_cgi/has_%s_%s.html\");" % (key, logopts[key]) print "pageTracker._trackPageview();" print "} catch(err) {}</script>" print "</BODY>" print "</HTML>"
Main method for determining the query page output
https://github.com/monzop/bioblender/blob/57a6ed4dffaa8e43f39fcfa5481048b8f7cc369c/bin/pdb2pqr-1.6/querystatus.py#L259-L517
__date__ = "4 January 2010" __author__ = "Wes Goodman, Samir Unni, Yong Huang" import sys import cgi import cgitb import os,shutil,glob,string,time,urllib from src.server import * from src.aconf import * cgitb.enable() form = cgi.FieldStorage() def printheader(pagetitle,refresh=None): str = "" str+= "<html>\n" str+= "<HEAD>\n" if refresh: str+= "\t<META HTTP-EQUIV=\"Refresh\" CONTENT=\"%s\">\n" % refresh str+= "\t<TITLE>%s</TITLE>\n" % pagetitle str+= "\t<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\n" % STYLESHEET str+= "</HEAD>\n" return str def getloads(): if loadpath == "": return none try: file = open(loadpath, 'ru') except ioerror: return none line = file.readline() words = string.split(line) loads = words[:3] return loads def cleantmpdir(): newdir = [] size = 0.0 count = 0 path = INSTALLDIR + tmpdir dir = os.listdir(path) for filename in dir: size = size + os.path.getsize("%s%s" % (path, filename)) period = string.find(filename,".") id = filename[:period] if id not in newdir: newdir.append(id) count += 1 newdir.sort() size = size / (1024.0 * 1024.0) newcount = 0 if size >= limit: for filename in newdir: if newcount > count/2.0: break try: os.remove("%s%s.pqr" % (path, filename)) except oserror: pass try: os.remove("%s%s.in" % (path, filename)) except oserror: pass try: os.remove("%s%s.html" % (path, filename)) except oserror: pass newcount += 1 def getquote(path): fortune = os.popen(path) quote = fortune.read() quote = string.replace(quote, "\n", "<br>") quote = string.replace(quote, "\t", "&nbsp;"*5) quote = "%s<p>" % quote return quote def printprogress(name, refreshname, reftime, starttime): elapsedtime = time.time() - starttime + refreshtime/2.0 filename = "%s%s%s/%s-tmp.html" % (INSTALLDIR, tmpdir, jobid, name) file = open(filename,"w") file.write("<html>\n") file.write("<head>\n") file.write("<title>pdb2pqr progress</title>\n") file.write("<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\n" % stylesheet) file.write("<meta http-equiv=\"refresh\" content=\"%s; url=%s\">\n" % (reftime, refreshname)) file.write("</head>\n") file.write("<body>\n") file.write("<h2>pdb2pqr progress</h2><p>\n") file.write("the pdb2pqr server is generating your results - this page will automatically \n") file.write("refresh every %s seconds.<p>\n" % refreshtime) file.write("thank you for your patience!<p>\n") file.write("server progress:<p>\n") file.write("<blockquote>\n") file.write("<font size=2>elapsed time:</font> <code>%.2f seconds</code><br>\n" % elapsedtime) file.write("</blockquote>\n") file.write("server information:<p>\n") file.write("<blockquote>\n") loads = getloads() if loads != none: file.write("<font size=2>server load:</font> <code>%s (1min) %s (5min) %s (15min)</code><br>\n" % (loads[0], loads[1], loads[2])) file.write("<font size=2>server time:</font> <code>%s</code><br>\n" % (time.asctime(time.localtime()))) file.write("</blockquote>\n") file.write("<script type=\"text/javascript\">") file.write("var gaJsHost = ((\"https:\" == document.location.protocol) ? 
\"https://ssl.\" : \"http://www.\");") file.write("document.write(unescape(\"%3Cscript src=\'\" + gaJsHost + \"google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E\"));") file.write("</script>") file.write("<script type=\"text/javascript\">") file.write("try {") file.write("var pageTracker = _gat._getTracker(\"UA-11026338-3\");") file.write("pageTracker._trackPageview();") file.write("} catch(err) {}</script>") file.write("</body></html>") file.close() def createresults(header, input, name, time, missedligands=[]): newheader = string.replace(header, "\n", "<br>") newheader = string.replace(newheader," ","&nbsp;") filename = "%s%s%s/%s.html" % (INSTALLDIR, tmpdir, jobid, name) file = open(filename, "w") file.write("<html>\n") file.write("<head>\n") file.write("<title>pdb2pqr results</title>\n") file.write("<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\n" % stylesheet) file.write("</head>\n") file.write("<body>\n") file.write("<h2>pdb2pqr results</h2>\n") file.write("<p>\n") file.write("here are the results from pdb2pqr. the files will be available on the ") file.write("server for a short period of time if you need to re-access the results.<p>\n") file.write("<a href=\"%s%s%s.pqr\">%s.pqr</a><br>\n" % (website, tmpdir, name, name)) if input: file.write("<a href=\"%s%s%s.in\">%s.in</a><br>\n" % (website, tmpdir, name, name)) pkaname = "%s%s%s/%s.propka" % (INSTALLDIR, tmpdir, jobid, name) if os.path.isfile(pkaname): file.write("<a href=\"%s%s%s.propka\">%s.propka</a><br>\n" % (website, tmpdir, name, name)) typename = "%s%s%s/%s-typemap.html" % (INSTALLDIR, tmpdir, jobid, name) if os.path.isfile(typename): file.write("<a href=\"%s%s%s-typemap.html\">%s-typemap.html</a><br>\n" % (website, tmpdir, name, name)) file.write("<p>the header for your pqr file, including any warnings generated, is:<p>\n") file.write("<blockquote><code>\n") file.write("%s<p>\n" % newheader) file.write("</code></blockquote>\n") if missedligands != []: file.write("the forcefield that you have selected does not have ") file.write("parameters for the following ligands in your pdb file. please visit ") file.write("<a href=\"http://davapc1.bioch.dundee.ac.uk/programs/prodrg/\">prodrg</a> ") file.write("to convert these ligands into mol2 format. this ligand can the be ") file.write("parameterized in your pdb2pqr calculation using the peoe_pb methodology via ") file.write("the 'assign charges to the ligand specified in a mol2 file' checkbox:<p>\n") file.write("<blockquote><code>\n") for item in missedligands: file.write("%s<br>\n" % item) file.write("<p></code></blockquote>\n") file.write("if you would like to run pdb2pqr again, please click <a href=\"%s%s\">\n" % (website, webname)) file.write("here</a>.<p>\n") file.write("if you would like to run apbs with these results, please click <a href=\"%s../apbs/index.py?pdb2pqr-id=%s\">here</a>.<p>\n" % (website[:-1], name)) file.write("<p>thank you for using the pdb2pqr server!<p>\n") file.write("<font size=\"-1\"><p>total time on server: %.2f seconds</font><p>\n" % time) file.write("<font size=\"-1\"><center><i>last updated %s</i></center></font>\n" % __date__) file.write("<script type=\"text/javascript\">") file.write("var gaJsHost = ((\"https:\" == document.location.protocol) ? 
\"https://ssl.\" : \"http://www.\");") file.write("document.write(unescape(\"%3Cscript src=\'\" + gaJsHost + \"google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E\"));") file.write("</script>") file.write("<script type=\"text/javascript\">") file.write("try {") file.write("var pageTracker = _gat._getTracker(\"UA-11026338-3\");") file.write("pageTracker._trackPageview();") file.write("} catch(err) {}</script>") file.write("</body>\n") file.write("</html>\n") def checkprogress(jobid=None,appServicePort=None,calctype=None): if have_opal: try: status=appServicePort.queryStatus(queryStatusRequest(jobid)) except Exception, e: return ["error"] if status._code == 4: return ["error"] if status._code == 8: return ["complete",status] else: return ["running",status] else: progress = [] file = open('%s%s%s/%s_status' % (INSTALLDIR,TMPDIR,jobid, form["calctype"].value)) for line in file.readlines(): progress.append(string.strip(line)) file.close() return progress
BSD 2-Clause Simplified License
devopshq/vspheretools
pysphere/ZSI/wstools/XMLname.py
fromXMLname
python
def fromXMLname(string):
    retval = sub(r'_xFFFF_', '', string)

    def fun(matchobj):
        return _fromUnicodeHex(matchobj.group(0))

    retval = sub(r'_x[0-9A-Za-z]+_', fun, retval)
    return retval
Convert XML name to unicode string.
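Two illustrative conversions, derived from the escaping scheme used in the code above and in `toXMLname` (see the module context below):

fromXMLname("Some_x0020_Name")    # -> u"Some Name": the _x0020_ escape decodes to a space
fromXMLname("_xFFFF_XMLelement")  # -> u"XMLelement": the _xFFFF_ prefix marker is removed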
https://github.com/devopshq/vspheretools/blob/10890423bfbba976e3ddee61204e9eed4b73fe92/pysphere/ZSI/wstools/XMLname.py#L80-L90
ident = "$Id$" from re import sub def _NCNameChar(x): return x.isalpha() or x.isdigit() or x=="." or x=='-' or x=="_" def _NCNameStartChar(x): return x.isalpha() or x=="_" def _toUnicodeHex(x): hexval = hex(ord(x[0]))[2:] hexlen = len(hexval) if (hexlen==1): hexval = "000" + hexval elif (hexlen==2): hexval = "00" + hexval elif (hexlen==3): hexval = "0" + hexval elif (hexlen==4): hexval = "" + hexval elif (hexlen==5): hexval = "000" + hexval elif (hexlen==6): hexval = "00" + hexval elif (hexlen==7): hexval = "0" + hexval elif (hexlen==8): hexval = "" + hexval else: raise Exception("Illegal Value returned from hex(ord(x))") return "_x"+ hexval + "_" def _fromUnicodeHex(x): return eval( r'u"\u'+x[2:-1]+'"' ) def toXMLname(string): if string.find(':') != -1 : (prefix, localname) = string.split(':',1) else: prefix = None localname = string T = unicode(localname) N = len(localname) X = []; for i in range(N) : if i< N-1 and T[i]==u'_' and T[i+1]==u'x': X.append(u'_x005F_') elif i==0 and N >= 3 and ( T[0]==u'x' or T[0]==u'X' ) and ( T[1]==u'm' or T[1]==u'M' ) and ( T[2]==u'l' or T[2]==u'L' ): X.append(u'_xFFFF_' + T[0]) elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])): X.append(_toUnicodeHex(T[i])) else: X.append(T[i]) if prefix: return "%s:%s" % (prefix, u''.join(X)) return u''.join(X)
MIT License
common-workflow-language/schema_salad
schema_salad/schema.py
print_fieldrefs
python
def print_fieldrefs(doc: List[Dict[str, Any]], loader: Loader, stream: IO[Any]) -> None:
    obj = extend_and_specialize(doc, loader)
    primitives = {
        "http://www.w3.org/2001/XMLSchema#string",
        "http://www.w3.org/2001/XMLSchema#boolean",
        "http://www.w3.org/2001/XMLSchema#int",
        "http://www.w3.org/2001/XMLSchema#long",
        saladp + "null",
        saladp + "enum",
        saladp + "array",
        saladp + "record",
        saladp + "Any",
    }
    stream.write("digraph {\n")
    for entry in obj:
        if entry.get("abstract"):
            continue
        if entry["type"] == "record":
            label = shortname(entry["name"])
            for field in entry.get("fields", []):
                found = set()
                field_name = shortname(field["name"])
                replace_type(field["type"], {}, loader, found, find_embeds=False)
                for each_type in found:
                    if each_type not in primitives:
                        stream.write(
                            '"{}" -> "{}" [label="{}"];\n'.format(
                                label, shortname(each_type), field_name
                            )
                        )
    stream.write("}\n")
Write a GraphViz graph of the relationships between the fields.
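A hedged usage sketch: it assumes `schema_doc` is the resolved schema list and `document_loader` the matching `Loader`, e.g. as produced by the loading helpers in the module context below; both names are placeholders:

import sys

# Writes DOT edges like "Record" -> "OtherType" [label="field"] to stdout.
print_fieldrefs(schema_doc, document_loader, sys.stdout)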
https://github.com/common-workflow-language/schema_salad/blob/73fc4f9866ab2ba55cd1fe696f06b4bb7e36ab89/schema_salad/schema.py#L768-L801
import copy import hashlib from typing import ( IO, Any, Dict, List, Mapping, MutableMapping, MutableSequence, Optional, Set, Tuple, TypeVar, Union, cast, ) from urllib.parse import urlparse from pkg_resources import resource_stream from ruamel.yaml.comments import CommentedMap, CommentedSeq from schema_salad.utils import ( CacheType, ResolveType, add_dictlist, aslist, convert_to_dict, flatten, json_dumps, yaml_no_ts, ) from . import _logger, jsonld_context, ref_resolver, validate from .avro.schema import Names, SchemaParseException, make_avsc_object from .exceptions import ( ClassValidationException, SchemaSaladException, ValidationException, ) from .ref_resolver import Loader from .sourceline import SourceLine, add_lc_filename, relname SALAD_FILES = ( "metaschema.yml", "metaschema_base.yml", "salad.md", "field_name.yml", "import_include.md", "link_res.yml", "ident_res.yml", "vocab_res.yml", "vocab_res.yml", "field_name_schema.yml", "field_name_src.yml", "field_name_proc.yml", "ident_res_schema.yml", "ident_res_src.yml", "ident_res_proc.yml", "link_res_schema.yml", "link_res_src.yml", "link_res_proc.yml", "vocab_res_schema.yml", "vocab_res_src.yml", "vocab_res_proc.yml", "map_res.yml", "map_res_schema.yml", "map_res_src.yml", "map_res_proc.yml", "typedsl_res.yml", "typedsl_res_schema.yml", "typedsl_res_src.yml", "typedsl_res_proc.yml", "sfdsl_res.yml", "sfdsl_res_schema.yml", "sfdsl_res_src.yml", "sfdsl_res_proc.yml", ) saladp = "https://w3id.org/cwl/salad#" def get_metaschema() -> Tuple[Names, List[Dict[str, str]], Loader]: loader = ref_resolver.Loader( { "Any": saladp + "Any", "ArraySchema": saladp + "ArraySchema", "Array_symbol": saladp + "ArraySchema/type/Array_symbol", "DocType": saladp + "DocType", "Documentation": saladp + "Documentation", "Documentation_symbol": saladp + "Documentation/type/Documentation_symbol", "Documented": saladp + "Documented", "EnumSchema": saladp + "EnumSchema", "Enum_symbol": saladp + "EnumSchema/type/Enum_symbol", "JsonldPredicate": saladp + "JsonldPredicate", "NamedType": saladp + "NamedType", "PrimitiveType": saladp + "PrimitiveType", "RecordField": saladp + "RecordField", "RecordSchema": saladp + "RecordSchema", "Record_symbol": saladp + "RecordSchema/type/Record_symbol", "SaladEnumSchema": saladp + "SaladEnumSchema", "SaladRecordField": saladp + "SaladRecordField", "SaladRecordSchema": saladp + "SaladRecordSchema", "SchemaDefinedType": saladp + "SchemaDefinedType", "SpecializeDef": saladp + "SpecializeDef", "_container": saladp + "JsonldPredicate/_container", "_id": {"@id": saladp + "_id", "@type": "@id", "identity": True}, "_type": saladp + "JsonldPredicate/_type", "abstract": saladp + "SaladRecordSchema/abstract", "array": saladp + "array", "boolean": "http://www.w3.org/2001/XMLSchema#boolean", "dct": "http://purl.org/dc/terms/", "default": {"@id": saladp + "default", "noLinkCheck": True}, "doc": "rdfs:comment", "docAfter": {"@id": saladp + "docAfter", "@type": "@id"}, "docChild": {"@id": saladp + "docChild", "@type": "@id"}, "docParent": {"@id": saladp + "docParent", "@type": "@id"}, "documentRoot": saladp + "SchemaDefinedType/documentRoot", "documentation": saladp + "documentation", "double": "http://www.w3.org/2001/XMLSchema#double", "enum": saladp + "enum", "extends": {"@id": saladp + "extends", "@type": "@id", "refScope": 1}, "fields": { "@id": saladp + "fields", "mapPredicate": "type", "mapSubject": "name", }, "float": "http://www.w3.org/2001/XMLSchema#float", "identity": saladp + "JsonldPredicate/identity", "inVocab": saladp + 
"NamedType/inVocab", "int": "http://www.w3.org/2001/XMLSchema#int", "items": {"@id": saladp + "items", "@type": "@vocab", "refScope": 2}, "jsonldPredicate": "sld:jsonldPredicate", "long": "http://www.w3.org/2001/XMLSchema#long", "mapPredicate": saladp + "JsonldPredicate/mapPredicate", "mapSubject": saladp + "JsonldPredicate/mapSubject", "name": "@id", "noLinkCheck": saladp + "JsonldPredicate/noLinkCheck", "null": saladp + "null", "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "rdfs": "http://www.w3.org/2000/01/rdf-schema#", "record": saladp + "record", "refScope": saladp + "JsonldPredicate/refScope", "sld": saladp, "specialize": { "@id": saladp + "specialize", "mapPredicate": "specializeTo", "mapSubject": "specializeFrom", }, "specializeFrom": { "@id": saladp + "specializeFrom", "@type": "@id", "refScope": 1, }, "specializeTo": { "@id": saladp + "specializeTo", "@type": "@id", "refScope": 1, }, "string": "http://www.w3.org/2001/XMLSchema#string", "subscope": saladp + "JsonldPredicate/subscope", "symbols": {"@id": saladp + "symbols", "@type": "@id", "identity": True}, "type": { "@id": saladp + "type", "@type": "@vocab", "refScope": 2, "typeDSL": True, }, "typeDSL": saladp + "JsonldPredicate/typeDSL", "xsd": "http://www.w3.org/2001/XMLSchema#", } ) for salad in SALAD_FILES: with resource_stream("schema_salad", "metaschema/" + salad) as stream: loader.cache["https://w3id.org/cwl/" + salad] = stream.read().decode( "UTF-8" ) with resource_stream("schema_salad", "metaschema/metaschema.yml") as stream: loader.cache["https://w3id.org/cwl/salad"] = stream.read().decode("UTF-8") yaml = yaml_no_ts() j = yaml.load(loader.cache["https://w3id.org/cwl/salad"]) add_lc_filename(j, "metaschema.yml") j2 = loader.resolve_all(j, saladp)[0] if not isinstance(j2, list): _logger.error("%s", j2) raise SchemaParseException(f"Not a list: {j2}") else: sch_obj = make_avro(j2, loader, loader.vocab) try: sch_names = make_avro_schema_from_avro(sch_obj) except SchemaParseException: _logger.error("Metaschema error, avro was:\n%s", json_dumps(sch_obj, indent=4)) raise validate_doc(sch_names, j2, loader, strict=True) return (sch_names, j2, loader) def add_namespaces( metadata: Mapping[str, Any], namespaces: MutableMapping[str, str] ) -> None: for key, value in metadata.items(): if key not in namespaces: namespaces[key] = value elif namespaces[key] != value: raise ValidationException( "Namespace prefix '{}' has conflicting definitions '{}'" " and '{}'.".format(key, namespaces[key], value) ) def collect_namespaces(metadata: Mapping[str, Any]) -> Dict[str, str]: namespaces = {} if "$import_metadata" in metadata: for value in metadata["$import_metadata"].values(): add_namespaces(collect_namespaces(value), namespaces) if "$namespaces" in metadata: add_namespaces(metadata["$namespaces"], namespaces) return namespaces schema_type = Tuple[Loader, Union[Names, SchemaParseException], Dict[str, Any], Loader] def load_schema( schema_ref: ResolveType, cache: Optional[CacheType] = None, ) -> schema_type: metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema() if cache is not None: metaschema_loader.cache.update(cache) schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "") if not isinstance(schema_doc, MutableSequence): raise ValidationException("Schema reference must resolve to a list.") validate_doc(metaschema_names, schema_doc, metaschema_loader, True) metactx = schema_metadata.get("@context", {}) metactx.update(collect_namespaces(schema_metadata)) schema_ctx = 
jsonld_context.salad_to_jsonld_context(schema_doc, metactx)[0] document_loader = Loader(schema_ctx, cache=cache) avsc_names = make_avro_schema(schema_doc, document_loader, metaschema_loader.vocab) return document_loader, avsc_names, schema_metadata, metaschema_loader def load_and_validate( document_loader: Loader, avsc_names: Names, document: Union[CommentedMap, str], strict: bool, strict_foreign_properties: bool = False, ) -> Tuple[Any, Dict[str, Any]]: try: if isinstance(document, CommentedMap): data, metadata = document_loader.resolve_all( document, document["id"], checklinks=True, strict_foreign_properties=strict_foreign_properties, ) else: data, metadata = document_loader.resolve_ref( document, checklinks=True, strict_foreign_properties=strict_foreign_properties, ) validate_doc( avsc_names, data, document_loader, strict, strict_foreign_properties=strict_foreign_properties, ) except ValidationException as exc: raise ValidationException("", None, [exc]) from exc return data, metadata def validate_doc( schema_names: Names, doc: ResolveType, loader: Loader, strict: bool, strict_foreign_properties: bool = False, ) -> None: has_root = False for root in schema_names.names.values(): if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or ( "documentRoot" in root.props ): has_root = True break if not has_root: raise ValidationException("No document roots defined in the schema") if isinstance(doc, MutableSequence): vdoc = doc elif isinstance(doc, CommentedMap): vdoc = CommentedSeq([doc]) vdoc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col]) vdoc.lc.filename = doc.lc.filename else: raise ValidationException("Document must be dict or list") roots = [] for root in schema_names.names.values(): if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or ( root.props.get("documentRoot") ): roots.append(root) anyerrors = [] for pos, item in enumerate(vdoc): sourceline = SourceLine(vdoc, pos, str) success = False for root in roots: success = validate.validate_ex( root, item, loader.identifiers, strict, foreign_properties=loader.foreign_properties, raise_ex=False, skip_foreign_properties=loader.skip_schemas, strict_foreign_properties=strict_foreign_properties, vocab=loader.vocab, ) if success: break if not success: errors = [] for root in roots: if hasattr(root, "get_prop"): name = root.get_prop("name") elif hasattr(root, "name"): name = root.name try: validate.validate_ex( root, item, loader.identifiers, strict, foreign_properties=loader.foreign_properties, raise_ex=True, skip_foreign_properties=loader.skip_schemas, strict_foreign_properties=strict_foreign_properties, vocab=loader.vocab, ) except ClassValidationException as exc1: errors = [ ClassValidationException( f"tried `{validate.friendly(name)}` but", sourceline, [exc1] ) ] break except ValidationException as exc2: errors.append( ValidationException( f"tried `{validate.friendly(name)}` but", sourceline, [exc2] ) ) objerr = "Invalid" for ident in loader.identifiers: if ident in item: objerr = "Object `{}` is not valid because".format( relname(item[ident]) ) break anyerrors.append(ValidationException(objerr, sourceline, errors, "-")) if anyerrors: raise ValidationException("", None, anyerrors, "*") def get_anon_name( rec: MutableMapping[str, Union[str, Dict[str, str], List[str]]] ) -> str: if "name" in rec: name = rec["name"] if isinstance(name, str): return name raise ValidationException(f"Expected name field to be a string, was {name}") anon_name = "" if rec["type"] in ("enum", saladp + "enum"): for sym in rec["symbols"]: 
anon_name += sym return ( "anon.enum_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() ) if rec["type"] in ("record", saladp + "record"): for field in rec["fields"]: if isinstance(field, Mapping): anon_name += field["name"] else: raise ValidationException( "Expected entries in 'fields' to also be maps, was {}.".format( field ) ) return "record_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() if rec["type"] in ("array", saladp + "array"): return "" raise ValidationException("Expected enum or record, was {}".format(rec["type"])) def replace_type( items: Any, spec: Dict[str, Any], loader: Loader, found: Set[str], find_embeds: bool = True, deepen: bool = True, ) -> Any: if isinstance(items, MutableMapping): if items.get("type") in ("record", "enum") and items.get("name"): if items["name"] in found: return items["name"] found.add(items["name"]) if not deepen: return items items = copy.copy(items) if not items.get("name"): items["name"] = get_anon_name(items) for name in ("type", "items", "fields"): if name in items: items[name] = replace_type( items[name], spec, loader, found, find_embeds=find_embeds, deepen=find_embeds, ) if isinstance(items[name], MutableSequence): items[name] = flatten(items[name]) return items if isinstance(items, MutableSequence): return [ replace_type(i, spec, loader, found, find_embeds=find_embeds, deepen=deepen) for i in items ] if isinstance(items, str): replace_with = None if items in loader.vocab: items = loader.vocab[items] if items in spec: replace_with = spec[items] if replace_with: return replace_type( replace_with, spec, loader, found, find_embeds=find_embeds ) found.add(items) return items def avro_field_name(url: str) -> str: d = urlparse(url) if d.fragment: return d.fragment.split("/")[-1] return d.path.split("/")[-1] Avro = TypeVar("Avro", MutableMapping[str, Any], MutableSequence[Any], str) def make_valid_avro( items: Avro, alltypes: Dict[str, Dict[str, Any]], found: Set[str], union: bool = False, fielddef: bool = False, vocab: Optional[Dict[str, str]] = None, ) -> Union[ Avro, MutableMapping[str, str], str, List[Union[Any, MutableMapping[str, str], str]] ]: if vocab is None: _, _, metaschema_loader = get_metaschema() vocab = metaschema_loader.vocab if isinstance(items, MutableMapping): avro = copy.copy(items) if avro.get("name") and avro.get("inVocab", True): if fielddef: avro["name"] = avro_field_name(avro["name"]) else: avro["name"] = validate.avro_type_name(avro["name"]) if "type" in avro and avro["type"] in ( saladp + "record", saladp + "enum", "record", "enum", ): if (hasattr(avro, "get") and avro.get("abstract")) or ("abstract" in avro): return avro if avro["name"] in found: return cast(str, avro["name"]) found.add(avro["name"]) for field in ("type", "items", "values", "fields"): if field in avro: avro[field] = make_valid_avro( avro[field], alltypes, found, union=True, fielddef=(field == "fields"), vocab=vocab, ) if "symbols" in avro: avro["symbols"] = [avro_field_name(sym) for sym in avro["symbols"]] return avro if isinstance(items, MutableSequence): ret = [] for i in items: ret.append( make_valid_avro( i, alltypes, found, union=union, fielddef=fielddef, vocab=vocab ) ) return ret if union and isinstance(items, str): if items in alltypes and validate.avro_type_name(items) not in found: return make_valid_avro( alltypes[items], alltypes, found, union=union, vocab=vocab ) if items in vocab: return validate.avro_type_name(vocab[items]) else: return validate.avro_type_name(items) else: return items def deepcopy_strip(item: Any) -> Any: if 
isinstance(item, MutableMapping): return {k: deepcopy_strip(v) for k, v in item.items()} if isinstance(item, MutableSequence): return [deepcopy_strip(k) for k in item] return item def extend_and_specialize( items: List[Dict[str, Any]], loader: Loader ) -> List[Dict[str, Any]]: items2 = deepcopy_strip(items) types = {i["name"]: i for i in items2} results = [] for stype in items2: if "extends" in stype: specs = {} if "specialize" in stype: for spec in aslist(stype["specialize"]): specs[spec["specializeFrom"]] = spec["specializeTo"] exfields = [] exsym = [] for ex in aslist(stype["extends"]): if ex not in types: raise ValidationException( "Extends {} in {} refers to invalid base type.".format( stype["extends"], stype["name"] ) ) basetype = copy.copy(types[ex]) if stype["type"] == "record": if specs: basetype["fields"] = replace_type( basetype.get("fields", []), specs, loader, set() ) for field in basetype.get("fields", []): if "inherited_from" not in field: field["inherited_from"] = ex exfields.extend(basetype.get("fields", [])) elif stype["type"] == "enum": exsym.extend(basetype.get("symbols", [])) if stype["type"] == "record": stype = copy.copy(stype) exfields.extend(stype.get("fields", [])) stype["fields"] = exfields fieldnames = set() for field in stype["fields"]: if field["name"] in fieldnames: raise ValidationException( "Field name {} appears twice in {}".format( field["name"], stype["name"] ) ) else: fieldnames.add(field["name"]) elif stype["type"] == "enum": stype = copy.copy(stype) exsym.extend(stype.get("symbols", [])) stype["symbol"] = exsym types[stype["name"]] = stype results.append(stype) ex_types = {} for result in results: ex_types[result["name"]] = result extended_by = {} for result in results: if "extends" in result: for ex in aslist(result["extends"]): if ex_types[ex].get("abstract"): add_dictlist(extended_by, ex, ex_types[result["name"]]) add_dictlist(extended_by, validate.avro_type_name(ex), ex_types[ex]) for result in results: if result.get("abstract") and result["name"] not in extended_by: raise ValidationException( "{} is abstract but missing a concrete subtype".format(result["name"]) ) for result in results: if "fields" in result: result["fields"] = replace_type( result["fields"], extended_by, loader, set() ) return results def make_avro( i: List[Dict[str, Any]], loader: Loader, metaschema_vocab: Optional[Dict[str, str]] = None, ) -> List[Any]: j = extend_and_specialize(i, loader) name_dict = {} for entry in j: name_dict[entry["name"]] = entry avro = make_valid_avro(j, name_dict, set(), vocab=metaschema_vocab) return [ t for t in avro if isinstance(t, MutableMapping) and not t.get("abstract") and t.get("type") != "org.w3id.cwl.salad.documentation" ] def make_avro_schema( i: List[Any], loader: Loader, metaschema_vocab: Optional[Dict[str, str]] = None ) -> Names: names = Names() avro = make_avro(i, loader, metaschema_vocab) make_avsc_object(convert_to_dict(avro), names) return names def make_avro_schema_from_avro(avro: List[Union[Avro, Dict[str, str], str]]) -> Names: names = Names() make_avsc_object(convert_to_dict(avro), names) return names def shortname(inputid: str) -> str: parsed_id = urlparse(inputid) if parsed_id.fragment: return parsed_id.fragment.split("/")[-1] return parsed_id.path.split("/")[-1] def print_inheritance(doc: List[Dict[str, Any]], stream: IO[Any]) -> None: stream.write("digraph {\n") for entry in doc: if entry["type"] == "record": label = name = shortname(entry["name"]) fields = entry.get("fields", []) if fields: label += "\\n* {}\\l".format( 
"\\l* ".join(shortname(field["name"]) for field in fields) ) shape = "ellipse" if entry.get("abstract") else "box" stream.write(f'"{name}" [shape={shape} label="{label}"];\n') if "extends" in entry: for target in aslist(entry["extends"]): stream.write(f'"{shortname(target)}" -> "{name}";\n') stream.write("}\n")
Apache License 2.0
microsoft/qlib
scripts/data_collector/us_index/collector.py
get_instruments
python
def get_instruments(
    qlib_dir: str, index_name: str, method: str = "parse_instruments", request_retry: int = 5, retry_sleep: int = 3
):
    _cur_module = importlib.import_module("data_collector.us_index.collector")
    obj = getattr(_cur_module, f"{index_name.upper()}Index")(
        qlib_dir=qlib_dir, index_name=index_name, request_retry=request_retry, retry_sleep=retry_sleep
    )
    getattr(obj, method)()
Parameters
----------
qlib_dir: str
    qlib data dir, default "Path(__file__).parent/qlib_data"
index_name: str
    index name, value from ["SP500", "NASDAQ100", "DJIA", "SP400"]
method: str
    method, value from ["parse_instruments", "save_new_companies"]
request_retry: int
    request retry, by default 5
retry_sleep: int
    request sleep, by default 3

Examples
-------
# parse instruments
$ python collector.py --index_name SP500 --qlib_dir ~/.qlib/qlib_data/cn_data --method parse_instruments

# parse new companies
$ python collector.py --index_name SP500 --qlib_dir ~/.qlib/qlib_data/cn_data --method save_new_companies
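The same operation invoked from Python rather than the CLI; the qlib_dir path is a placeholder:

get_instruments(
    qlib_dir="~/.qlib/qlib_data/us_data",
    index_name="SP500",
    method="parse_instruments",
)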
https://github.com/microsoft/qlib/blob/7c31012b507a3823117bddcc693fc64899460b2a/scripts/data_collector/us_index/collector.py#L247-L278
import abc import sys import importlib from pathlib import Path from concurrent.futures import ThreadPoolExecutor from typing import List import fire import requests import pandas as pd from tqdm import tqdm from loguru import logger CUR_DIR = Path(__file__).resolve().parent sys.path.append(str(CUR_DIR.parent.parent)) from data_collector.index import IndexBase from data_collector.utils import deco_retry, get_calendar_list, get_trading_date_by_shift WIKI_URL = "https://en.wikipedia.org/wiki" WIKI_INDEX_NAME_MAP = { "NASDAQ100": "NASDAQ-100", "SP500": "List_of_S%26P_500_companies", "SP400": "List_of_S%26P_400_companies", "DJIA": "Dow_Jones_Industrial_Average", } class WIKIIndex(IndexBase): INST_PREFIX = "" def __init__(self, index_name: str, qlib_dir: [str, Path] = None, request_retry: int = 5, retry_sleep: int = 3): super(WIKIIndex, self).__init__( index_name=index_name, qlib_dir=qlib_dir, request_retry=request_retry, retry_sleep=retry_sleep ) self._target_url = f"{WIKI_URL}/{WIKI_INDEX_NAME_MAP[self.index_name.upper()]}" @property @abc.abstractmethod def bench_start_date(self) -> pd.Timestamp: raise NotImplementedError("rewrite bench_start_date") @abc.abstractmethod def get_changes(self) -> pd.DataFrame: raise NotImplementedError("rewrite get_changes") @property def calendar_list(self) -> List[pd.Timestamp]: _calendar_list = getattr(self, "_calendar_list", None) if _calendar_list is None: _calendar_list = list(filter(lambda x: x >= self.bench_start_date, get_calendar_list("US_ALL"))) setattr(self, "_calendar_list", _calendar_list) return _calendar_list def _request_new_companies(self) -> requests.Response: resp = requests.get(self._target_url) if resp.status_code != 200: raise ValueError(f"request error: {self._target_url}") return resp def set_default_date_range(self, df: pd.DataFrame) -> pd.DataFrame: _df = df.copy() _df[self.SYMBOL_FIELD_NAME] = _df[self.SYMBOL_FIELD_NAME].str.strip() _df[self.START_DATE_FIELD] = self.bench_start_date _df[self.END_DATE_FIELD] = self.DEFAULT_END_DATE return _df.loc[:, self.INSTRUMENTS_COLUMNS] def get_new_companies(self): logger.info(f"get new companies {self.index_name} ......") _data = deco_retry(retry=self._request_retry, retry_sleep=self._retry_sleep)(self._request_new_companies)() df_list = pd.read_html(_data.text) for _df in df_list: _df = self.filter_df(_df) if (_df is not None) and (not _df.empty): _df.columns = [self.SYMBOL_FIELD_NAME] _df = self.set_default_date_range(_df) logger.info(f"end of get new companies {self.index_name} ......") return _df def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: raise NotImplementedError("rewrite filter_df") class NASDAQ100Index(WIKIIndex): HISTORY_COMPANIES_URL = ( "https://indexes.nasdaqomx.com/Index/WeightingData?id=NDX&tradeDate={trade_date}T00%3A00%3A00.000&timeOfDay=SOD" ) MAX_WORKERS = 16 def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: if len(df) >= 100 and "Ticker" in df.columns: return df.loc[:, ["Ticker"]].copy() @property def bench_start_date(self) -> pd.Timestamp: return pd.Timestamp("2003-01-02") @deco_retry def _request_history_companies(self, trade_date: pd.Timestamp, use_cache: bool = True) -> pd.DataFrame: trade_date = trade_date.strftime("%Y-%m-%d") cache_path = self.cache_dir.joinpath(f"{trade_date}_history_companies.pkl") if cache_path.exists() and use_cache: df = pd.read_pickle(cache_path) else: url = self.HISTORY_COMPANIES_URL.format(trade_date=trade_date) resp = requests.post(url) if resp.status_code != 200: raise ValueError(f"request error: {url}") df = 
pd.DataFrame(resp.json()["aaData"]) df[self.DATE_FIELD_NAME] = trade_date df.rename(columns={"Name": "name", "Symbol": self.SYMBOL_FIELD_NAME}, inplace=True) if not df.empty: df.to_pickle(cache_path) return df def get_history_companies(self): logger.info(f"start get history companies......") all_history = [] error_list = [] with tqdm(total=len(self.calendar_list)) as p_bar: with ThreadPoolExecutor(max_workers=self.MAX_WORKERS) as executor: for _trading_date, _df in zip( self.calendar_list, executor.map(self._request_history_companies, self.calendar_list) ): if _df.empty: error_list.append(_trading_date) else: all_history.append(_df) p_bar.update() if error_list: logger.warning(f"get error: {error_list}") logger.info(f"total {len(self.calendar_list)}, error {len(error_list)}") logger.info(f"end of get history companies.") return pd.concat(all_history, sort=False) def get_changes(self): return self.get_changes_with_history_companies(self.get_history_companies()) class DJIAIndex(WIKIIndex): @property def bench_start_date(self) -> pd.Timestamp: return pd.Timestamp("2000-01-01") def get_changes(self) -> pd.DataFrame: pass def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: if "Symbol" in df.columns: _df = df.loc[:, ["Symbol"]].copy() _df["Symbol"] = _df["Symbol"].apply(lambda x: x.split(":")[-1]) return _df def parse_instruments(self): logger.warning(f"No suitable data source has been found!") class SP500Index(WIKIIndex): WIKISP500_CHANGES_URL = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies" @property def bench_start_date(self) -> pd.Timestamp: return pd.Timestamp("1999-01-01") def get_changes(self) -> pd.DataFrame: logger.info(f"get sp500 history changes......") changes_df = pd.read_html(self.WIKISP500_CHANGES_URL)[-1] changes_df = changes_df.iloc[:, [0, 1, 3]] changes_df.columns = [self.DATE_FIELD_NAME, self.ADD, self.REMOVE] changes_df[self.DATE_FIELD_NAME] = pd.to_datetime(changes_df[self.DATE_FIELD_NAME]) _result = [] for _type in [self.ADD, self.REMOVE]: _df = changes_df.copy() _df[self.CHANGE_TYPE_FIELD] = _type _df[self.SYMBOL_FIELD_NAME] = _df[_type] _df.dropna(subset=[self.SYMBOL_FIELD_NAME], inplace=True) if _type == self.ADD: _df[self.DATE_FIELD_NAME] = _df[self.DATE_FIELD_NAME].apply( lambda x: get_trading_date_by_shift(self.calendar_list, x, 0) ) else: _df[self.DATE_FIELD_NAME] = _df[self.DATE_FIELD_NAME].apply( lambda x: get_trading_date_by_shift(self.calendar_list, x, -1) ) _result.append(_df[[self.DATE_FIELD_NAME, self.CHANGE_TYPE_FIELD, self.SYMBOL_FIELD_NAME]]) logger.info(f"end of get sp500 history changes.") return pd.concat(_result, sort=False) def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: if "Symbol" in df.columns: return df.loc[:, ["Symbol"]].copy() class SP400Index(WIKIIndex): @property def bench_start_date(self) -> pd.Timestamp: return pd.Timestamp("2000-01-01") def get_changes(self) -> pd.DataFrame: pass def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: if "Ticker symbol" in df.columns: return df.loc[:, ["Ticker symbol"]].copy() def parse_instruments(self): logger.warning(f"No suitable data source has been found!")
MIT License
nextdoor/ndscheduler
ndscheduler/server/handlers/jobs.py
Handler.post
python
def post(self):
    self._validate_post_data()

    job_id = self.scheduler_manager.add_job(**self.json_args)
    self.datastore.add_audit_log(job_id, self.json_args['name'],
                                 constants.AUDIT_LOG_ADDED, user=self.username)

    response = {'job_id': job_id}
    self.set_status(201)
    self.write(response)
Adds a job. add_job() is a non-blocking operation, but audit log is a blocking operation. Handles an endpoint: POST /api/v1/jobs
https://github.com/nextdoor/ndscheduler/blob/d31016aaca480e38a69d75a66a9978a937c6a0b0/ndscheduler/server/handlers/jobs.py#L120-L141
import json import tornado.concurrent import tornado.gen import tornado.web from ndscheduler.corescheduler import constants from ndscheduler.corescheduler import utils from ndscheduler.server.handlers import base class Handler(base.BaseHandler): def _get_jobs(self): jobs = self.scheduler_manager.get_jobs() return_json = [] for job in jobs: return_json.append(self._build_job_dict(job)) return {'jobs': return_json} def _build_job_dict(self, job): if job.next_run_time: next_run_time = job.next_run_time.isoformat() else: next_run_time = '' return_dict = { 'job_id': job.id, 'name': job.name, 'next_run_time': next_run_time, 'job_class_string': utils.get_job_name(job), 'pub_args': utils.get_job_args(job)} return_dict.update(utils.get_cron_strings(job)) return return_dict @tornado.concurrent.run_on_executor def get_jobs(self): return self._get_jobs() @tornado.gen.engine def get_jobs_yield(self): return_json = yield self.get_jobs() self.finish(return_json) def _get_job(self, job_id): job = self.scheduler_manager.get_job(job_id) if not job: self.set_status(400) return {'error': 'Job not found: %s' % job_id} return self._build_job_dict(job) @tornado.concurrent.run_on_executor def get_job(self, job_id): return self._get_job(job_id) @tornado.gen.engine def get_job_yield(self, job_id): return_json = yield self.get_job(job_id) self.finish(return_json) @tornado.web.removeslash @tornado.web.asynchronous @tornado.gen.engine def get(self, job_id=None): if job_id is None: self.get_jobs_yield() else: self.get_job_yield(job_id) @tornado.web.removeslash
BSD 2-Clause Simplified License
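A minimal usage sketch for the endpoint handled by Handler.post above, assuming an ndscheduler server reachable at http://localhost:8888 (hypothetical host/port); the job name, job class path, and cron fields are placeholders chosen for illustration, not values taken from the source.

import requests

# Hypothetical job definition; the handler forwards this JSON body to
# scheduler_manager.add_job(**json_args) and writes the audit log.
payload = {
    "name": "nightly_cleanup",                        # placeholder name
    "job_class_string": "myjobs.cleanup.CleanupJob",  # placeholder job class
    "month": "*", "day_of_week": "*", "day": "*", "hour": "3", "minute": "0",
}

resp = requests.post("http://localhost:8888/api/v1/jobs", json=payload)
resp.raise_for_status()
print(resp.json()["job_id"])   # the handler responds with {'job_id': ...} and HTTP 201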
cartodb/cartoframes
cartoframes/data/observatory/catalog/entity.py
CatalogEntity.to_series
python
def to_series(self):
    return pd.Series(self.data)
Converts the entity instance to a pandas Series.
https://github.com/cartodb/cartoframes/blob/7c7392be5d15d0472ff428546c4791ed1a3842b0/cartoframes/data/observatory/catalog/entity.py#L99-L101
import pandas as pd from abc import ABC from geopandas import GeoDataFrame from carto.do_dataset import DODataset from . import subscriptions from ....utils.geom_utils import set_geometry from ....utils.logger import log _DATASET_READ_MSG = '''To load it as a DataFrame you can do: df = pandas.read_csv('{}') ''' _GEOGRAPHY_READ_MSG = '''To load it as a GeoDataFrame you can do: from cartoframes.utils import decode_geometry df = pandas.read_csv('{}') gdf = GeoDataFrame(df, geometry=decode_geometry(df['geom'])) ''' GEOM_COL = 'geom' class CatalogEntity(ABC): id_field = 'id' _entity_repo = None export_excluded_fields = ['summary_json', 'geom_coverage'] def __init__(self, data): self.data = data @property def id(self): return self.data[self.id_field] @property def slug(self): try: return self.data['slug'] except KeyError: return None @classmethod def get(cls, id_): return cls._entity_repo.get_by_id(id_) @classmethod def get_all(cls, filters=None): return cls._entity_repo.get_all(filters) @classmethod def get_list(cls, id_list): return cls._entity_repo.get_by_id_list(id_list)
BSD 3-Clause New or Revised License
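A quick illustration of CatalogEntity.to_series above, assuming cartoframes and its dependencies are installed; the 'id' and 'slug' values are fabricated placeholders, not real catalog entries.

from cartoframes.data.observatory.catalog.entity import CatalogEntity

# Minimal stand-in metadata; real entities are normally fetched via CatalogEntity.get(id_).
entity = CatalogEntity({'id': 'carto-do.dataset.example', 'slug': 'example_slug'})

s = entity.to_series()        # pandas Series indexed by the metadata keys
print(s['id'], s['slug'])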
mila-iqia/myia
myia/operations/prim_env_getitem.py
infer_env_getitem
python
async def infer_env_getitem(
    self, engine, env: xtype.EnvType, key: xtype.SymbolicKeyType, dflt
):
    expected = key.xvalue().abstract
    engine.abstract_merge(expected, dflt)
    return expected
Infer the return type of primitive `env_getitem`.
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/operations/prim_env_getitem.py#L14-L20
from .. import xtype
from ..lib import standard_prim
from . import primitives as P


def pyimpl_env_getitem(env, key, default):
    return env.get(key, default)


@standard_prim(P.env_getitem)
MIT License
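The pure-Python fallback shown in the context (pyimpl_env_getitem) makes the runtime semantics behind this inferrer easy to check; in the sketch below a plain dict stands in for the environment purely for illustration, assuming myia is importable.

from myia.operations.prim_env_getitem import pyimpl_env_getitem

env = {'grad_x': 3.0}   # dict used as a stand-in environment

print(pyimpl_env_getitem(env, 'grad_x', 0.0))   # -> 3.0 (key present)
print(pyimpl_env_getitem(env, 'grad_y', 0.0))   # -> 0.0 (falls back to the default)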
nyaruka/smartmin
smartmin/views.py
SmartFormMixin.get_form_class
python
def get_form_class(self):
    if self.form_class:
        form_class = self.form_class
    else:
        if self.model is not None:
            model = self.model
        elif hasattr(self, 'object') and self.object is not None:
            model = self.object.__class__
        else:
            model = self.get_queryset().model

        factory_kwargs = self.get_factory_kwargs()
        form_class = model_forms.modelform_factory(model, **factory_kwargs)

    return form_class
Returns the form class to use in this view
https://github.com/nyaruka/smartmin/blob/edaf589aee9de050e505059e30d61d0331908ef0/smartmin/views.py#L967-L991
import json import operator from functools import reduce import django.forms.models as model_forms from django import forms from django.conf import settings from django.conf.urls import url from django.contrib import messages from django.contrib.auth import REDIRECT_FIELD_NAME from django.core.exceptions import ImproperlyConfigured from django.urls import reverse from django.db import IntegrityError from django.db.models import Q from django.http import HttpResponseRedirect, HttpResponse, JsonResponse from django.utils.encoding import force_text from django.utils.http import urlquote from django.utils.translation import ugettext_lazy as _ from django.views.generic.edit import ModelFormMixin, UpdateView, CreateView, ProcessFormView, FormView from django.views.generic.base import TemplateView from django.views.generic import DetailView, ListView from smartmin.csv_imports.models import ImportTask from smartmin.mixins import NonAtomicMixin from . import widgets def smart_url(url, obj=None): if url.find("@") >= 0: (args, value) = url.split('@') if args: val = getattr(obj, args, None) return reverse(value, args=[val]) else: return reverse(value) else: if obj is None: return url else: return url % obj.id class SmartView: fields = None exclude = None field_config = {} title = None refresh = 0 template_name = None pjax = None url_name = None crudl = None def __init__(self, *args): self.extra_context = {} super(SmartView, self).__init__() def derive_title(self): return self.title @classmethod def derive_url_pattern(cls, path, action): return r'^%s/%s/$' % (path, action) def has_permission(self, request, *args, **kwargs): self.kwargs = kwargs self.args = args self.request = request if not getattr(self, 'permission', None): return True else: return request.user.has_perm(self.permission) def dispatch(self, request, *args, **kwargs): def wrapper(request, *args, **kwargs): if not self.has_permission(request, *args, **kwargs): path = urlquote(request.get_full_path()) login_url = kwargs.pop('login_url', settings.LOGIN_URL) redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME) return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path)) else: response = self.pre_process(request, *args, **kwargs) if not response: return super(SmartView, self).dispatch(request, *args, **kwargs) else: return response return wrapper(request, *args, **kwargs) def pre_process(self, request, *args, **kwargs): return None def lookup_obj_attribute(self, obj, field): curr_field = field.encode('ascii', 'ignore').decode("utf-8") rest = None if field.find('.') >= 0: curr_field = field.split('.')[0] rest = '.'.join(field.split('.')[1:]) obj_field = getattr(obj, curr_field, None) if obj_field and getattr(obj_field, '__call__', None): obj_field = obj_field() if obj_field and rest: return self.lookup_obj_attribute(obj_field, rest) else: return obj_field def lookup_field_value(self, context, obj, field): curr_field = field.encode('ascii', 'ignore').decode("utf-8") if field.find('.') == -1: view_method = getattr(self, 'get_%s' % curr_field, None) if view_method: return view_method(obj) return self.lookup_obj_attribute(obj, field) def lookup_field_label(self, context, field, default=None): if field.find('.') >= 0: return self.lookup_field_label(context, field.split('.')[-1], default) label = None if field in self.field_config and 'label' in self.field_config[field]: label = self.field_config[field]['label'] elif default: label = default else: for model_field in self.model._meta.fields: if 
model_field.name == field: return model_field.verbose_name.title() if label is None: label = self.derive_field_label(field) return label def lookup_field_help(self, field, default=None): help = None if field in self.field_config and 'help' in self.field_config[field]: help = self.field_config[field]['help'] elif default: help = default elif hasattr(self, 'model'): for model_field in self.model._meta.fields: if model_field.name == field: help = model_field.help_text break return help def lookup_field_class(self, field, obj=None, default=None): css = "" if field in self.field_config and 'class' in self.field_config[field]: css = self.field_config[field]['class'] elif default: css = default return css def derive_field_label(self, field, obj=None): label = field.replace('_', ' ').title() return label def derive_field_config(self): return self.field_config def get_template_names(self): templates = [] if getattr(self, 'template_name', None): templates.append(self.template_name) if getattr(self, 'default_template', None): templates.append(self.default_template) else: templates = super(SmartView, self).get_template_names() return templates def derive_fields(self): fields = [] if self.fields: fields.append(self.fields) return fields def derive_exclude(self): exclude = [] if self.exclude: exclude += self.exclude return exclude def derive_refresh(self): return self.refresh def get_context_data(self, **kwargs): context = super(SmartView, self).get_context_data(**kwargs) self.field_config = self.derive_field_config() self.fields = self.derive_fields() url_params = "?" order_params = "" for key in self.request.GET.keys(): if key != 'page' and key != 'pjax' and (len(key) == 0 or key[0] != '_'): for value in self.request.GET.getlist(key): url_params += "%s=%s&" % (urlquote(key), urlquote(value)) elif key == '_order': order_params = "&".join(["%s=%s" % (key, _) for _ in self.request.GET.getlist(key)]) context['url_params'] = url_params context['order_params'] = order_params + "&" context['pjax'] = self.pjax context['blocks'] = dict() context['fields'] = self.fields context['view'] = self context['field_config'] = self.field_config context['title'] = self.derive_title() context.update(self.extra_context) base_template = "base.html" if 'pjax' in self.request.GET or 'pjax' in self.request.POST: base_template = "smartmin/pjax.html" if 'HTTP_X_PJAX' in self.request.META: base_template = "smartmin/pjax.html" context['base_template'] = base_template refresh = self.derive_refresh() if refresh: context['refresh'] = refresh return context def as_json(self, context): raise NotImplementedError("this view can't be rendered as JSON") def render_to_response(self, context, **response_kwargs): if '_format' in self.request.GET and self.request.GET['_format'] == 'json': try: return JsonResponse(self.as_json(context), safe=False) except NotImplementedError: pass return super(SmartView, self).render_to_response(context) class SmartTemplateView(SmartView, TemplateView): pass def derive_single_object_url_pattern(slug_url_kwarg, path, action): if slug_url_kwarg: return r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg) else: return r'^%s/%s/(?P<pk>\d+)/$' % (path, action) class SmartSingleObjectView(SmartView): slug_field = None slug_url_kwarg = None def get_slug_field(self): return self.slug_field if self.slug_field else self.slug_url_kwarg class SmartReadView(SmartSingleObjectView, DetailView): default_template = 'smartmin/read.html' edit_button = None field_config = {'modified_blurb': dict(label="Modified"), 
'created_blurb': dict(label="Created")} @classmethod def derive_url_pattern(cls, path, action): return derive_single_object_url_pattern(cls.slug_url_kwarg, path, action) def derive_queryset(self): return super(SmartReadView, self).get_queryset() def get_queryset(self): self.queryset = self.derive_queryset() return self.queryset def derive_title(self): return str(self.object) def derive_fields(self): if self.fields: return list(self.fields) else: fields = [] for field in self.object._meta.fields: fields.append(field.name) exclude = self.derive_exclude() fields = [field for field in fields if field not in exclude] return fields def get_modified_blurb(self, obj): return "%s by %s" % (obj.modified_on.strftime("%B %d, %Y at %I:%M %p"), obj.modified_by) def get_created_blurb(self, obj): return "%s by %s" % (obj.created_on.strftime("%B %d, %Y at %I:%M %p"), obj.created_by) class SmartDeleteView(SmartSingleObjectView, DetailView, ProcessFormView): default_template = 'smartmin/delete_confirm.html' name_field = 'name' cancel_url = None redirect_url = None @classmethod def derive_url_pattern(cls, path, action): return derive_single_object_url_pattern(cls.slug_url_kwarg, path, action) def get_cancel_url(self): if not self.cancel_url: raise ImproperlyConfigured("DeleteView must define a cancel_url") return smart_url(self.cancel_url, self.object) def pre_delete(self, obj): if self.request.user.id and self.request.user.id > 0 and hasattr(obj, 'modified_by_id'): obj.modified_by = self.request.user def post(self, request, *args, **kwargs): self.object = self.get_object() self.pre_delete(self.object) redirect_url = self.get_redirect_url() self.object.delete() return HttpResponseRedirect(redirect_url) def get_redirect_url(self, **kwargs): if not self.redirect_url: raise ImproperlyConfigured("DeleteView must define a redirect_url") return smart_url(self.redirect_url) def get_context_data(self, **kwargs): context = super(SmartDeleteView, self).get_context_data(**kwargs) context['name_field'] = self.name_field context['cancel_url'] = self.get_cancel_url() return context class SmartListView(SmartView, ListView): default_template = 'smartmin/list.html' link_url = None link_fields = None add_button = None search_fields = None paginate_by = 25 field_config = {'is_active': dict(label='')} default_order = None select_related = None @classmethod def derive_url_pattern(cls, path, action): if action == 'list': return r'^%s/$' % (path) else: return r'^%s/%s/$' % (path, action) def derive_search_fields(self): return self.search_fields def derive_title(self): title = super(SmartListView, self).derive_title() if not title: return force_text(self.model._meta.verbose_name_plural).title() else: return title def derive_link_fields(self, context): if self.link_fields is not None: return self.link_fields else: link_fields = set() if self.fields: for field in self.fields: if field != 'is_active': link_fields.add(field) break return link_fields def lookup_field_link(self, context, field, obj): return smart_url(self.link_url, obj) def lookup_field_orderable(self, field): try: self.model._meta.get_field_by_name(field) return True except Exception: return False def get_context_data(self, **kwargs): context = super(SmartListView, self).get_context_data(**kwargs) self.link_fields = self.derive_link_fields(context) context['link_fields'] = self.link_fields if 'search' in self.request.GET: context['search'] = self.request.GET['search'] order = self.derive_ordering() if order: if order[0] == '-': context['order'] = order[1:] 
context['order_asc'] = False else: context['order'] = order context['order_asc'] = True return context def derive_select_related(self): return self.select_related def derive_queryset(self, **kwargs): queryset = super(SmartListView, self).get_queryset(**kwargs) search_fields = self.derive_search_fields() search_query = self.request.GET.get('search') if search_fields and search_query: term_queries = [] for term in search_query.split(' '): field_queries = [] for field in search_fields: field_queries.append(Q(**{field: term})) term_queries.append(reduce(operator.or_, field_queries)) queryset = queryset.filter(reduce(operator.and_, term_queries)) related = self.derive_select_related() if related: queryset = queryset.select_related(*related) return queryset def get_queryset(self, **kwargs): queryset = self.derive_queryset(**kwargs) return self.order_queryset(queryset) def derive_ordering(self): if '_order' in self.request.GET: return self.request.GET['_order'] elif self.default_order: return self.default_order else: return None def order_queryset(self, queryset): order = self.derive_ordering() if '_order' in self.request.GET: if order.lstrip('-') not in self.derive_fields(): order = None if order: if isinstance(order, str): order = (order,) queryset = queryset.order_by(*order) return queryset def derive_fields(self): if self.fields: return self.fields else: fields = [] for field in self.object_list.model._meta.fields: if field.name != 'id': fields.append(field.name) return fields def get_is_active(self, obj): if obj.is_active: return '<div class="active_icon"></div>' else: return '' def render_to_response(self, context, **response_kwargs): if self.request.GET.get('_format', 'html') == 'select2': results = [] for obj in context['object_list']: result = None if hasattr(obj, 'as_select2'): result = obj.as_select2() if not result: result = dict(id=obj.pk, text="%s" % obj) results.append(result) has_more = context['page_obj'].has_next() if context['page_obj'] else False json_data = dict(results=results, err='nil', more=has_more) return JsonResponse(json_data) else: return super(SmartListView, self).render_to_response(context) class SmartCsvView(SmartListView): def derive_filename(self): filename = getattr(self, 'filename', None) if not filename: filename = "%s.csv" % self.model._meta.verbose_name.lower() return filename def render_to_response(self, context, **response_kwargs): import csv response = HttpResponse(content_type='text/csv; charset=utf-8') response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename() writer = csv.writer(response, quoting=csv.QUOTE_ALL) fields = self.derive_fields() header = [] for field in fields: header.append(str(self.lookup_field_label(dict(), field))) writer.writerow([s.encode("utf-8") for s in header]) for obj in self.object_list: row = [] for field in fields: row.append(str(self.lookup_field_value(dict(), obj, field))) writer.writerow([s.encode("utf-8") for s in row]) return response class SmartXlsView(SmartListView): def derive_filename(self): filename = getattr(self, 'filename', None) if not filename: filename = "%s.xls" % self.model._meta.verbose_name.lower() return filename def render_to_response(self, context, **response_kwargs): from xlwt import Workbook book = Workbook() sheet1 = book.add_sheet(self.derive_title()) fields = self.derive_fields() for col in range(len(fields)): field = fields[col] sheet1.write(0, col, str(self.lookup_field_label(dict(), field))) for row in range(len(self.object_list)): obj = self.object_list[row] for col in 
range(len(fields)): field = fields[col] value = str(self.lookup_field_value(dict(), obj, field)) sheet1.write(row + 1, col, value) response = HttpResponse(content_type='application/vnd.ms-excel') response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename() book.save(response) return response class SmartFormMixin(object): readonly = () field_config = {'modified_blurb': dict(label="Modified"), 'created_blurb': dict(label="Created")} success_message = None submit_button_name = _("Submit") def derive_title(self): if not self.title: return _("Form") else: return self.title def derive_success_message(self): return self.success_message def get_form(self): self.form = super(SmartFormMixin, self).get_form() fields = list(self.derive_fields()) exclude = self.derive_exclude() exclude += self.derive_readonly() for field in exclude: if field in self.form.fields: del self.form.fields[field] if fields is not None: remove = [name for name in self.form.fields.keys() if name not in fields] for name in remove: del self.form.fields[name] location = forms.CharField(widget=forms.widgets.HiddenInput(), required=False) if ('HTTP_REFERER' in self.request.META): location.initial = self.request.META['HTTP_REFERER'] self.form.fields['loc'] = location if fields: fields.append('loc') for (name, field) in self.form.fields.items(): field = self.customize_form_field(name, field) self.form.fields[name] = field return self.form def customize_form_field(self, name, field): if isinstance(field, forms.fields.DateField) and isinstance(field.widget, forms.widgets.DateInput): field.widget = widgets.DatePickerWidget() field.input_formats = [field.widget.input_format[1]] + list(field.input_formats) if isinstance(field, forms.fields.ImageField) and isinstance(field.widget, forms.widgets.ClearableFileInput): field.widget = widgets.ImageThumbnailWidget() return field def lookup_field_label(self, context, field, default=None): default = None meta_labels = self.form._meta.labels if hasattr(self.form, "_meta") else {} if meta_labels and field in meta_labels: default = meta_labels[field] else: for form_field in self.form: if form_field.name == field: default = form_field.label break return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) def lookup_field_help(self, field, default=None): default = None meta_help_texts = self.form._meta.help_texts if hasattr(self.form, "_meta") else {} if meta_help_texts and field in meta_help_texts: default = meta_help_texts[field] else: for form_field in self.form: if form_field.name == field: default = form_field.help_text break return super(SmartFormMixin, self).lookup_field_help(field, default=default) def derive_readonly(self): readonly = list(self.readonly) for key, value in self.field_config.items(): if 'readonly' in value and value['readonly']: readonly.append(key) return readonly def derive_fields(self): if self.fields is not None: fields = list(self.fields) else: form = self.form fields = [] for field in form: fields.append(field.name) readonly = self.derive_readonly() if readonly: fields += readonly for exclude in self.derive_exclude(): if exclude in fields: fields.remove(exclude) return fields
BSD 3-Clause New or Revised License
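When a view built on SmartFormMixin declares only a model, get_form_class above falls back to building a ModelForm on the fly. The sketch below shows the call it effectively makes, assuming a configured Django project; the stock User model and the field names are chosen purely for illustration.

import django.forms.models as model_forms
from django.contrib.auth.models import User   # any installed model works for this sketch

# Equivalent of the else-branch in get_form_class() when only `model` is set on the view:
form_class = model_forms.modelform_factory(User, fields=('username', 'email'))
print(form_class.base_fields.keys())           # the generated ModelForm's fields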
nedbat/zellij
zellij/euclid.py
Line.intersect
python
def intersect(self, other):
    assert isinstance(other, Line)
    (x1, y1), (x2, y2) = self
    (x3, y3), (x4, y4) = other

    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if isclose(denom, 0):
        if line_collinear(self.p1, self.p2, other.p1):
            raise CoincidentLines("No intersection of identical lines")
        else:
            raise ParallelLines("No intersection of parallel lines")

    a = x1 * y2 - y1 * x2
    b = x3 * y4 - y3 * x4
    xi = (a * (x3 - x4) - b * (x1 - x2)) / denom
    yi = (a * (y3 - y4) - b * (y1 - y2)) / denom
    return Point(xi, yi)
Find the point where this Line and another intersect. Raises BadGeometry if the lines are parallel or coincident.
https://github.com/nedbat/zellij/blob/c86ce064995726c484d21a97f888843707c72889/zellij/euclid.py#L88-L112
from collections import namedtuple import math from .postulates import adjacent_pairs, overlap, fbetween, isclose, perturbed class BadGeometry(Exception): pass class ParallelLines(BadGeometry): pass class CoincidentLines(BadGeometry): pass class Point(namedtuple("Point", ["x", "y"])): def __repr__(self): return f"Point({self.x}, {self.y})" def is_close(self, other): assert isinstance(other, Point) x1, y1 = self x2, y2 = other return isclose(x1, x2) and isclose(y1, y2) def distance(self, other): assert isinstance(other, Point) x1, y1 = self x2, y2 = other return math.hypot(x2 - x1, y2 - y1) def transform(self, xform): return Point(*(xform * self)) def perturb(self, jitter): x, y = self return Point(perturbed(x, jitter), perturbed(y, jitter)) def line_collinear(p1, p2, p3): (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3 return isclose((y1 - y2) * (x1 - x3), (y1 - y3) * (x1 - x2)) def collinear(p1, p2, p3): (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3 if fbetween(x1, x2, x3) and fbetween(y1, y2, y3): return line_collinear(p1, p2, p3) else: return False def along_the_way(p1, p2, t): return Point(p1.x + (p2.x - p1.x) * t, p1.y + (p2.y - p1.y) * t) class Line(namedtuple("Line", ["p1", "p2"])): def angle(self): (x1, y1), (x2, y2) = self return math.degrees(math.atan2(y2 - y1, x2 - x1))
Apache License 2.0
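Line.intersect above works directly on the Point and Line namedtuples defined in the same module. A small check, assuming the zellij package is importable; the intersection of the two unit-square diagonals was verified by hand from the formula.

from zellij.euclid import Point, Line, ParallelLines

a = Line(Point(0, 0), Point(1, 1))
b = Line(Point(0, 1), Point(1, 0))
print(a.intersect(b))                              # Point(0.5, 0.5)

try:
    a.intersect(Line(Point(0, 1), Point(1, 2)))    # same slope as `a`, shifted up
except ParallelLines as exc:
    print(exc)                                     # No intersection of parallel lines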
numba/numba
numba/core/entrypoints.py
init_all
python
def init_all():
    global _already_initialized
    if _already_initialized:
        return

    _already_initialized = True

    for entry_point in iter_entry_points('numba_extensions', 'init'):
        logger.debug('Loading extension: %s', entry_point)
        try:
            func = entry_point.load()
            func()
        except Exception as e:
            msg = "Numba extension module '{}' failed to load due to '{}({})'."
            warnings.warn(msg.format(entry_point.module_name, type(e).__name__,
                                     str(e)), stacklevel=2)
            logger.debug('Extension loading failed for: %s', entry_point)
Execute all `numba_extensions` entry points with the name `init` If extensions have already been initialized, this function does nothing.
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/entrypoints.py#L10-L31
import logging
import warnings

from pkg_resources import iter_entry_points

_already_initialized = False

logger = logging.getLogger(__name__)
BSD 2-Clause Simplified License
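init_all above discovers extensions through the 'numba_extensions' entry-point group, so hooking into it is a packaging concern rather than an API call. A hedged sketch of what a third-party package's setup.py might declare; the package and function names are made up.

# setup.py of a hypothetical extension package
from setuptools import setup

setup(
    name="numba-myextension",
    packages=["numba_myextension"],
    entry_points={
        # init_all() loads every entry point named 'init' in this group and calls it once.
        "numba_extensions": [
            "init = numba_myextension:init_func",
        ],
    },
)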
unofficial-memsource/memsource-cli-client
memsource_cli/models/user_edit_dto_v2.py
UserEditDtoV2.source_langs
python
def source_langs(self, source_langs):
    self._source_langs = source_langs
Sets the source_langs of this UserEditDtoV2. :param source_langs: The source_langs of this UserEditDtoV2. # noqa: E501 :type: list[str]
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/user_edit_dto_v2.py#L456-L464
import pprint import re import six from memsource_cli.models.id_reference import IdReference class UserEditDtoV2(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'user_name': 'str', 'first_name': 'str', 'last_name': 'str', 'email': 'str', 'role': 'str', 'timezone': 'str', 'note': 'str', 'may_edit_approved_terms': 'bool', 'may_reject_jobs': 'bool', 'editor_machine_translate_enabled': 'bool', 'receive_newsletter': 'bool', 'may_edit_translation_memory': 'bool', 'source_langs': 'list[str]', 'target_langs': 'list[str]', 'active': 'bool', 'workflow_steps': 'list[IdReference]', 'clients': 'list[IdReference]', 'domains': 'list[IdReference]', 'sub_domains': 'list[IdReference]', 'project_business_units': 'list[IdReference]' } attribute_map = { 'user_name': 'userName', 'first_name': 'firstName', 'last_name': 'lastName', 'email': 'email', 'role': 'role', 'timezone': 'timezone', 'note': 'note', 'may_edit_approved_terms': 'mayEditApprovedTerms', 'may_reject_jobs': 'mayRejectJobs', 'editor_machine_translate_enabled': 'editorMachineTranslateEnabled', 'receive_newsletter': 'receiveNewsletter', 'may_edit_translation_memory': 'mayEditTranslationMemory', 'source_langs': 'sourceLangs', 'target_langs': 'targetLangs', 'active': 'active', 'workflow_steps': 'workflowSteps', 'clients': 'clients', 'domains': 'domains', 'sub_domains': 'subDomains', 'project_business_units': 'projectBusinessUnits' } def __init__(self, user_name=None, first_name=None, last_name=None, email=None, role=None, timezone=None, note=None, may_edit_approved_terms=None, may_reject_jobs=None, editor_machine_translate_enabled=None, receive_newsletter=None, may_edit_translation_memory=None, source_langs=None, target_langs=None, active=None, workflow_steps=None, clients=None, domains=None, sub_domains=None, project_business_units=None): self._user_name = None self._first_name = None self._last_name = None self._email = None self._role = None self._timezone = None self._note = None self._may_edit_approved_terms = None self._may_reject_jobs = None self._editor_machine_translate_enabled = None self._receive_newsletter = None self._may_edit_translation_memory = None self._source_langs = None self._target_langs = None self._active = None self._workflow_steps = None self._clients = None self._domains = None self._sub_domains = None self._project_business_units = None self.discriminator = None self.user_name = user_name self.first_name = first_name self.last_name = last_name self.email = email self.role = role self.timezone = timezone if note is not None: self.note = note if may_edit_approved_terms is not None: self.may_edit_approved_terms = may_edit_approved_terms if may_reject_jobs is not None: self.may_reject_jobs = may_reject_jobs if editor_machine_translate_enabled is not None: self.editor_machine_translate_enabled = editor_machine_translate_enabled if receive_newsletter is not None: self.receive_newsletter = receive_newsletter if may_edit_translation_memory is not None: self.may_edit_translation_memory = may_edit_translation_memory if source_langs is not None: self.source_langs = source_langs if target_langs is not None: self.target_langs = target_langs if active is not None: self.active = active if workflow_steps is not None: self.workflow_steps = workflow_steps if clients is not None: self.clients = clients if domains is not None: self.domains = domains if sub_domains is not None: 
self.sub_domains = sub_domains if project_business_units is not None: self.project_business_units = project_business_units @property def user_name(self): return self._user_name @user_name.setter def user_name(self, user_name): if user_name is None: raise ValueError("Invalid value for `user_name`, must not be `None`") if user_name is not None and len(user_name) > 255: raise ValueError("Invalid value for `user_name`, length must be less than or equal to `255`") if user_name is not None and len(user_name) < 0: raise ValueError("Invalid value for `user_name`, length must be greater than or equal to `0`") self._user_name = user_name @property def first_name(self): return self._first_name @first_name.setter def first_name(self, first_name): if first_name is None: raise ValueError("Invalid value for `first_name`, must not be `None`") if first_name is not None and len(first_name) > 255: raise ValueError("Invalid value for `first_name`, length must be less than or equal to `255`") if first_name is not None and len(first_name) < 0: raise ValueError("Invalid value for `first_name`, length must be greater than or equal to `0`") self._first_name = first_name @property def last_name(self): return self._last_name @last_name.setter def last_name(self, last_name): if last_name is None: raise ValueError("Invalid value for `last_name`, must not be `None`") if last_name is not None and len(last_name) > 255: raise ValueError("Invalid value for `last_name`, length must be less than or equal to `255`") if last_name is not None and len(last_name) < 0: raise ValueError("Invalid value for `last_name`, length must be greater than or equal to `0`") self._last_name = last_name @property def email(self): return self._email @email.setter def email(self, email): if email is None: raise ValueError("Invalid value for `email`, must not be `None`") if email is not None and len(email) > 255: raise ValueError("Invalid value for `email`, length must be less than or equal to `255`") if email is not None and len(email) < 0: raise ValueError("Invalid value for `email`, length must be greater than or equal to `0`") self._email = email @property def role(self): return self._role @role.setter def role(self, role): if role is None: raise ValueError("Invalid value for `role`, must not be `None`") allowed_values = ["ADMIN", "PROJECT_MANAGER", "LINGUIST", "GUEST", "SUBMITTER"] if role not in allowed_values: raise ValueError( "Invalid value for `role` ({0}), must be one of {1}" .format(role, allowed_values) ) self._role = role @property def timezone(self): return self._timezone @timezone.setter def timezone(self, timezone): if timezone is None: raise ValueError("Invalid value for `timezone`, must not be `None`") if timezone is not None and len(timezone) > 255: raise ValueError("Invalid value for `timezone`, length must be less than or equal to `255`") if timezone is not None and len(timezone) < 0: raise ValueError("Invalid value for `timezone`, length must be greater than or equal to `0`") self._timezone = timezone @property def note(self): return self._note @note.setter def note(self, note): if note is not None and len(note) > 4096: raise ValueError("Invalid value for `note`, length must be less than or equal to `4096`") if note is not None and len(note) < 0: raise ValueError("Invalid value for `note`, length must be greater than or equal to `0`") self._note = note @property def may_edit_approved_terms(self): return self._may_edit_approved_terms @may_edit_approved_terms.setter def may_edit_approved_terms(self, may_edit_approved_terms): 
self._may_edit_approved_terms = may_edit_approved_terms @property def may_reject_jobs(self): return self._may_reject_jobs @may_reject_jobs.setter def may_reject_jobs(self, may_reject_jobs): self._may_reject_jobs = may_reject_jobs @property def editor_machine_translate_enabled(self): return self._editor_machine_translate_enabled @editor_machine_translate_enabled.setter def editor_machine_translate_enabled(self, editor_machine_translate_enabled): self._editor_machine_translate_enabled = editor_machine_translate_enabled @property def receive_newsletter(self): return self._receive_newsletter @receive_newsletter.setter def receive_newsletter(self, receive_newsletter): self._receive_newsletter = receive_newsletter @property def may_edit_translation_memory(self): return self._may_edit_translation_memory @may_edit_translation_memory.setter def may_edit_translation_memory(self, may_edit_translation_memory): self._may_edit_translation_memory = may_edit_translation_memory @property def source_langs(self): return self._source_langs @source_langs.setter
Apache License 2.0
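The source_langs setter above performs no validation beyond assignment. A short sketch of constructing the DTO and updating the languages, assuming memsource_cli is installed; every field value is a placeholder.

from memsource_cli.models.user_edit_dto_v2 import UserEditDtoV2

user = UserEditDtoV2(
    user_name="jdoe", first_name="Jane", last_name="Doe",          # placeholder identity
    email="jane.doe@example.com", role="LINGUIST", timezone="Europe/Prague",
)
user.source_langs = ["en", "en_gb"]    # stored as-is on _source_langs
print(user.source_langs)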
libtcod/python-tcod
tcod/random.py
Random.__setstate__
python
def __setstate__(self, state: Any) -> None:
    try:
        cdata = state["random_c"]
    except KeyError:
        cdata = state["cdata"]
        del state["cdata"]
    state["random_c"] = ffi.new("mersenne_data_t*", cdata)
    self.__dict__.update(state)
Create a new cdata object with the stored parameters.
https://github.com/libtcod/python-tcod/blob/5e946635ff0c89dd9f6fe6f62f21229ba91e171d/tcod/random.py#L154-L162
from __future__ import annotations import os import random import warnings from typing import Any, Hashable, Optional import tcod.constants from tcod.loader import ffi, lib MERSENNE_TWISTER = tcod.constants.RNG_MT COMPLEMENTARY_MULTIPLY_WITH_CARRY = tcod.constants.RNG_CMWC MULTIPLY_WITH_CARRY = tcod.constants.RNG_CMWC class Random(object): def __init__( self, algorithm: int = MERSENNE_TWISTER, seed: Optional[Hashable] = None, ): if seed is None: seed = random.getrandbits(32) elif not isinstance(seed, int): warnings.warn( "In the future this class will only accept integer seeds.", DeprecationWarning, stacklevel=2, ) if __debug__ and "PYTHONHASHSEED" not in os.environ: warnings.warn( "Python's hash algorithm is not configured to be" " deterministic so this non-integer seed will not be" " deterministic." "\nYou should do one of the following to fix this error:" "\n* Use an integer as a seed instead (recommended.)" "\n* Set the PYTHONHASHSEED environment variable before" " starting Python.", RuntimeWarning, stacklevel=2, ) seed = hash(seed) self.random_c = ffi.gc( ffi.cast( "mersenne_data_t*", lib.TCOD_random_new_from_seed(algorithm, seed & 0xFFFFFFFF), ), lib.TCOD_random_delete, ) @classmethod def _new_from_cdata(cls, cdata: Any) -> Random: self: Random = object.__new__(cls) self.random_c = cdata return self def randint(self, low: int, high: int) -> int: return int(lib.TCOD_random_get_i(self.random_c, low, high)) def uniform(self, low: float, high: float) -> float: return float(lib.TCOD_random_get_double(self.random_c, low, high)) def guass(self, mu: float, sigma: float) -> float: return float(lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma)) def inverse_guass(self, mu: float, sigma: float) -> float: return float(lib.TCOD_random_get_gaussian_double_inv(self.random_c, mu, sigma)) def __getstate__(self) -> Any: state = self.__dict__.copy() state["random_c"] = { "algo": self.random_c.algo, "distribution": self.random_c.distribution, "mt": list(self.random_c.mt), "cur_mt": self.random_c.cur_mt, "Q": list(self.random_c.Q), "c": self.random_c.c, "cur": self.random_c.cur, } return state
BSD 2-Clause Simplified License
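__setstate__ above exists so Random instances survive pickling: __getstate__ snapshots the Mersenne state as plain Python values and __setstate__ rebuilds the C struct. A small round-trip sketch; the seed value is arbitrary.

import pickle

import tcod.random

rng = tcod.random.Random(seed=42)
restored = pickle.loads(pickle.dumps(rng))   # __getstate__/__setstate__ rebuild the C state

# Both generators should now produce the same stream.
assert rng.randint(0, 100) == restored.randint(0, 100)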
abelfunctions/abelfunctions
abelfunctions/puiseux.py
newton_polygon
python
def newton_polygon(H, additional_points=[]):
    R = H.parent()
    x, y = R.gens()

    monomials = H.monomials()
    points = [(monom.degree(y), monom.degree(x)) for monom in monomials]
    support = [Point(pt) for pt in points] + additional_points

    i0 = min(P.x for P in support if P.y == 0)
    j0 = min(P.y for P in support if P.x == 0)
    support = [P for P in support if P.x <= i0 and P.y <= j0]
    convex_hull = sympy.convex_hull(*support)

    if isinstance(convex_hull, Point):
        P = (convex_hull.x, convex_hull.y)
        return [[P]]
    elif isinstance(convex_hull, Segment):
        P = convex_hull.p1
        convex_hull = generalized_polygon_side(convex_hull)
        support.remove(P)
        support.append(convex_hull.p1)
        sides = [convex_hull]
    else:
        sides = convex_hull.sides

        first_side = generalized_polygon_side(sides[0])
        if first_side != sides[0]:
            P = first_side.p1
            return newton_polygon(H, additional_points=[P])

    polygon = []
    for side in sides:
        polygon_side = [P for P in support if P in side]
        polygon_side = sorted(map(lambda P: (int(P.x), int(P.y)), polygon_side))
        polygon.append(polygon_side)
        if side.p2.y == 0:
            break
    return polygon
Computes the Newton polygon of `H`. It's assumed that the first generator of `H` here is the "dependent variable". For example, if `H = H(x,y)` and we are aiming to compute a `y`-covering of the complex `x`-sphere then each monomial of `H` is of the form .. math:: a_{ij} x^j y^i. Parameters ---------- H : bivariate polynomial Returns ------- list Returns a list where each element is a list, representing a side of the polygon, which in turn contains tuples representing the points on the side. Note ---- This is written using Sympy's convex hull algorithm for legacy purposes. It can certainly be rewritten to use Sage's Polytope but do so *very carefully*! There are a number of subtle things going on here due to the fact that boundary points are ignored.
https://github.com/abelfunctions/abelfunctions/blob/67757a0b3744191c179ca4757e0db4a312bfd86a/abelfunctions/puiseux.py#L63-L140
import numpy
import sympy

from abelfunctions.puiseux_series_ring import PuiseuxSeriesRing
from sage.all import xgcd
from sage.functions.log import log
from sage.functions.other import ceil
from sage.rings.big_oh import O
from sage.rings.infinity import infinity
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.qqbar import QQbar
from sage.rings.rational_field import QQ
from sympy import Point, Segment


def newton_polygon_exceptional(H):
    R = H.parent()
    x, y = R.gens()
    d = H(0, y).degree(y)
    return [[(0, 0), (d, 0)]]
MIT License
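A small worked example of newton_polygon above, hedged because it assumes a Sage session with abelfunctions installed: for the classical cusp H = y^2 - x^3 the monomials give support points (2, 0) and (0, 3) in the (y-degree, x-degree) convention used by the code, so the lower hull should consist of a single side joining those two points, whose slope encodes the leading Puiseux exponent 3/2 (y ~ x^(3/2)).

from sage.all import QQ, PolynomialRing
from abelfunctions.puiseux import newton_polygon

R = PolynomialRing(QQ, ['x', 'y'])
x, y = R.gens()

H = y**2 - x**3             # the classical cusp
print(newton_polygon(H))    # expected: a single side through (2, 0) and (0, 3)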
mseitzer/csmri-refinement
data/reconstruction/deep_med_lib/my_pytorch/myImageTransformations.py
get_fspecial
python
def get_fspecial(length, angle):
    length = max(1, length)
    half = (length - 1) / 2
    phi = np.mod(angle, 180) / 180 * np.pi

    cosphi = np.cos(phi)
    sinphi = np.sin(phi)
    xsign = np.int16(np.sign(cosphi))
    linewdt = 1
    eps = 2.2204e-16

    sx = np.fix(half * cosphi + linewdt * xsign - length * eps)
    sy = np.fix(half * sinphi + linewdt - length * eps)

    [x, y] = np.meshgrid(np.arange(0, np.int16(sx.flatten()[0]) + (xsign * 1), xsign),
                         np.arange(0, np.int16(sy.flatten()[0]) + 1))

    dist2line = (y * cosphi - x * sinphi)
    rad = np.sqrt(x**2 + y**2)

    lastpix = np.logical_and((rad >= half), (np.abs(dist2line) <= linewdt))
    x2lastpix = half - np.abs((x[lastpix] + dist2line[lastpix] * sinphi) / cosphi)
    dist2line[lastpix] = np.sqrt(dist2line[lastpix] ** 2 + x2lastpix ** 2)
    dist2line = linewdt + eps - np.abs(dist2line)
    dist2line[dist2line < 0] = 0

    kernel = np.rot90(dist2line, 2)
    n_kernel = np.zeros((kernel.shape[0] + dist2line.shape[0] - 1,
                         kernel.shape[1] + dist2line.shape[1] - 1))
    n_kernel[0:kernel.shape[0], 0:kernel.shape[1]] = kernel
    n_kernel[kernel.shape[0] - 1:, kernel.shape[1] - 1:] = dist2line
    n_kernel = n_kernel / (np.sum(n_kernel) + eps * length * length)

    if cosphi > 0:
        n_kernel = np.flipud(n_kernel)
    return n_kernel
Motion kernel is adapted from MATLAB's fspecial('motion') Challenge slide says: blur length: max ~25 px angle range: 0-180 degree
https://github.com/mseitzer/csmri-refinement/blob/2cc8a691c03602c2a7c78c6144469ee00a7d64d6/data/reconstruction/deep_med_lib/my_pytorch/myImageTransformations.py#L279-L330
import numpy as np import scipy import scipy.ndimage from scipy.ndimage.filters import gaussian_filter from scipy.ndimage.interpolation import map_coordinates from scipy.stats import truncnorm import collections import logging from PIL import Image import numbers import cv2 from ..utils import compressed_sensing as cs from ..utils import mymath from ..utils import dnn_io __author__ = "Wei OUYANG" __license__ = "GPL" __version__ = "0.1.0" __status__ = "Development" def get_mask_generator(sampling_scheme, im_shape, acceleration_factor, variable=False, var_type='uniform', rng=None): if rng is None: rng = np.random logging.debug("sampling scheme: {}".format(sampling_scheme)) size = im_shape[-1] def mask_gen(): if sampling_scheme == 'radial': if variable: x_in = np.arange(1, size//2) if var_type == 'aggressive': pdf = np.minimum(0.5, np.exp(-2*np.linspace(0, 4, len(x_in))) + 1./size) pdf = pdf / np.sum(pdf) acc_factors = rng.choice(x_in, im_shape[0], p=pdf) else: acc_factors = rng.randint(1, len(x_in), im_shape[0]) mask = [] for i in range(im_shape[0]): mask.append(cs.radial_sampling((1, size, size), acc_factors[i], rand=True, golden_angle=True, centred=False, rng=rng)) mask = np.array(mask) mask = mask.reshape(im_shape) else: n_lines = acceleration_factor mask = cs.radial_sampling(im_shape, n_lines, rand=True, golden_angle=True, centred=False, rng=rng) else: central_lines = 8 if variable: mask = np.zeros(im_shape) for i in range(im_shape[0]): acc_r = float(rng.uniform(1, acceleration_factor*1.5)) mask[i] = cs.cartesian_mask(mask.shape[1:], acc_r, central_lines, centred=False, rng=rng) else: mask = cs.cartesian_mask(im_shape, acceleration_factor, central_lines, centred=False, rng=rng) return mask return mask_gen def undersample(im, mask, rng=None): im_und, k_und = cs.undersample(im, mask, centred=False, norm='ortho', rng=rng) und_sampl_rate = np.sum(mask.ravel()) * 1. 
/ mask.size return im_und, k_und, mask, und_sampl_rate def get_truncated_normal(mean=0, sd=1, low=0, upp=10): return truncnorm( (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd) def center_crop(x, center_crop_size): assert x.ndim == 3 centerw, centerh = x.shape[1] // 2, x.shape[2] // 2 halfw, halfh = center_crop_size[0] // 2, center_crop_size[1] // 2 return x[:, centerw - halfw:centerw + halfw, centerh - halfh:centerh + halfh] def crop_image_at(image, cx, cy, sx, sy): X, Y = image.shape[:2] r1, r2 = sx // 2, sy //2 x1, x2 = cx - r1, cx + r1 y1, y2 = cy - r2, cy + r2 x1_, x2_ = max(x1, 0), min(x2, X) y1_, y2_ = max(y1, 0), min(y2, Y) crop = image[x1_: x2_, y1_: y2_] crop = np.pad(crop, ((x1_ - x1, x2 - x2_), (y1_ - y1, y2 - y2_)) + ((0, 0),) * (crop.ndim-2), 'constant') return crop def to_tensor(x): import torch x = x.transpose((2, 0, 1)) return torch.from_numpy(x).float() def get_attribute(attribute, random_state): if isinstance(attribute, collections.Sequence): attr = random_num_generator(attribute, random_state=random_state) else: attr = attribute return attr def random_num_generator(config, random_state=np.random): if config[0] == 'uniform': ret = random_state.uniform(config[1], config[2], 1)[0] elif config[0] == 'lognormal': ret = random_state.lognormal(config[1], config[2], 1)[0] else: raise Exception('unsupported format') return ret def poisson_downsampling(image, peak, random_state=np.random): if not isinstance(image, np.ndarray): imgArr = np.array(image, dtype='float32') else: imgArr = image.astype('float32') Q = imgArr.max(axis=(0, 1)) / peak if Q[0] == 0: return imgArr ima_lambda = imgArr / Q noisy_img = random_state.poisson(lam=ima_lambda) return noisy_img.astype('float32') def apply_gaussian_noise(im_in, mean=0, sigma=0.01): low_clip = -1. if im_in.min() < 0 else 0 noise = np.random.normal(mean, sigma, im_in.shape) return np.clip(im_in + noise, low_clip, 255.) def apply_poission_matlab(im_in): low_clip = -1. if im_in.min() < 0 else 0 vals = len(np.unique(im_in)) vals = 2 ** np.ceil(np.log2(vals)) if low_clip == -1.: old_max = im_in.max() im_in = (im_in + 1.) / (old_max + 1.) out = np.random.poisson(im_in * vals) / float(vals) if low_clip == -1.: out = out * (old_max + 1.) - 1. return np.clip(out, low_clip, 255.) def apply_salt_and_pepper_noise(im_in, amount=0.1, salt_vs_pepper=0.5): out = im_in.copy() low_clip = -1. if im_in.min() < 0 else 0 p = amount q = salt_vs_pepper flipped = np.random.choice([True, False], size=im_in.shape, p=[p, 1 - p]) salted = np.random.choice([True, False], size=im_in.shape, p=[q, 1 - q]) peppered = ~salted out[flipped & salted] = 255. out[flipped & peppered] = low_clip return np.clip(out, low_clip, 255.) def apply_speckle_noise(im_in, mean=0, sigma=0.01): low_clip = -1. if im_in.min() < 0 else 0 noise = np.random.normal(mean, sigma, im_in.shape) return np.clip(im_in + im_in * noise, low_clip, 255.) 
def affine_transform(image, alpha_affine, borderMode=cv2.BORDER_CONSTANT): imshape = image.shape shape_size = imshape[:2] center_square = np.float32(shape_size) // 2 square_size = min(shape_size) // 3 pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size]) pts2 = pts1 + np.random.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32) M = cv2.getAffineTransform(pts1, pts2) warped = cv2.warpAffine(image.reshape(shape_size + (-1,)), M, shape_size[::-1], flags=cv2.INTER_NEAREST, borderMode=borderMode) warped = warped[..., np.newaxis].reshape(imshape) return warped def perspective_transform(image, alpha_warp): shape_size = image.shape[:2] center_square = np.float32(shape_size) // 2 square_size = min(shape_size) // 3 pts1 = np.float32([center_square + square_size, [center_square[0]-square_size, center_square[1]+square_size], center_square - square_size, [center_square[0]+square_size, center_square[1]-square_size]]) pts2 = pts1 + np.random.uniform(-alpha_warp, alpha_warp, size=pts1.shape).astype(np.float32) M = cv2.getPerspectiveTransform(pts1, pts2) return cv2.warpPerspective(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101) def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random): assert image.ndim == 3 shape = image.shape[:2] dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij') indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))] result = np.empty_like(image) for i in range(image.shape[2]): result[:, :, i] = map_coordinates( image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape) return result def get_motion_blur_kernel(l=None, th=None): kernel_motion_blur = np.zeros((l, l)) kernel_motion_blur[int((l-1)/2), :] = np.ones(l) kernel_motion_blur = kernel_motion_blur / l M = cv2.getRotationMatrix2D((l/2,l/2), th, 1) kernel_motion_blur_rotated = cv2.warpAffine(kernel_motion_blur, M, (l, l)) return kernel_motion_blur_rotated
Apache License 2.0
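get_fspecial above only builds the motion-blur kernel; applying the blur is a separate convolution step. A hedged sketch using scipy (already imported by the surrounding module); the blur length, angle, and random image are arbitrary stand-ins, and the import path assumes the module above is on the Python path.

import numpy as np
import scipy.ndimage

# Assumption: the module above is importable under this name.
from myImageTransformations import get_fspecial

kernel = get_fspecial(15, 45)            # 15 px blur at 45 degrees
image = np.random.rand(128, 128)         # stand-in image

blurred = scipy.ndimage.convolve(image, kernel, mode='nearest')
print(kernel.shape, blurred.shape)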
commvault/cvpysdk
cvpysdk/drorchestration/failovergroups.py
FailoverGroups.refresh
python
def refresh(self):
    self._failovergroups = self._get_failover_groups()
Refresh the failover groups created in the commcell. Args: Returns: Raises:
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/drorchestration/failovergroups.py#L370-L379
from __future__ import absolute_import from __future__ import unicode_literals from past.builtins import basestring from ..exception import SDKException from .drorchestrationoperations import DROrchestrationOperations from cvpysdk.instances.vsinstance import VirtualServerInstance class FailoverGroups(object): def __init__(self, commcell_object, instance_object: VirtualServerInstance = None): self._commcell_object = commcell_object self._client_object = commcell_object.clients self._services = commcell_object._services self._instance_object = instance_object self._DRGROUPS = self._commcell_object._services['DR_GROUPS'] self._DRGROUPS_MACHINES = self._commcell_object._services['DR_GROUP_MACHINES'] self._vclientId = None self._failovergroups = None self.refresh() def __str__(self): representation_string = '{:^5}\t{:^20}\t{:^20}\n\n'.format( 'S. No.', 'Failover Group Id', 'Failover Group') for index, failover_group in enumerate(self._failovergroups): sub_str = '{:^5}\t{:20}\t{:20}\n'.format( index + 1, self._failovergroups[failover_group], failover_group ) representation_string += sub_str return representation_string.strip() def __repr__(self): return "Failover Groups for Commserv: '{0}'".format( self._commcell_object.commserv_name) def has_failover_group(self, failover_group_name): if not isinstance(failover_group_name, basestring): raise SDKException('FailoverGroup', '101') return self.failover_groups and failover_group_name.lower() in self.failover_groups def add(self, failover_group_options=None): self._check_failover_group_options(failover_group_options) add_failover_group_json = self._prepare_add_failover_group_json( failover_group_options) if not add_failover_group_json: raise SDKException( 'FailoverGroup', '102', 'Failed to construct add failover group json.') (flag, response) = self._commcell_object._cvpysdk_object.make_request( 'POST', self._DRGROUPS, add_failover_group_json) if flag: if response.json(): if 'error' in response.json(): error_message = response.json()['error']['errorMessage'] o_str = 'Failed to create failover group \nError: "{0}"'.format( error_message) raise SDKException('FailoverGroup', '102', o_str) else: self.refresh() return self.get(failover_group_options) else: raise SDKException('Response', '102') else: response_string = self._commcell_object._update_response_( response.text) raise SDKException('Response', '101', response_string) def get(self, failover_group_options): if not isinstance(failover_group_options, dict): raise SDKException('FailoverGroup', '101') else: failover_group_name = failover_group_options.get( 'failoverGroupName').lower() if self.has_failover_group(failover_group_name): return FailoverGroup( self._commcell_object, failover_group_options) raise SDKException( 'Failover', '102', 'Failover group doesnt exists with name: {0}'.format(failover_group_name)) def delete(self, failover_group_name): if not isinstance(failover_group_name, basestring): raise SDKException('FailoverGroup', '101') else: failover_group_name = failover_group_name.lower() if self.has_failover_group(failover_group_name): failover_group_id = self.failover_groups.get( failover_group_name.lower()) if failover_group_id: _GET_DR_GROUP = self._commcell_object._services['GET_DR_GROUP'] % ( failover_group_id) (flag, response) = self._commcell_object._cvpysdk_object.make_request( method='DELETE', url=_GET_DR_GROUP) if flag: if response.json(): if 'error' in response.json(): error_message = response.json( )['error']['errorMessage'] o_str = 'Failed to delete failover group: {0} \nError: 
"{1}"' .format(failover_group_name, error_message) raise SDKException('Failover', '102', o_str) else: self.refresh() else: raise SDKException('Response', '102') else: response_string = self._commcell_object._update_response_( response.text) raise SDKException('Response', '101', response_string) else: raise SDKException( 'Failover', '102', 'No failovergroup exists with name: "{0}"'.format( failover_group_name) )
Apache License 2.0
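refresh above simply re-runs the private group discovery; in practice it keeps a long-lived FailoverGroups object in sync after groups are added or deleted elsewhere. A hedged sketch; the Commcell credentials and the group name are placeholders.

from cvpysdk.commcell import Commcell
from cvpysdk.drorchestration.failovergroups import FailoverGroups

commcell = Commcell('webconsole.example.com', 'admin', '******')   # placeholder credentials
groups = FailoverGroups(commcell)

groups.refresh()                                  # re-read groups from the Commserv
if groups.has_failover_group('dr_group_east'):    # placeholder group name
    print(groups)                                 # tabular listing from __str__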
napalm-automation/napalm
napalm/pyIOSXR/iosxr.py
IOSXR.lock
python
def lock(self):
    if not self.locked:
        rpc_command = "<Lock/>"
        try:
            self._execute_rpc(rpc_command)
        except XMLCLIError:
            raise LockError("Unable to enter in configure exclusive mode!", self)
        self.locked = True
Lock the config database. Use if Locking/Unlocking is not performed automatically by lock=False
https://github.com/napalm-automation/napalm/blob/48a73ef16f3454dd14a2be87f08c9895ea601fa2/napalm/pyIOSXR/iosxr.py#L526-L538
import re import time import difflib import logging from threading import Lock from xml.sax.saxutils import escape as escape_xml from lxml import etree as ET from netmiko import ConnectHandler from netmiko.ssh_exception import NetMikoTimeoutException from netmiko.ssh_exception import NetMikoAuthenticationException from napalm.pyIOSXR.exceptions import LockError from napalm.pyIOSXR.exceptions import UnlockError from napalm.pyIOSXR.exceptions import XMLCLIError from napalm.pyIOSXR.exceptions import CommitError from napalm.pyIOSXR.exceptions import ConnectError from napalm.pyIOSXR.exceptions import TimeoutError from napalm.pyIOSXR.exceptions import IteratorIDError from napalm.pyIOSXR.exceptions import InvalidInputError from napalm.pyIOSXR.exceptions import InvalidXMLResponse from napalm.iosxr.utilities import strip_config_header logger = logging.getLogger(__name__) class IOSXR(object): _XML_SHELL = "xml" _XML_MODE_PROMPT = r"XML>" _READ_DELAY = 0.1 _XML_MODE_DELAY = 1 _ITERATOR_ID_ERROR_MSG = ( "Non-supported IteratorID in response object. " 'Turn iteration off on your XML agent by configuring "xml agent [tty | ssl] iteration off".' " For more information refer to " "http://www.cisco.com/c/en/us/td/docs/ios_xr_sw/iosxr_r4-1/xml/" "programming/guide/xl41apidoc.pdf, page 7-99. " "Please turn iteration off for the XML agent." ) def __init__( self, hostname, username, password, port=22, timeout=60, logfile=None, lock=True, **netmiko_kwargs ): self.hostname = str(hostname) self.username = str(username) self.password = str(password) self.port = int(port) self.timeout = int(timeout) self.logfile = logfile self.lock_on_connect = lock self.locked = False self.netmiko_kwargs = netmiko_kwargs self._cli_prompt = None self._xml_agent_locker = Lock() self._xml_agent_alive = False def __getattr__(self, item): def _getattr(*args, **kwargs): cmd = item.replace("_", " ") for arg in args: cmd += " %s" % arg if kwargs.get("config"): response = self._execute_config_show(cmd) else: response = self._execute_show(cmd) match = re.search( ".*(!! 
IOS XR Configuration.*)</Exec>", response, re.DOTALL ) if match is not None: response = match.group(1) return response if item.startswith("show"): return _getattr else: raise AttributeError( "type object '%s' has no attribute '%s'" % (self.__class__.__name__, item) ) def make_rpc_call(self, rpc_command): if not self.is_alive(): logger.debug("Force closing tunnel before making RPC Call") self.close() self.open() logger.debug("Re-opening tunnel before making RPC Call") result = self._execute_rpc(rpc_command) logger.debug(result) return ET.tostring(result) def open(self): try: self.device = ConnectHandler( device_type="cisco_xr", ip=self.hostname, port=self.port, username=self.username, password=self.password, global_cmd_verify=False, **self.netmiko_kwargs ) self.device.timeout = self.timeout self._xml_agent_alive = True except NetMikoTimeoutException as t_err: logger.error(t_err.args[0]) raise ConnectError(t_err.args[0]) except NetMikoAuthenticationException as au_err: logger.error(au_err.args[0]) raise ConnectError(au_err.args[0]) self._cli_prompt = self.device.find_prompt() self._enter_xml_mode() def is_alive(self): if hasattr(self.device, "remote_conn"): return ( self.device.remote_conn.transport.is_active() and self._xml_agent_alive ) return False def _timeout_exceeded(self, start=None, msg="Timeout exceeded!"): if not start: return False if time.time() - start > self.timeout: raise TimeoutError(msg, self) return False def _lock_xml_agent(self, start=None): while not self._xml_agent_locker.acquire(False) and not self._timeout_exceeded( start, "Waiting to acquire the XML agent!" ): pass return True def _unlock_xml_agent(self): if self._xml_agent_locker.locked(): self._xml_agent_locker.release() def _send_command_timing(self, command): return self.device.send_command_timing( command, delay_factor=self._READ_DELAY, max_loops=self._XML_MODE_DELAY / self._READ_DELAY, strip_prompt=False, strip_command=False, ) def _in_cli_mode(self): out = self._send_command_timing("\n") if not out: return False if self._cli_prompt in out: return True return False def _enter_xml_mode(self): self._unlock_xml_agent() self._lock_xml_agent() out = self._send_command_timing(self._XML_SHELL) if "0x24319600" in out: raise ConnectError( "XML agent is not enabled. Please configure `xml agent tty iteration off`!", self, ) self._unlock_xml_agent() if self.lock_on_connect: self.lock() def _send_command( self, command, delay_factor=None, start=None, expect_string=None, read_output=None, receive=False, ): if not expect_string: expect_string = self._XML_MODE_PROMPT if read_output is None: read_output = "" if not delay_factor: delay_factor = self._READ_DELAY if not start: start = time.time() output = read_output last_read = "" if not read_output and not receive: self._lock_xml_agent(start) try: max_loops = self.timeout / delay_factor last_read = self.device.send_command_expect( command, expect_string=expect_string, strip_prompt=False, strip_command=False, delay_factor=delay_factor, max_loops=max_loops, ) output += last_read except IOError: if (not last_read and self._in_cli_mode()) or ( self._cli_prompt in output and "% Invalid input detected at '^' marker." 
in output ): self._enter_xml_mode() if not self._timeout_exceeded(start=start): return self._send_command( command, expect_string=expect_string, delay_factor=delay_factor, ) else: output += self.device._read_channel_timing() if "0xa3679e00" in output or "0xa367da00" in output: raise XMLCLIError("XML agent cannot process parallel requests!", self) if not output.strip().endswith("XML>"): if "0x44318c06" in output or ( self._cli_prompt and expect_string != self._cli_prompt and ( output.startswith(self._cli_prompt) or output.endswith(self._cli_prompt) ) ): self._unlock_xml_agent() self._enter_xml_mode() raise XMLCLIError( "Could not properly execute the command. Re-entering XML mode...", self, ) if ( not output.strip() ): if not self._timeout_exceeded(start=start): return self._send_command( command, receive=True, start=start ) raise XMLCLIError(output.strip(), self) self._unlock_xml_agent() return str(output.replace("XML>", "").strip()) def _execute_rpc(self, command_xml, delay_factor=0.1): xml_rpc_command = ( '<?xml version="1.0" encoding="UTF-8"?><Request MajorVersion="1" MinorVersion="0">' + command_xml + "</Request>" ) response = self._send_command(xml_rpc_command, delay_factor=delay_factor) try: root = ET.fromstring(str.encode(response)) except ET.XMLSyntaxError: if 'IteratorID="' in response: logger.error(self._ITERATOR_ID_ERROR_MSG) raise IteratorIDError(self._ITERATOR_ID_ERROR_MSG, self) raise InvalidXMLResponse( "Unable to process the XML Response from the device!", self ) if "IteratorID" in root.attrib: logger.error(self._ITERATOR_ID_ERROR_MSG) raise IteratorIDError(self._ITERATOR_ID_ERROR_MSG, self) childs = [x.tag for x in list(root)] result_summary = root.find("ResultSummary") if result_summary is not None and int(result_summary.get("ErrorCount", 0)) > 0: if "CLI" in childs: error_msg = root.find("CLI").get("ErrorMsg") or "" elif "Commit" in childs: error_msg = root.find("Commit").get("ErrorMsg") or "" error_code = root.find("Commit").get("ErrorCode") or "" if error_code == "0x41866c00": _candidate_config = self.get_candidate_config(merge=True) self.discard_config() try: self._send_command("exit", expect_string=self._cli_prompt) except XMLCLIError: pass self._enter_xml_mode() self.load_candidate_config(config=_candidate_config) return self.commit_config() elif error_code == "0x41864e00" or error_code == "0x43682c00": raise CommitError("The target configuration buffer is empty.", self) else: error_msg = root.get("ErrorMsg") or "" error_msg += "\nOriginal call was: %s" % xml_rpc_command logger.error(error_msg) raise XMLCLIError(error_msg, self) if "CLI" in childs: cli_childs = [x.tag for x in list(root.find("CLI"))] if "Configuration" in cli_childs: output = root.find("CLI").find("Configuration").text elif "Exec" in cli_childs: output = root.find("CLI").find("Exec").text if output is None: output = "" elif "Invalid input detected" in output: logger.error("Invalid input entered:\n%s" % (output)) raise InvalidInputError("Invalid input entered:\n%s" % output, self) return root def _execute_show(self, show_command): rpc_command = "<CLI><Exec>{show_command}</Exec></CLI>".format( show_command=escape_xml(show_command) ) response = self._execute_rpc(rpc_command) raw_response = response.xpath(".//CLI/Exec")[0].text return raw_response.strip() if raw_response else "" def _execute_config_show(self, show_command, delay_factor=0.1): rpc_command = "<CLI><Configuration>{show_command}</Configuration></CLI>".format( show_command=escape_xml(show_command) ) response = self._execute_rpc(rpc_command, 
delay_factor=delay_factor) raw_response = response.xpath(".//CLI/Configuration")[0].text return raw_response.strip() if raw_response else "" def close(self): if self.lock_on_connect or self.locked: self.unlock() self._unlock_xml_agent() if hasattr(self.device, "remote_conn"): self.device.remote_conn.close()
Apache License 2.0
consensys/py-eip712-structs
tests/test_chain_parity.py
w3
python
def w3(): client = Web3(HTTPProvider('http://localhost:8545')) client.eth.defaultAccount = client.eth.accounts[0] return client
Provide a Web3 client to interact with a local chain.
https://github.com/consensys/py-eip712-structs/blob/b05c1dfe073644ceef1b339a28cd201cb6d2ea88/tests/test_chain_parity.py#L11-L15
import os import pytest from requests.exceptions import ConnectionError from web3 import HTTPProvider, Web3 from eip712_structs import EIP712Struct, String, Uint, Int, Address, Boolean, Bytes, Array @pytest.fixture(scope='module')
MIT License
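A hedged usage sketch for the w3 fixture above: under pytest, a test declares the fixture name as a parameter and receives the connected client. The local node URL and the blockNumber/defaultAccount attributes follow the older web3.py API used by the fixture; the test body is illustrative only.

import pytest
from web3 import HTTPProvider, Web3

@pytest.fixture(scope='module')
def w3():
    # Same fixture as above: talk to a local dev chain on port 8545.
    client = Web3(HTTPProvider('http://localhost:8545'))
    client.eth.defaultAccount = client.eth.accounts[0]
    return client

def test_chain_is_reachable(w3):
    # Hypothetical check: a running node reports a non-negative block number.
    assert w3.eth.blockNumber >= 0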
tum-pbs/phiflow
phi/math/_shape.py
Shape.type
python
def type(self) -> str: assert self.rank == 1, "Shape.type is only defined for shapes of rank 1." return self.types[0]
Only defined for Shapes containing exactly one dimension. Returns the type of that dimension. See Also: `Shape.get_type()`.
https://github.com/tum-pbs/phiflow/blob/4a85f8a5029aa4e30a791daa659f2c8e1536e37e/phi/math/_shape.py#L361-L370
import warnings from typing import Tuple from phi import math BATCH_DIM = 'batch' SPATIAL_DIM = 'spatial' CHANNEL_DIM = 'channel' INSTANCE_DIM = 'înstance' TYPE_ABBR = {SPATIAL_DIM: "ˢ", CHANNEL_DIM: "ᶜ", INSTANCE_DIM: "ⁱ", BATCH_DIM: "ᵇ", None: "⁻"} class Shape: def __init__(self, sizes: tuple or list, names: tuple or list, types: tuple or list): assert len(sizes) == len(names) == len(types), f"sizes={sizes} ({len(sizes)}), names={names} ({len(names)}), types={types} ({len(types)})" if len(sizes) > 0: from ._tensors import Tensor sizes = tuple([s if isinstance(s, Tensor) or s is None else int(s) for s in sizes]) else: sizes = () self.sizes: tuple = sizes self.names: Tuple[str] = tuple(names) assert all(isinstance(n, str) for n in names), f"All names must be of type string but got {names}" self.types: Tuple[str] = tuple(types) @property def _named_sizes(self): return zip(self.names, self.sizes) @property def _dimensions(self): return zip(self.sizes, self.names, self.types) def __len__(self): return len(self.sizes) def __contains__(self, item): if isinstance(item, str): return item in self.names elif isinstance(item, Shape): return all([d in self.names for d in item.names]) else: raise ValueError(item) def __iter__(self): return iter(self[i] for i in range(self.rank)) def index(self, dim: str or 'Shape' or None) -> int: if dim is None: return None elif isinstance(dim, str): return self.names.index(dim) elif isinstance(dim, Shape): assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions." return self.names.index(dim.name) else: raise ValueError(f"index() requires a single dimension as input but got {dim}") def indices(self, dims: tuple or list or 'Shape') -> Tuple[int]: if isinstance(dims, (list, tuple)): return tuple(self.index(n) for n in dims) elif isinstance(dims, Shape): return tuple(self.index(n) for n in dims.names) else: raise ValueError(f"indices() requires a sequence of dimensions but got {dims}") def get_size(self, dim: str or 'Shape'): if isinstance(dim, str): return self.sizes[self.names.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes." return self.sizes[self.names.index(dim.name)] else: raise ValueError(f"get_size() requires a single dimension but got {dim}. 
Use indices() to get multiple sizes.") def get_sizes(self, dims: tuple or list or 'Shape') -> tuple: assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}" return tuple([self.get_size(dim) for dim in dims]) def get_type(self, dim: str or 'Shape') -> str: if isinstance(dim, str): return self.types[self.names.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" return self.types[self.names.index(dim.name)] else: raise ValueError(dim) def get_types(self, dims: tuple or list or 'Shape') -> tuple: if isinstance(dims, (tuple, list)): return tuple(self.get_type(n) for n in dims) elif isinstance(dims, Shape): return tuple(self.get_type(n) for n in dims.names) else: raise ValueError(dims) def __getitem__(self, selection): if isinstance(selection, int): return Shape([self.sizes[selection]], [self.names[selection]], [self.types[selection]]) elif isinstance(selection, slice): return Shape(self.sizes[selection], self.names[selection], self.types[selection]) elif isinstance(selection, str): index = self.index(selection) return Shape([self.sizes[index]], [self.names[index]], [self.types[index]]) elif isinstance(selection, (tuple, list)): return Shape([self.sizes[i] for i in selection], [self.names[i] for i in selection], [self.types[i] for i in selection]) raise AssertionError("Can only access shape elements as shape[int] or shape[slice]") @property def batch(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]] @property def non_batch(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]] @property def spatial(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]] @property def non_spatial(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]] @property def instance(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]] @property def non_instance(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]] @property def channel(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]] @property def non_channel(self) -> 'Shape': return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]] def unstack(self, dim='dims') -> Tuple['Shape']: if dim == 'dims': return tuple(Shape([self.sizes[i]], [self.names[i]], [self.types[i]]) for i in range(self.rank)) if dim not in self: return tuple([self]) else: from ._tensors import Tensor inner = self.without(dim) sizes = [] dim_size = self.get_size(dim) for size in inner.sizes: if isinstance(size, Tensor) and dim in size.shape: sizes.append(size.unstack(dim)) dim_size = size.shape.get_size(dim) else: sizes.append(size) assert isinstance(dim_size, int) shapes = tuple(Shape([int(size[i]) if isinstance(size, tuple) else size for size in sizes], inner.names, inner.types) for i in range(dim_size)) return shapes @property def name(self) -> str: assert self.rank == 1, "Shape.name is only defined for shapes of rank 1." return self.names[0] @property def size(self) -> int: assert self.rank == 1, "Shape.size is only defined for shapes of rank 1." return self.sizes[0] @property
MIT License
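A minimal sketch, assuming Shape is importable from phi.math._shape as the record's path suggests. It uses the rank-independent Shape.get_type() accessor mentioned in the See Also note; the dimension name and size are arbitrary.

from phi.math._shape import Shape

s = Shape(sizes=(64,), names=('x',), types=('spatial',))
print(s.get_type('x'))   # -> 'spatial'; for this rank-1 shape, Shape.type returns the same value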
alexa/alexa-apis-for-python
ask-sdk-model/ask_sdk_model/session_ended_reason.py
SessionEndedReason.__eq__
python
def __eq__(self, other): if not isinstance(other, SessionEndedReason): return False return self.__dict__ == other.__dict__
Returns True if both objects are equal.
https://github.com/alexa/alexa-apis-for-python/blob/bfe5e694daaca71bfb1a4199ca8d2514f1cac6c9/ask-sdk-model/ask_sdk_model/session_ended_reason.py#L56-L62
import pprint import re import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class SessionEndedReason(Enum): USER_INITIATED = "USER_INITIATED" ERROR = "ERROR" EXCEEDED_MAX_REPROMPTS = "EXCEEDED_MAX_REPROMPTS" def to_dict(self): result = {self.name: self.value} return result def to_str(self): return pprint.pformat(self.value) def __repr__(self): return self.to_str()
Apache License 2.0
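A small usage sketch of the enum above, assuming the import path matches the record's module layout; the printed values follow directly from to_dict and __eq__ as defined in the context.

from ask_sdk_model.session_ended_reason import SessionEndedReason

reason = SessionEndedReason.USER_INITIATED
print(reason.to_dict())                              # {'USER_INITIATED': 'USER_INITIATED'}
print(reason == SessionEndedReason.USER_INITIATED)   # True
print(reason == SessionEndedReason.ERROR)            # False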
mrknow/filmkodi
plugin.video.mrknow/mylib/pydev_ipython/inputhook.py
InputHookManager.set_inputhook
python
def set_inputhook(self, callback): self._callback = callback
Set inputhook to callback.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/plugin.video.mrknow/mylib/pydev_ipython/inputhook.py#L83-L87
import sys import select GUI_WX = 'wx' GUI_QT = 'qt' GUI_QT4 = 'qt4' GUI_GTK = 'gtk' GUI_TK = 'tk' GUI_OSX = 'osx' GUI_GLUT = 'glut' GUI_PYGLET = 'pyglet' GUI_GTK3 = 'gtk3' GUI_NONE = 'none' def ignore_CTRL_C(): pass def allow_CTRL_C(): pass class InputHookManager(object): def __init__(self): self._return_control_callback = None self._apps = {} self._reset() self.pyplot_imported = False def _reset(self): self._callback_pyfunctype = None self._callback = None self._current_gui = None def set_return_control_callback(self, return_control_callback): self._return_control_callback = return_control_callback def get_return_control_callback(self): return self._return_control_callback def return_control(self): return self._return_control_callback() def get_inputhook(self): return self._callback
Apache License 2.0
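A minimal sketch of the setter/getter pair above, assuming the module imports as pydev_ipython.inputhook; the hook body is a placeholder for a GUI toolkit's event-loop pump.

from pydev_ipython.inputhook import InputHookManager

def my_hook():
    # A real hook would run one iteration of a GUI event loop here.
    return 0

manager = InputHookManager()
manager.set_inputhook(my_hook)
assert manager.get_inputhook() is my_hook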
abstractgeek/comic-scraper
comic_scraper/extractors/mangareader.py
MangaReaderChapter.download_page
python
def download_page(self, page): page_url, page_num = page urlscheme = urlparse(page_url) filename = os.path.join(self.chapter_location, '%0.3d.jpg' % (page_num)) max_retries = deepcopy(self.max_retries) wait_retry_time = deepcopy(self.wait_time) while True: r = requests.get(page_url, verify=self.verify_https) soup = bsoup.BeautifulSoup(r.text, 'html.parser') for div in soup.find_all('div', {'id':'imgholder'}): if div.get('id'): img = div.find_all('img') break if img: image = urljoin(urlscheme.scheme + "://" + urlscheme.netloc, img[0].get('src')) self.download_image(image, filename) return True elif (max_retries > 0): sleep(uniform(0.5 * wait_retry_time, 1.5 * wait_retry_time)) max_retries -= 1 else: print("Failed download: Chapter-%g, page-%d" % (self.chapter_num, page_num)) shutil.copyfile( os.path.join(os.path.dirname( os.path.realpath(__file__)), 'no_image_available.png'), filename) return False
Download an individual page of a manga chapter.
https://github.com/abstractgeek/comic-scraper/blob/8e97d672e4d772f185f0c08f808a5584f6c8f073/comic_scraper/extractors/mangareader.py#L100-L135
from base_comic import BaseComic, BaseChapter from urllib.parse import urlparse, urljoin import requests import bs4 as bsoup from collections import defaultdict import re import os import shutil from random import shuffle, uniform from copy import deepcopy from time import sleep class MangaReaderComic(BaseComic): def extract_chapters(self): comic_name = self.name url = self.url urlscheme = urlparse(url) r = requests.get(url, verify=self.verify_https) soup = bsoup.BeautifulSoup(r.text, 'html.parser') chapters = defaultdict(MangaReaderChapter) links = [link.get('href') for link in soup.find_all('a') if link.get('href') and (comic_name in link.get('href'))] for link in links: chapter_link = urljoin(urlscheme.scheme + "://" + urlscheme.netloc, link) matched_groups = re.search('/([\d \.]+)', chapter_link) if matched_groups: chapter_num = float(matched_groups.group(1)) if chapter_num in chapters: continue else: chapters[chapter_num] = MangaReaderChapter( self, chapter_num, chapter_link) return chapters def page_filter(tag): test = (tag.name == 'option') test = (test and tag.parent.name == 'select') test = (test and 'pageMenu' in tag.parent['name']) return test class MangaReaderChapter(BaseChapter): def get_pages(self): base_url = self.chapter_url max_retries = deepcopy(self.max_retries) wait_retry_time = deepcopy(self.wait_time) urlscheme = urlparse(base_url) while True: r = requests.get(base_url, verify=self.verify_https) soup = bsoup.BeautifulSoup(r.text, 'html.parser') page_list = soup.find_all(page_filter) pages = [] for page in page_list: curr_url = page.get('value') try: page_num = float(curr_url.split('/')[-1]) except: page_num = 1 page_url = urljoin(urlscheme.scheme + "://" + urlscheme.netloc, curr_url) pages.append((page_url, page_num)) if pages: shuffle(pages) return True, pages elif (max_retries > 0): sleep(uniform(0.5 * wait_retry_time, 1.5 * wait_retry_time)) max_retries -= 1 else: return False, None
MIT License
yasoob/youtube-dl-gui
youtube_dl/extractor/common.py
InfoExtractor.__init__
python
def __init__(self, downloader=None): self._ready = False self._x_forwarded_for_ip = None self.set_downloader(downloader)
Constructor. Receives an optional downloader.
https://github.com/yasoob/youtube-dl-gui/blob/2c5b1c2300050c86d1245bcc8823b81d9d97f346/youtube_dl/extractor/common.py#L400-L404
from __future__ import unicode_literals import base64 import datetime import hashlib import json import netrc import os import random import re import socket import ssl import sys import time import math from ..compat import ( compat_cookiejar_Cookie, compat_cookies_SimpleCookie, compat_etree_Element, compat_etree_fromstring, compat_getpass, compat_integer_types, compat_http_client, compat_os_name, compat_str, compat_urllib_error, compat_urllib_parse_unquote, compat_urllib_parse_urlencode, compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) from ..downloader.f4m import ( get_base_url, remove_encrypted_media, ) from ..utils import ( NO_DEFAULT, age_restricted, base_url, bug_reports_message, clean_html, compiled_regex_type, determine_ext, determine_protocol, dict_get, error_to_compat_str, ExtractorError, extract_attributes, fix_xml_ampersands, float_or_none, GeoRestrictedError, GeoUtils, int_or_none, js_to_json, JSON_LD_RE, mimetype2ext, orderedSet, parse_bitrate, parse_codecs, parse_duration, parse_iso8601, parse_m3u8_attributes, parse_resolution, RegexNotFoundError, sanitized_Request, sanitize_filename, str_or_none, str_to_int, strip_or_none, unescapeHTML, unified_strdate, unified_timestamp, update_Request, update_url_query, urljoin, url_basename, url_or_none, xpath_element, xpath_text, xpath_with_ns, ) class InfoExtractor(object): _ready = False _downloader = None _x_forwarded_for_ip = None _GEO_BYPASS = True _GEO_COUNTRIES = None _GEO_IP_BLOCKS = None _WORKING = True
MIT License
dit/dit
dit/math/aitchison.py
alr_inv
python
def alr_inv(xalr): if len(xalr.shape) == 1: single = True else: single = False xalr = np.atleast_2d(xalr) newshape = list(xalr.shape) newshape[1] += 1 x = np.empty(newshape) x[:, :-1] = exp2(xalr) x[:, -1] = 1 x = closure(x) return x[0] if single else x
Returns the inverse additive log-ratio transformation of x. Parameters ---------- xalr : NumPy array, shape (n,) or (k,n) The additive log-ratio transformations of x. Returns ------- x : NumPy array, shape (n+1,) or (k,n+1) The original compositions Notes ----- The sum of the composition is assumed to be 1.
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/math/aitchison.py#L474-L514
import math import numpy as np from dit.exceptions import ditException from dit.math import LogOperations __all__ = ( 'closure', 'subcomposition', 'perturbation', 'power', 'add', 'sub', 'inner', 'norm', 'dist', 'metric', 'clr', 'alr', 'ilr', 'basis', 'clr_inv', 'alr_inv', 'ilr_inv', ) ops = LogOperations(2) exp2 = ops.exp log2 = ops.log def _gm(x): last_axis = -1 x_gm = x.prod(axis=last_axis) ** (1 / x.shape[last_axis]) return x_gm def _log2_gm(x): last_axis = -1 x_loggm = 1 / x.shape[last_axis] * np.log2(x).sum(axis=last_axis) return x_loggm def closure(x): s = x.sum(axis=-1, dtype=float) if np.any(s == 0.0): raise ditException("x contains an unnormalizable distribution.") cx = x / s[..., np.newaxis] return cx def subcomposition(x, indexes): xsub = closure(x[..., indexes]) return xsub def perturbation(x, dx): px = closure(x * dx) return px def power(x, a): a = np.ravel(a)[..., np.newaxis] px = closure(x**a) if len(x.shape) == 1: px = px[0] return px add = perturbation def sub(x, y): z = perturbation(x, power(y, -1.0)) return z def inner(x, y): if len(x.shape) == 1 and len(y.shape) == 1: single = True else: single = False x = np.atleast_2d(x) y = np.atleast_2d(y) x_loggm = _log2_gm(x)[:, np.newaxis] y_loggm = _log2_gm(y)[:, np.newaxis] z = (log2(x) - x_loggm) * (log2(y) - y_loggm) z = z.sum(axis=1) if single: z = z[0] return z def norm(x): n = np.sqrt(inner(x, x)) return n def dist(x, y): d = norm(sub(x, y)) return d metric = dist def clr(x): if len(x.shape) == 1: single = True else: single = False x = np.atleast_2d(x) x_loggm = _log2_gm(x)[:, np.newaxis] y = log2(x) - x_loggm if single: y = y[0] return y def alr(x): if len(x.shape) == 1: single = True else: single = False x = np.atleast_2d(x) y = log2(x[:, :-1]) - log2(x[:, -1][:, np.newaxis]) if single: y = y[0] return y def ilr(x): if len(x.shape) == 1: single = True else: single = False x = np.atleast_2d(x) rng = np.arange(1, x.shape[1]) loggm = 1 / rng * log2(x).cumsum(axis=1)[:, :-1] y = loggm - log2(x[:, 1:]) y *= np.sqrt([i / (i + 1) for i in rng]) if single: y = y[0] return y def ubasis(n): u = np.tri(N=n, M=n + 1, k=1) rng = np.arange(1, n + 1) u *= np.array([1 / i for i in rng])[:, np.newaxis] u.flat[1::n + 2] = -1 u *= np.array([math.sqrt(i / (i + 1)) for i in rng])[:, np.newaxis] return u def basis(n): u = ubasis(n) b = clr_inv(u) return b def clr_inv(xclr): x = closure(exp2(xclr)) return x
BSD 3-Clause New or Revised License
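A worked round-trip, assuming alr, alr_inv, and closure are importable from dit.math.aitchison as the record's path suggests: the composition is normalised, mapped to additive log-ratio coordinates, and then recovered.

import numpy as np
from dit.math.aitchison import alr, alr_inv, closure

x = closure(np.array([0.2, 0.3, 0.5]))   # a composition summing to 1
y = alr(x)                               # shape (2,): additive log-ratio coordinates
x_back = alr_inv(y)                      # shape (3,): the original composition
assert np.allclose(x, x_back)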
datacamp/viewflow
viewflow/create_dag.py
set_callbacks
python
def set_callbacks(created_task, parsed_task): for callback_type in ["on_success_callback", "on_failure_callback", "on_retry_callback"]: if getattr(created_task, callback_type): callback_str = getattr(created_task, callback_type) callback_fun = getattr(task_callbacks, callback_str) setattr(created_task, callback_type, callback_fun) if parsed_task.get(callback_type): setattr(created_task, callback_type, getattr(task_callbacks, parsed_task.get(callback_type))) if not getattr(created_task, callback_type): setattr(created_task, callback_type, getattr(task_callbacks, callback_type+"_default"))
Set the task-specific callbacks, if any are given. Fall back to the default callbacks if neither the DAG's config.yml nor the task specifies one.
https://github.com/datacamp/viewflow/blob/3123b1e36ca4b9464d4a8cdf13c520384b71c600/viewflow/create_dag.py#L132-L150
import os import yaml import logging import pickle import re import networkx as nx import random from collections import namedtuple from pathlib import Path from datetime import datetime from dataclasses import dataclass from typing import Dict, Any, TypeVar from airflow import DAG from airflow.models import BaseOperator from airflow.sensors.external_task_sensor import ExternalTaskSensor from . import task_callbacks from .adapters.postgresql import postgres_adapter from .adapters.python import python_adapter from .adapters.rmd import rmd_adapter from .adapters.r import r_adapter from .parsers.parse_yml import parse_yml from .parsers.parse_sql import parse_sql from .parsers.parse_python import parse_python from .parsers.parse_rmd import parse_rmd from .parsers.parse_r import parse_r from .parsers.dependencies import get_sql_dependencies from .parsers.dependencies import get_python_dependencies from .parsers.dependencies import get_r_dependencies from .operators.rmd_operator import extract_r O = TypeVar("O", bound=BaseOperator) DAG_CONFIG_FILE = "config.yml" OPERATORS = { "PostgresOperator": postgres_adapter.create_task, "PythonToPostgresOperator": python_adapter.create_task, "RmdOperator": rmd_adapter.create_task, "ROperator": r_adapter.create_task } PARSERS = {".yml": parse_yml, ".sql": parse_sql, ".py": parse_python, ".rmd": parse_rmd, ".r": parse_r} SQL_OPERATORS = ["PostgresOperator"] @dataclass class ParseContext: dag_id: str def parse_dag_dir( input_dir: str, parse_context: ParseContext, dag_config_file=DAG_CONFIG_FILE ) -> Dict[str, Any]: dag_dir = Path(input_dir) dag_config_file_path = dag_dir / dag_config_file dag_config = yaml.safe_load(dag_config_file_path.read_text()) dag_config["start_date"] = datetime.strptime(dag_config["start_date"], "%Y-%m-%d") task_files = sorted(os.listdir(dag_dir)) return { "dag_config": dag_config, "tasks": [ task for task in [ parse_task_file(dag_dir / task_file, parse_context) for task_file in task_files if task_file != dag_config_file ] if task is not None ], } def parse_depends_on_entry(depends_on_entry: str, dag_name: str) -> Dict[str, Any]: match = re.search("^(.*?)/(.*?)$", depends_on_entry) if match is None: return {"task": depends_on_entry, "dag": dag_name} dag, task = match.groups() return {"task": task, "dag": dag} def parse_depends_on(depends_on, dag_name: str): if not (isinstance(depends_on, list)): raise TypeError("'depends_on' should be a list") return [ parse_depends_on_entry(depends_on_entry, dag_name) for depends_on_entry in depends_on ] def parse_task_file( task_file_path: Path, parse_context: ParseContext ) -> Dict[str, Any]: parser = PARSERS.get(task_file_path.suffix.lower()) if parser is None: return None task_config = parser(task_file_path) task_config["task_id"] = task_file_path.stem task_config["task_file_path"] = f"{parse_context.dag_id}/{task_file_path.name}" if "depends_on" in task_config: task_config["depends_on"] = parse_depends_on( task_config["depends_on"], parse_context.dag_id ) return task_config def get_all_dependencies(task, schema_name): if task["type"] == "PostgresOperator": dependencies = get_sql_dependencies(task["content"], schema_name) elif task["type"] == "PythonToPostgresOperator": dependencies = get_python_dependencies(task["content"], schema_name) elif task["type"] == "RmdOperator": r_content = extract_r(task["content"]) dependencies = list(get_r_dependencies(r_content, schema_name, task["dependency_function"]).values()) elif task["type"] == "ROperator": dependencies = 
list(get_r_dependencies(task["content"], schema_name, task["dependency_function"]).values()) else: dependencies = [] return dependencies
MIT License
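A dependency-free sketch of the precedence that set_callbacks implements (task-file value over any value already on the created task, with a "<type>_default" fallback); the function, dictionary, and names below are hypothetical stand-ins, not viewflow APIs.

def resolve_callback(callback_type, parsed_task, created_value, callbacks):
    # Task file wins, then the value already on the created task, then the default.
    name = parsed_task.get(callback_type) or created_value
    return callbacks[name] if name else callbacks[callback_type + "_default"]

callbacks = {"notify_slack": "notify_slack()", "on_failure_callback_default": "default()"}
print(resolve_callback("on_failure_callback", {"on_failure_callback": "notify_slack"}, None, callbacks))  # notify_slack()
print(resolve_callback("on_failure_callback", {}, None, callbacks))                                       # default()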
ssepulveda/rtgraph
rtgraph/processors/Serial.py
SerialProcess.get_ports
python
def get_ports(): if Architecture.get_os() is OSType.macosx: import glob return glob.glob("/dev/tty.*") else: found_ports = [] for port in list(list_ports.comports()): Log.d(TAG, "found device {}".format(port)) found_ports.append(port.device) return found_ports
Gets a list of the available serial ports. :return: List of available serial ports. :rtype: str list.
https://github.com/ssepulveda/rtgraph/blob/b9d2e3c0da7fc385b8c979e0dc670c211d45c02b/rtgraph/processors/Serial.py#L83-L97
import multiprocessing from time import time import serial from serial.tools import list_ports from rtgraph.common.architecture import Architecture from rtgraph.common.architecture import OSType from rtgraph.core.constants import Constants from rtgraph.common.logger import Logger as Log TAG = "Serial" class SerialProcess(multiprocessing.Process): def __init__(self, parser_process): multiprocessing.Process.__init__(self) self._exit = multiprocessing.Event() self._parser = parser_process self._serial = serial.Serial() Log.i(TAG, "Process ready") def open(self, port, speed=Constants.serial_default_speed, timeout=Constants.serial_timeout_ms): self._serial.port = port self._serial.baudrate = int(speed) self._serial.stopbits = serial.STOPBITS_ONE self._serial.bytesize = serial.EIGHTBITS self._serial.timeout = timeout return self._is_port_available(self._serial.port) def run(self): Log.i(TAG, "Process starting...") if self._is_port_available(self._serial.port): if not self._serial.isOpen(): self._serial.open() Log.i(TAG, "Port opened") timestamp = time() while not self._exit.is_set(): self._parser.add([time() - timestamp, self._serial.readline()]) Log.i(TAG, "Process finished") self._serial.close() else: Log.w(TAG, "Port is not opened") else: Log.w(TAG, "Port is not available") def stop(self): Log.i(TAG, "Process finishing...") self._exit.set() @staticmethod
MIT License
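A hedged usage sketch: get_ports is a static method in the context above, so it can be called without constructing a SerialProcess; the import path mirrors the record's file layout and the example port names are illustrative.

from rtgraph.processors.Serial import SerialProcess

ports = SerialProcess.get_ports()
print(ports)   # e.g. ['/dev/ttyUSB0'] on Linux or ['/dev/tty.usbserial-1420'] on macOS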
drorlab/atom3d
atom3d/util/results.py
Results3DCNN.get_prediction
python
def get_prediction(self, prediction_fn): targets, predict = None, None return targets, predict
Reads targets and prediction. TODO: Implement this!
https://github.com/drorlab/atom3d/blob/7eacb676f56b4130fd805f4b2901a600170b88f9/atom3d/util/results.py#L16-L24
import os, sys import pickle import torch import numpy as np import pandas as pd import scipy as sp import scipy.stats as stats class Results3DCNN(): def __init__(self, name, reps=[1,2,3]): self.name = name self.reps = reps
MIT License
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/billing/models/service_package_quota_history_service_package.py
ServicePackageQuotaHistoryServicePackage.firmware_update_count
python
def firmware_update_count(self, firmware_update_count): if firmware_update_count is None: raise ValueError("Invalid value for `firmware_update_count`, must not be `None`") self._firmware_update_count = firmware_update_count
Sets the firmware_update_count of this ServicePackageQuotaHistoryServicePackage. Size of firmware update quota of this service package. :param firmware_update_count: The firmware_update_count of this ServicePackageQuotaHistoryServicePackage. :type: int
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/billing/models/service_package_quota_history_service_package.py#L98-L109
from pprint import pformat from six import iteritems import re class ServicePackageQuotaHistoryServicePackage(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'expires': 'datetime', 'firmware_update_count': 'int', 'id': 'str', 'previous_id': 'str', 'start_time': 'datetime' } attribute_map = { 'expires': 'expires', 'firmware_update_count': 'firmware_update_count', 'id': 'id', 'previous_id': 'previous_id', 'start_time': 'start_time' } def __init__(self, expires=None, firmware_update_count=None, id=None, previous_id=None, start_time=None): self._expires = expires self._firmware_update_count = firmware_update_count self._id = id self._previous_id = previous_id self._start_time = start_time self.discriminator = None @property def expires(self): return self._expires @expires.setter def expires(self, expires): if expires is None: raise ValueError("Invalid value for `expires`, must not be `None`") self._expires = expires @property def firmware_update_count(self): return self._firmware_update_count @firmware_update_count.setter
Apache License 2.0
ngageoint/sarpy
sarpy/annotation/label.py
LabelSchema.subtypes
python
def subtypes(self): return self._subtypes
The complete dictionary of subtypes of the form `{parent_id : <subids list>}`. Returns ------- Dict[str, List[str]]
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/annotation/label.py#L150-L159
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" import logging import time from collections import OrderedDict import os import json from typing import Union, List, Any, Dict from datetime import datetime import getpass from sarpy.geometry.geometry_elements import _Jsonable, FeatureCollection, Feature from sarpy.compliance import string_types, int_func, integer_types logger = logging.getLogger(__name__) class LabelSchema(object): __slots__ = ( '_version', '_labels', '_classification', '_version_date', '_subtypes', '_parent_types', '_confidence_values', '_permitted_geometries', '_integer_ids', '_maximum_id') def __init__(self, version='1.0', labels=None, version_date=None, classification="UNCLASSIFIED", subtypes=None, confidence_values=None, permitted_geometries=None): self._version_date = None self._labels = None self._subtypes = None self._parent_types = None self._confidence_values = None self._permitted_geometries = None self._integer_ids = True self._maximum_id = None self._version = version self.update_version_date(value=version_date) self._classification = classification self.confidence_values = confidence_values self.permitted_geometries = permitted_geometries self.set_labels_and_subtypes(labels, subtypes) @property def version(self): return self._version @property def version_date(self): return self._version_date def update_version_date(self, value=None): if isinstance(value, string_types): self._version_date = value else: self._version_date = datetime.utcnow().isoformat('T')+'Z' @property def classification(self): return self._classification @property def suggested_next_id(self): return None if self._maximum_id is None else self._maximum_id + 1 @property def labels(self): return self._labels @property
MIT License
paddlepaddle/paddle
python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
_check_grandchild_op_node
python
def _check_grandchild_op_node(op_node, grandchild_op_name): for out1_var_node in op_node.outputs: for out1_op_node in out1_var_node.outputs: for out2_var_node in out1_op_node.outputs: for out2_op_node in out2_var_node.outputs: if out2_op_node.name() == grandchild_op_name: return True return False
Check whether the fake_quant node has a grandchild op node named grandchild_op_name.
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py#L255-L266
import collections import numpy as np from ..... import compat as cpt from .... import core from ....framework import IrGraph from ....framework import IrNode from ....framework import Operator from .... import unique_name from ....framework import Program, program_guard, default_startup_program from ....data import data from ....layers import mean from ....executor import scope_guard from ....framework import _get_paddle_place __all__ = [ 'QuantizationTransformPass', 'QuantizationFreezePass', 'ConvertToInt8Pass', 'TransformForMobilePass', 'OutScaleForTrainingPass', 'OutScaleForInferencePass', 'AddQuantDequantPass' ] _fake_quant_op_list = [ 'fake_quantize_abs_max', 'fake_quantize_range_abs_max', 'fake_quantize_moving_average_abs_max', 'fake_channel_wise_quantize_abs_max' ] _fake_dequant_op_list = [ 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs' ] _fake_quant_dequant_op_list = [ 'fake_quantize_dequantize_moving_average_abs_max' ] _out_scale_op_list = [ "conv2d", "depthwise_conv2d", "mul", "matmul", "matmul_v2", "relu", "leaky_relu", "relu6", "sigmoid", "tanh", "prelu", "swish", "softmax", "batch_norm", "layer_norm", "elementwise_add", "pool2d", "reshape2", "transpose2", "concat", "elementwise_mul", "scale", "slice", "hard_swish", "hard_sigmoid", "conv2d_transpose", "gru", "bilinear_interp", "nearest_interp", "trilinear_interp", "flatten", "flatten2", "transpose", "pad2d", "reshape", "layer_norm", ] _op_real_in_out_name = { "conv2d": [["Input", "Filter"], ["Output"]], "depthwise_conv2d": [["Input", "Filter"], ["Output"]], "conv2d_transpose": [["Input", "Filter"], ["Output"]], "mul": [["X", "Y"], ["Out"]], "matmul": [["X", "Y"], ["Out"]], "matmul_v2": [["X", "Y"], ["Out"]], "pool2d": [["X"], ["Out"]], "elementwise_add": [["X", "Y"], ["Out"]], "concat": [["X"], ["Out"]], "softmax": [["X"], ["Out"]], "argmax": [["X"], ["Out"]], "transpose": [["X"], ["Out"]], "equal": [["X", "Y"], ["Out"]], "gather": [["X"], ["Out"]], "greater_equal": [["X", "Y"], ["Out"]], "greater_than": [["X", "Y"], ["Out"]], "less_equal": [["X", "Y"], ["Out"]], "less_than": [["X", "Y"], ["Out"]], "mean": [["X"], ["Out"]], "not_equal": [["X", "Y"], ["Out"]], "reshape": [["X"], ["Out"]], "reshape2": [["X"], ["Out"]], "transpose2": [["X"], ["Out"]], "bilinear_interp": [["X"], ["Out"]], "nearest_interp": [["X"], ["Out"]], "trilinear_interp": [["X"], ["Out"]], "slice": [["Input"], ["Out"]], "squeeze": [["X"], ["Out"]], "elementwise_sub": [["X", "Y"], ["Out"]], "relu": [["X"], ["Out"]], "relu6": [["X"], ["Out"]], "leaky_relu": [["X"], ["Out"]], "prelu": [["X"], ["Out"]], "tanh": [["X"], ["Out"]], "swish": [["X"], ["Out"]], "dropout": [["X"], ["Out"]], "batch_norm": [["X"], ["Y"]], "layer_norm": [["X"], ["Y"]], "sigmoid": [["X"], ["Out"]], "elementwise_mul": [["X", "Y"], ["Out"]], "scale": [["X"], ["Out"]], "hard_swish": [["X"], ["Out"]], "hard_sigmoid": [["X"], ["Out"]], "gru": [["Input", "Weight"], ["Hidden"]], "lstm": [["Input", "Weight"], ["Hidden"]], "pad2d": [["X"], ["Out"]], "flatten": [["X"], ["Out"]], "flatten2": [["X"], ["Out"]], "unsqueeze2": [["X"], ["Out"]], "flatten_contiguous_range": [['X'], ["Out", "XShape"]], } _conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose'] _channelwise_quant_axis1_ops = [ 'conv2d_transpose', 'mul', 'matmul', 'matmul_v2' ] def _get_op_input_var_names(op): assert isinstance(op, (IrNode, Operator)), "The input op should be IrNode or Operator." 
var_names = [] op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return [] name_list = _op_real_in_out_name[op_name][0] for name in name_list: var_name = op.input(name) if isinstance(var_name, list): var_names.extend(var_name) else: var_names.append(var_name) return var_names def _get_input_name_index(op, input_var_name): assert isinstance(op, (IrNode, Operator)), "The input op should be IrNode or Operator." op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return None res = None for argname in _op_real_in_out_name[op_name][0]: var_names = op.input(argname) for index, name in enumerate(var_names): if name == input_var_name: res = (argname, index) return res def _get_op_output_var_names(op): assert isinstance(op, (IrNode, Operator)), "The input op should be IrNode or Operator." var_names = [] op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return [] name_list = _op_real_in_out_name[op_name][1] for name in name_list: var_name = op.output(name) if isinstance(var_name, list): var_names.extend(var_name) else: var_names.append(var_name) return var_names def _get_output_name_index(op, output_var_name): assert isinstance(op, (IrNode, Operator)), "The input op should be IrNode or Operator." op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return None name_list = _op_real_in_out_name[op_name][1] res = None for name in name_list: var_name = op.output(name) for index, val in enumerate(var_name): if val == output_var_name: res = (name, index) return res def _init_var_node(var_node, value, scope, place): assert isinstance(value, np.ndarray), 'The type of value should be numpy array.' assert scope is not None, 'The scope cannot be set None.' assert place is not None, 'The place cannot be set None.' tensor = scope.var(var_node.name()).get_tensor() tensor.set(value, place) def _is_input_all_not_persistable(graph, op_node): is_input_all_not_persistable = True for var_name in _get_op_input_var_names(op_node): in_node = graph._find_node_by_name(op_node.inputs, var_name) is_input_all_not_persistable = (is_input_all_not_persistable and (not in_node.persistable())) return is_input_all_not_persistable
Apache License 2.0
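A dependency-free sketch of the same two-hop traversal (op -> output var -> child op -> output var -> grandchild op); the FakeNode class below is a hypothetical stand-in for the IR nodes, not a Paddle API.

class FakeNode:
    # Stand-in exposing the .name() and .outputs interface the check relies on.
    def __init__(self, name, outputs=()):
        self._name, self.outputs = name, list(outputs)
    def name(self):
        return self._name

def check_grandchild(op_node, grandchild_op_name):
    # Same traversal as _check_grandchild_op_node above.
    for out1_var in op_node.outputs:
        for out1_op in out1_var.outputs:
            for out2_var in out1_op.outputs:
                for out2_op in out2_var.outputs:
                    if out2_op.name() == grandchild_op_name:
                        return True
    return False

grandchild = FakeNode("fake_dequantize_max_abs")
child = FakeNode("conv2d", [FakeNode("var1", [grandchild])])
quant = FakeNode("fake_quantize_abs_max", [FakeNode("var0", [child])])
print(check_grandchild(quant, "fake_dequantize_max_abs"))   # True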
montvieux/plark_ai_public
Components/stable-baselines/stable_baselines/deepq/replay_buffer.py
PrioritizedReplayBuffer.sample
python
def sample(self, batch_size, beta=0): assert beta > 0 idxes = self._sample_proportional(batch_size) weights = [] p_min = self._it_min.min() / self._it_sum.sum() max_weight = (p_min * len(self._storage)) ** (-beta) p_sample = self._it_sum[idxes] / self._it_sum.sum() weights = (p_sample * len(self._storage)) ** (-beta) / max_weight encoded_sample = self._encode_sample(idxes) return tuple(list(encoded_sample) + [weights, idxes])
Sample a batch of experiences. Compared to ReplayBuffer.sample it also returns importance weights and idxes of sampled experiences. :param batch_size: (int) How many transitions to sample. :param beta: (float) To what degree to use importance weights (0 - no corrections, 1 - full correction) :return: - obs_batch: (np.ndarray) batch of observations - act_batch: (numpy float) batch of actions executed given obs_batch - rew_batch: (numpy float) rewards received as a result of executing act_batch - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode and 0 otherwise. - weights: (numpy float) Array of shape (batch_size,) and dtype np.float32 denoting importance weight of each sampled transition - idxes: (numpy int) Array of shape (batch_size,) and dtype np.int32 indexes in buffer of sampled experiences
https://github.com/montvieux/plark_ai_public/blob/eb68a76f1dbfed1b0ef09e358aa8678d8ea8291a/Components/stable-baselines/stable_baselines/deepq/replay_buffer.py#L144-L174
import random import numpy as np from stable_baselines.common.segment_tree import SumSegmentTree, MinSegmentTree class ReplayBuffer(object): def __init__(self, size): self._storage = [] self._maxsize = size self._next_idx = 0 def __len__(self): return len(self._storage) @property def storage(self): return self._storage @property def buffer_size(self): return self._maxsize def can_sample(self, n_samples): return len(self) >= n_samples def is_full(self): return len(self) == self.buffer_size def add(self, obs_t, action, reward, obs_tp1, done): data = (obs_t, action, reward, obs_tp1, done) if self._next_idx >= len(self._storage): self._storage.append(data) else: self._storage[self._next_idx] = data self._next_idx = (self._next_idx + 1) % self._maxsize def _encode_sample(self, idxes): obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], [] for i in idxes: data = self._storage[i] obs_t, action, reward, obs_tp1, done = data obses_t.append(np.array(obs_t, copy=False)) actions.append(np.array(action, copy=False)) rewards.append(reward) obses_tp1.append(np.array(obs_tp1, copy=False)) dones.append(done) return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones) def sample(self, batch_size, **_kwargs): idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)] return self._encode_sample(idxes) class PrioritizedReplayBuffer(ReplayBuffer): def __init__(self, size, alpha): super(PrioritizedReplayBuffer, self).__init__(size) assert alpha >= 0 self._alpha = alpha it_capacity = 1 while it_capacity < size: it_capacity *= 2 self._it_sum = SumSegmentTree(it_capacity) self._it_min = MinSegmentTree(it_capacity) self._max_priority = 1.0 def add(self, obs_t, action, reward, obs_tp1, done): idx = self._next_idx super().add(obs_t, action, reward, obs_tp1, done) self._it_sum[idx] = self._max_priority ** self._alpha self._it_min[idx] = self._max_priority ** self._alpha def _sample_proportional(self, batch_size): mass = [] total = self._it_sum.sum(0, len(self._storage) - 1) mass = np.random.random(size=batch_size) * total idx = self._it_sum.find_prefixsum_idx(mass) return idx
Apache License 2.0
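A hedged usage sketch, assuming the import path follows the record's file layout and that the buffer exposes the usual update_priorities(idxes, priorities) method (not shown in the truncated context). Note that beta must be positive despite the default of 0.

import numpy as np
from stable_baselines.deepq.replay_buffer import PrioritizedReplayBuffer

buffer = PrioritizedReplayBuffer(size=100, alpha=0.6)
for i in range(10):
    buffer.add(np.zeros(4), i % 2, float(i), np.ones(4), done=False)

obs, act, rew, next_obs, done, weights, idxes = buffer.sample(batch_size=4, beta=0.4)
# Feed new priorities back, e.g. TD-error magnitudes (assumed method, see note above).
buffer.update_priorities(idxes, np.abs(rew) + 1e-6)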
newville/pyshortcuts
pyshortcuts/darwin.py
fix_anacondapy_pythonw
python
def fix_anacondapy_pythonw(fname): with open(fname, 'r') as fh: try: lines = fh.readlines() except IOError: lines = ['-'] firstline = lines[0][:-1].strip() if firstline.startswith('#!') and 'python' in firstline: firstline = '#!/usr/bin/env pythonw' fh = open(fname, 'w') fh.write('%s\n' % firstline) fh.write("".join(lines[1:])) fh.close()
Fix the shebang line for scripts using Anaconda Python so that they use 'pythonw' instead of 'python'.
https://github.com/newville/pyshortcuts/blob/3da6bfb32be033fcf4df0eabf2f6335c7c24b567/pyshortcuts/darwin.py#L36-L52
import os import sys import shutil from .shortcut import shortcut from .linux import get_homedir, get_desktop from . import UserFolders scut_ext = 'app' ico_ext = ('icns',) def get_startmenu(): return '' def get_folders(): return UserFolders(get_homedir(), get_desktop(), get_startmenu())
MIT License
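A hedged usage sketch for fix_anacondapy_pythonw above; the script path is hypothetical and the comment shows the before/after shebang rewrite the function performs.

from pyshortcuts.darwin import fix_anacondapy_pythonw

# '#!/path/to/anaconda3/bin/python'  ->  '#!/usr/bin/env pythonw'
fix_anacondapy_pythonw('/path/to/anaconda3/bin/my_gui_script')   # hypothetical path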
michaelaquilina/python-tools
lib/jedi/parser/tree.py
IfStmt.node_after_else
python
def node_after_else(self, node): for c in self.children: if c == 'else': if node.start_pos > c.start_pos: return True else: return False
Checks if a node is defined after `else`.
https://github.com/michaelaquilina/python-tools/blob/2fbee20f9ce286ba55050adafcea8bb43c0922b3/lib/jedi/parser/tree.py#L863-L872
import os import re from inspect import cleandoc from itertools import chain import textwrap from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr, literal_eval, use_metaclass, unicode) from jedi import cache def is_node(node, *symbol_names): try: type = node.type except AttributeError: return False else: return type in symbol_names class PositionModifier(object): def __init__(self): self.line = 0 zero_position_modifier = PositionModifier() class DocstringMixin(object): __slots__ = () @property def raw_doc(self): if isinstance(self, Module): node = self.children[0] elif isinstance(self, ClassOrFunc): node = self.children[self.children.index(':') + 1] if is_node(node, 'suite'): node = node.children[2] else: simple_stmt = self.parent c = simple_stmt.parent.children index = c.index(simple_stmt) if not index: return '' node = c[index - 1] if is_node(node, 'simple_stmt'): node = node.children[0] if node.type == 'string': cleaned = cleandoc(literal_eval(node.value)) if is_py3 or isinstance(cleaned, unicode): return cleaned else: return unicode(cleaned, 'UTF-8', 'replace') return '' class Base(object): __slots__ = () def isinstance(self, *cls): return isinstance(self, cls) @Python3Method def get_parent_until(self, classes=(), reverse=False, include_current=True): if type(classes) not in (tuple, list): classes = (classes,) scope = self if include_current else self.parent while scope.parent is not None: if classes and reverse != scope.isinstance(*classes): break scope = scope.parent return scope def get_parent_scope(self, include_flows=False): scope = self.parent while scope is not None: if include_flows and isinstance(scope, Flow): return scope if scope.is_scope(): break scope = scope.parent return scope def is_scope(self): return False class Leaf(Base): __slots__ = ('position_modifier', 'value', 'parent', '_start_pos', 'prefix') def __init__(self, position_modifier, value, start_pos, prefix=''): self.position_modifier = position_modifier self.value = value self._start_pos = start_pos self.prefix = prefix self.parent = None @property def start_pos(self): return self._start_pos[0] + self.position_modifier.line, self._start_pos[1] @start_pos.setter def start_pos(self, value): self._start_pos = value[0] - self.position_modifier.line, value[1] @property def end_pos(self): return (self._start_pos[0] + self.position_modifier.line, self._start_pos[1] + len(self.value)) def move(self, line_offset, column_offset): self._start_pos = (self._start_pos[0] + line_offset, self._start_pos[1] + column_offset) def get_previous(self): node = self while True: c = node.parent.children i = c.index(self) if i == 0: node = node.parent if node.parent is None: raise IndexError('Cannot access the previous element of the first one.') else: node = c[i - 1] break while True: try: node = node.children[-1] except AttributeError: return node def get_code(self): return self.prefix + self.value def next_sibling(self): for i, child in enumerate(self.parent.children): if child is self: try: return self.parent.children[i + 1] except IndexError: return None def prev_sibling(self): for i, child in enumerate(self.parent.children): if child is self: if i == 0: return None return self.parent.children[i - 1] @utf8_repr def __repr__(self): return "<%s: %s>" % (type(self).__name__, self.value) class LeafWithNewLines(Leaf): __slots__ = () @property def end_pos(self): end_pos_line, end_pos_col = self.start_pos lines = self.value.split('\n') end_pos_line += len(lines) - 1 if self.start_pos[0] == end_pos_line: end_pos_col += 
len(lines[-1]) else: end_pos_col = len(lines[-1]) return end_pos_line, end_pos_col @utf8_repr def __repr__(self): return "<%s: %r>" % (type(self).__name__, self.value) class Whitespace(LeafWithNewLines): __slots__ = () type = 'whitespace' class Name(Leaf): type = 'name' __slots__ = () def __str__(self): return self.value def __unicode__(self): return self.value def __repr__(self): return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, self.start_pos[0], self.start_pos[1]) def get_definition(self): scope = self while scope.parent is not None: parent = scope.parent if scope.isinstance(Node, Name) and parent.type != 'simple_stmt': if scope.type == 'testlist_comp': try: if isinstance(scope.children[1], CompFor): return scope.children[1] except IndexError: pass scope = parent else: break return scope def is_definition(self): stmt = self.get_definition() if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'): return self == stmt.name elif stmt.type == 'for_stmt': return self.start_pos < stmt.children[2].start_pos elif stmt.type == 'try_stmt': return self.prev_sibling() == 'as' else: return stmt.type in ('expr_stmt', 'import_name', 'import_from', 'comp_for', 'with_stmt') and self in stmt.get_defined_names() def assignment_indexes(self): indexes = [] node = self.parent compare = self while node is not None: if is_node(node, 'testlist_comp', 'testlist_star_expr', 'exprlist'): for i, child in enumerate(node.children): if child == compare: indexes.insert(0, int(i / 2)) break else: raise LookupError("Couldn't find the assignment.") elif isinstance(node, (ExprStmt, CompFor)): break compare = node node = node.parent return indexes class Literal(LeafWithNewLines): __slots__ = () def eval(self): return literal_eval(self.value) class Number(Literal): type = 'number' __slots__ = () class String(Literal): type = 'string' __slots__ = () class Operator(Leaf): type = 'operator' __slots__ = () def __str__(self): return self.value def __eq__(self, other): if isinstance(other, Operator): return self is other else: return self.value == other def __ne__(self, other): return self.value != other def __hash__(self): return hash(self.value) class Keyword(Leaf): type = 'keyword' __slots__ = () def __eq__(self, other): if isinstance(other, Keyword): return self is other return self.value == other def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.value) class BaseNode(Base): __slots__ = ('children', 'parent') type = None def __init__(self, children): for c in children: c.parent = self self.children = children self.parent = None def move(self, line_offset, column_offset): for c in self.children: c.move(line_offset, column_offset) @property def start_pos(self): return self.children[0].start_pos @property def end_pos(self): return self.children[-1].end_pos def get_code(self): return "".join(c.get_code() for c in self.children) @Python3Method def name_for_position(self, position): for c in self.children: if isinstance(c, Leaf): if isinstance(c, Name) and c.start_pos <= position <= c.end_pos: return c else: result = c.name_for_position(position) if result is not None: return result return None @Python3Method def get_statement_for_position(self, pos): for c in self.children: if c.start_pos <= pos <= c.end_pos: if c.type not in ('decorated', 'simple_stmt', 'suite') and not isinstance(c, (Flow, ClassOrFunc)): return c else: try: return c.get_statement_for_position(pos) except AttributeError: pass return None def first_leaf(self): try: return self.children[0].first_leaf() 
except AttributeError: return self.children[0] @utf8_repr def __repr__(self): code = self.get_code().replace('\n', ' ') if not is_py3: code = code.encode(encoding, 'replace') return "<%s: %s@%s,%s>" % (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) class Node(BaseNode): __slots__ = ('type',) def __init__(self, type, children): super(Node, self).__init__(children) self.type = type def __repr__(self): return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) class IsScopeMeta(type): def __instancecheck__(self, other): return other.is_scope() class IsScope(use_metaclass(IsScopeMeta)): pass class Scope(BaseNode, DocstringMixin): __slots__ = ('names_dict',) def __init__(self, children): super(Scope, self).__init__(children) @property def returns(self): return self._search_in_scope(ReturnStmt) @property def subscopes(self): return self._search_in_scope(Scope) @property def flows(self): return self._search_in_scope(Flow) @property def imports(self): return self._search_in_scope(Import) @Python3Method def _search_in_scope(self, typ): def scan(children): elements = [] for element in children: if isinstance(element, typ): elements.append(element) if is_node(element, 'suite', 'simple_stmt', 'decorated') or isinstance(element, Flow): elements += scan(element.children) return elements return scan(self.children) @property def statements(self): return self._search_in_scope((ExprStmt, KeywordStatement)) def is_scope(self): return True def __repr__(self): try: name = self.path except AttributeError: try: name = self.name except AttributeError: name = self.command return "<%s: %s@%s-%s>" % (type(self).__name__, name, self.start_pos[0], self.end_pos[0]) def walk(self): yield self for s in self.subscopes: for scope in s.walk(): yield scope for r in self.statements: while isinstance(r, Flow): for scope in r.walk(): yield scope r = r.next class Module(Scope): __slots__ = ('path', 'global_names', 'used_names', '_name', 'error_statement_stacks') type = 'file_input' def __init__(self, children): super(Module, self).__init__(children) self.path = None @property @cache.underscore_memoization def name(self): if self.path is None: string = '' else: sep = (re.escape(os.path.sep),) * 2 r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path) string = re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) p = (1, 0) name = Name(zero_position_modifier, string, p) name.parent = self return name @property def has_explicit_absolute_import(self): for imp in self.imports: if imp.type == 'import_from' and imp.level == 0: for path in imp.paths(): if [str(name) for name in path] == ['__future__', 'absolute_import']: return True return False class Decorator(BaseNode): type = 'decorator' __slots__ = () class ClassOrFunc(Scope): __slots__ = () @property def name(self): return self.children[1] def get_decorators(self): decorated = self.parent if is_node(decorated, 'decorated'): if is_node(decorated.children[0], 'decorators'): return decorated.children[0].children else: return decorated.children[:1] else: return [] class Class(ClassOrFunc): type = 'classdef' __slots__ = () def __init__(self, children): super(Class, self).__init__(children) def get_super_arglist(self): if self.children[2] != '(': return None else: if self.children[3] == ')': return None else: return self.children[3] @property def doc(self): docstr = self.raw_doc for sub in self.subscopes: if str(sub.name) == '__init__': return '%s\n\n%s' % ( sub.get_call_signature(func_name=self.name), docstr) return docstr def 
_create_params(parent, argslist_list): def check_python2_nested_param(node): return node.type == 'tfpdef' and node.children[0] == '(' try: first = argslist_list[0] except IndexError: return [] if first.type in ('name', 'tfpdef'): if check_python2_nested_param(first): return [] else: return [Param([first], parent)] else: children = first.children params = [] start = 0 for end, child in enumerate(children + [None], 1): if child is None or child == ',': new_children = children[start:end] if new_children: if check_python2_nested_param(new_children[0]): continue params.append(Param(new_children, parent)) start = end return params class Function(ClassOrFunc): __slots__ = ('listeners',) type = 'funcdef' def __init__(self, children): super(Function, self).__init__(children) self.listeners = set() parameters = self.children[2] parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1]) @property def params(self): return self.children[2].children[1:-1] @property def name(self): return self.children[1] @property def yields(self): return self._search_in_scope(YieldExpr) def is_generator(self): return bool(self.yields) def annotation(self): try: return self.children[6] except IndexError: return None def get_call_signature(self, width=72, func_name=None): func_name = func_name or self.children[1] code = unicode(func_name) + self.children[2].get_code() return '\n'.join(textwrap.wrap(code, width)) @property def doc(self): docstr = self.raw_doc return '%s\n\n%s' % (self.get_call_signature(), docstr) class Lambda(Function): type = 'lambda' __slots__ = () def __init__(self, children): super(Function, self).__init__(children) self.listeners = set() lst = self.children[1:-2] self.children[1:-2] = _create_params(self, lst) @property def params(self): return self.children[1:-2] def is_generator(self): return False def yields(self): return [] def __repr__(self): return "<%s@%s>" % (self.__class__.__name__, self.start_pos) class Flow(BaseNode): __slots__ = () class IfStmt(Flow): type = 'if_stmt' __slots__ = () def check_nodes(self): for i, c in enumerate(self.children): if c in ('elif', 'if'): yield self.children[i + 1] def node_in_which_check_node(self, node): for check_node in reversed(list(self.check_nodes())): if check_node.start_pos < node.start_pos: return check_node
MIT License
apache/bloodhound
bloodhound_dashboard/bhdashboard/widgets/ticket.py
TicketGroupStatsWidget.render_widget
python
def render_widget(self, name, context, options): req = context.req params = ('query', 'stats_provider', 'skin', 'title', 'legend', 'desc', 'view') qstr, pnm, skin, title, legend, desc, view = self.bind_params(name, options, *params) statsp = resolve_ep_class(ITicketGroupStatsProvider, self, pnm, default=RoadmapModule(self.env).stats_provider) if skin is not None : skin = (skin or '').split('-', 2) tickets = exec_query(self.env, req, qstr) tickets = apply_ticket_permissions(self.env, req, tickets) stat = get_ticket_stats(statsp, tickets) add_stylesheet(req, 'dashboard/css/bootstrap.css') add_stylesheet(req, 'dashboard/css/bootstrap-responsive.css') add_stylesheet(req, 'dashboard/css/roadmap.css') return 'widget_progress.html', { 'title' : title, 'data' : dict( desc=desc, legend=legend, bar_styles=skin, stats=stat, view=view, ), }, context
Prepare ticket stats
https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_dashboard/bhdashboard/widgets/ticket.py#L294-L322
from itertools import imap, islice from urllib import urlencode from genshi.builder import tag from genshi.core import Markup from trac.core import implements, TracError from trac.ticket.api import TicketSystem from trac.ticket.query import Query from trac.ticket.roadmap import apply_ticket_permissions, get_ticket_stats, ITicketGroupStatsProvider, RoadmapModule from trac.util.text import unicode_urlencode from trac.web.chrome import add_stylesheet from bhdashboard.api import DateField, EnumField, InvalidWidgetArgument, ListField from bhdashboard.widgets.query import exec_query from bhdashboard.util import dummy_request, merge_links, minmax, pretty_wrapper, resolve_ep_class, trac_version, trac_tags from bhdashboard.util.widgets import WidgetBase, check_widget_name from bhdashboard.util.translation import _ from multiproduct.env import Product, ProductEnvironment class TicketFieldValuesWidget(WidgetBase): DASH_ITEM_HREF_MAP = {'milestone': ('milestone',), } def get_widget_params(self, name): return { 'field' : { 'desc' : """Target ticket field. """ """Required if no group in `query`.""", }, 'query' : { 'desc' : """TracQuery used to filter target tickets.""", }, 'title' : { 'desc' : """Widget title""", }, 'verbose' : { 'desc' : """Show frequency next to each value""", 'default' : False, 'type' : bool, }, 'threshold' : { 'desc' : """Filter items having smaller frequency""", 'type' : int, }, 'max' : { 'default' : 0, 'desc' : """Limit the number of items displayed""", 'type' : int }, 'view' : { 'desc' : """Display mode. Should be one of the following - `list` : Unordered value list (default) - `cloud` : Similar to tag cloud """, 'default' : 'list', 'type' : EnumField('list', 'cloud', 'table', 'compact'), }, } get_widget_params = pretty_wrapper(get_widget_params, check_widget_name) def render_widget(self, name, context, options): req = context.req params = ('field', 'query', 'verbose', 'threshold', 'max', 'title', 'view') fieldnm, query, verbose, threshold, maxitems, title, view = self.bind_params(name, options, *params) field_maps = {'type': {'admin_url': 'type', 'title': _('Types'), }, 'status': {'admin_url': None, 'title': _('Statuses'), }, 'priority': {'admin_url': 'priority', 'title': _('Priorities'), }, 'milestone': {'admin_url': 'milestones', 'title': _('Milestones'), }, 'component': {'admin_url': 'components', 'title': _('Components'), }, 'version': {'admin_url': 'versions', 'title': _('Versions'), }, 'severity': {'admin_url': 'severity', 'title': _('Severities'), }, 'resolution': {'admin_url': 'resolution', 'title': _('Resolutions'), }, } _field = [] def check_field_name(): if fieldnm is None: raise InvalidWidgetArgument('field', 'Missing ticket field') tsys = self.env[TicketSystem] if tsys is None: raise TracError(_('Error loading ticket system (disabled?)')) for field in tsys.get_ticket_fields(): if field['name'] == fieldnm: _field.append(field) break else: if fieldnm in field_maps: admin_suffix = field_maps.get(fieldnm)['admin_url'] if 'TICKET_ADMIN' in req.perm and admin_suffix is not None: hint = _('You can add one or more ' '<a href="%(url)s">here</a>.', url=req.href.admin('ticket', admin_suffix)) else: hint = _('Contact your administrator for further details') return 'widget_alert.html', { 'title' : Markup(field_maps[fieldnm]['title']), 'data' : dict(msgtype='info', msglabel="Note", msgbody=Markup(_('''No values are defined for ticket field <em>%(field)s</em>. 
%(hint)s''', field=fieldnm, hint=hint)) ) }, context else: raise InvalidWidgetArgument('field', 'Unknown ticket field %s' % (fieldnm,)) return None if query is None : data = check_field_name() if data is not None: return data field = _field[0] if field.get('custom'): sql = "SELECT COALESCE(value, ''), count(COALESCE(value, ''))" " FROM ticket_custom " " WHERE name='%(name)s' GROUP BY COALESCE(value, '')" else: sql = "SELECT COALESCE(%(name)s, ''), " "count(COALESCE(%(name)s, '')) FROM ticket " "GROUP BY COALESCE(%(name)s, '')" sql = sql % field db_query = req.perm.env.db_query if isinstance(req.perm.env, ProductEnvironment) else req.perm.env.db_direct_query with db_query as db: cursor = db.cursor() cursor.execute(sql) items = cursor.fetchall() QUERY_COLS = ['id', 'summary', 'owner', 'type', 'status', 'priority'] item_link= lambda item: req.href.query(col=QUERY_COLS + [fieldnm], **{fieldnm:item[0]}) else: query = Query.from_string(self.env, query, group=fieldnm) if query.group is None: data = check_field_name() if data is not None: return data raise InvalidWidgetArgument('field', 'Invalid ticket field for ticket groups') fieldnm = query.group sql, v = query.get_sql() sql = "SELECT COALESCE(%(name)s, '') , count(COALESCE(%(name)s, ''))" "FROM (%(sql)s) AS foo GROUP BY COALESCE(%(name)s, '')" % { 'name' : fieldnm, 'sql' : sql } db = self.env.get_db_cnx() try : cursor = db.cursor() cursor.execute(sql, v) items = cursor.fetchall() finally: cursor.close() query_href = query.get_href(req.href) item_link= lambda item: query_href + '&' + unicode_urlencode([(fieldnm, item[0])]) if fieldnm in self.DASH_ITEM_HREF_MAP: def dash_item_link(item): if item[0]: args = self.DASH_ITEM_HREF_MAP[fieldnm] + (item[0],) return req.href(*args) else: return item_link(item) else: dash_item_link = item_link if title is None: heading = _(fieldnm.capitalize()) else: heading = None return 'widget_cloud.html', { 'title' : title, 'data' : dict( bounds=minmax(items, lambda x: x[1]), item_link=dash_item_link, heading=heading, items=items, verbose=verbose, view=view, ), }, context render_widget = pretty_wrapper(render_widget, check_widget_name) class TicketGroupStatsWidget(WidgetBase): def get_widget_params(self, name): return { 'query' : { 'default' : 'status!=closed', 'desc' : """Query string""", }, 'stats_provider' : { 'desc' : """Name of the component implementing `ITicketGroupStatsProvider`, which is used to collect statistics on groups of tickets.""", 'default' : 'DefaultTicketGroupStatsProvider' }, 'skin' : { 'desc' : """Look and feel of the progress bar""", 'type' : EnumField('info', 'success', 'warning', 'danger', 'info-stripped', 'success-stripped', 'warning-stripped', 'danger-stripped') }, 'title' : { 'desc' : """Widget title""", }, 'legend' : { 'desc' : """Text on top of the progress bar""", }, 'desc' : { 'desc' : """Descriptive (wiki) text""", }, 'view' : { 'desc' : """Display mode to render progress info""", 'type' : EnumField('compact', 'standard') }, } get_widget_params = pretty_wrapper(get_widget_params, check_widget_name)
Apache License 2.0
better/convoys
convoys/regression.py
GeneralizedGamma.predict_ci
python
def predict_ci(self, x, t, ci=0.8): M = self.predict_posteriori(x, t) y = numpy.mean(M, axis=-1) y_lo = numpy.percentile(M, (1-ci)*50, axis=-1) y_hi = numpy.percentile(M, (1+ci)*50, axis=-1) return numpy.stack((y, y_lo, y_hi), axis=-1)
Works like :meth:`predict` but produces a confidence interval. Requires the model to be fit with `ci = True`. The return value will contain one more dimension than for :meth:`predict`, and the last dimension will have size 3, containing the mean, the lower bound of the confidence interval, and the upper bound of the confidence interval.
https://github.com/better/convoys/blob/99e832d2170ba9670e5e16bb3f632ac9055291f3/convoys/regression.py#L310-L323
from convoys import autograd_scipy_monkeypatch import autograd from autograd_gamma import gammainc from deprecated.sphinx import deprecated import emcee import numpy from scipy.special import gammaincinv from autograd.scipy.special import expit, gammaln from autograd.numpy import isnan, exp, dot, log, sum import progressbar import scipy.optimize import warnings __all__ = ['Exponential', 'Weibull', 'Gamma', 'GeneralizedGamma'] def generalized_gamma_loss(x, X, B, T, W, fix_k, fix_p, hierarchical, flavor, callback=None): k = exp(x[0]) if fix_k is None else fix_k p = exp(x[1]) if fix_p is None else fix_p log_sigma_alpha = x[2] log_sigma_beta = x[3] a = x[4] b = x[5] n_features = int((len(x)-6)/2) alpha = x[6:6+n_features] beta = x[6+n_features:6+2*n_features] lambd = exp(dot(X, alpha)+a) log_pdf = log(p) + (k*p) * log(lambd) - gammaln(k) + (k*p-1) * log(T) - (T*lambd)**p cdf = gammainc(k, (T*lambd)**p) if flavor == 'logistic': c = expit(dot(X, beta)+b) LL_observed = log(c) + log_pdf LL_censored = log((1 - c) + c * (1 - cdf)) elif flavor == 'linear': c = dot(X, beta)+b LL_observed = -(1 - c)**2 + log_pdf LL_censored = -(c*cdf)**2 LL_data = sum( W * B * LL_observed + W * (1 - B) * LL_censored, 0) if hierarchical: LL_prior_a = -4*log_sigma_alpha - 1/exp(log_sigma_alpha)**2 - dot(alpha, alpha) / (2*exp(log_sigma_alpha)**2) - n_features*log_sigma_alpha LL_prior_b = -4*log_sigma_beta - 1/exp(log_sigma_beta)**2 - dot(beta, beta) / (2*exp(log_sigma_beta)**2) - n_features*log_sigma_beta LL = LL_prior_a + LL_prior_b + LL_data else: LL = LL_data if isnan(LL): return -numpy.inf if callback is not None: callback(LL) return LL class RegressionModel(object): pass class GeneralizedGamma(RegressionModel): def __init__(self, mcmc=False, fix_k=None, fix_p=None, hierarchical=True, flavor='logistic', ci=None): self._mcmc = mcmc self._fix_k = fix_k self._fix_p = fix_p self._hierarchical = hierarchical self._flavor = flavor if ci is not None: warnings.warn('The `ci` argument is deprecated in 0.2.1 in favor ' ' of `mcmc`.', DeprecationWarning) self._mcmc = ci def fit(self, X, B, T, W=None): if W is None: W = numpy.ones(len(X)) X, B, T, W = (Z if type(Z) == numpy.ndarray else numpy.array(Z) for Z in (X, B, T, W)) keep_indexes = (T > 0) & (B >= 0) & (B <= 1) & (W >= 0) if sum(keep_indexes) < X.shape[0]: n_removed = X.shape[0] - sum(keep_indexes) warnings.warn('Warning! 
Removed %d/%d entries from inputs where ' 'T <= 0 or B not 0/1 or W < 0' % (n_removed, len(X))) X, B, T, W = (Z[keep_indexes] for Z in (X, B, T, W)) n_features = X.shape[1] x0 = numpy.zeros(6+2*n_features) x0[0] = +1 if self._fix_k is None else log(self._fix_k) x0[1] = -1 if self._fix_p is None else log(self._fix_p) args = (X, B, T, W, self._fix_k, self._fix_p, self._hierarchical, self._flavor) bar = progressbar.ProgressBar(widgets=[ progressbar.Variable('loss', width=15, precision=9), ' ', progressbar.BouncingBar(), ' ', progressbar.Counter(width=6), ' [', progressbar.Timer(), ']']) def callback(LL, value_history=[]): value_history.append(LL) bar.update(len(value_history), loss=LL) f = lambda x: -generalized_gamma_loss(x, *args, callback=callback) jac = autograd.grad(lambda x: -generalized_gamma_loss(x, *args)) res = scipy.optimize.minimize(f, x0, jac=jac, method='SLSQP', options={'maxiter': 9999}) if not res.success: raise Exception('Optimization failed with message: %s' % res.message) result = {'map': res.x} if self._fix_k: result['map'][0] = log(self._fix_k) if self._fix_p: result['map'][1] = log(self._fix_p) gradient = jac(result['map']) gradient_norm = numpy.dot(gradient, gradient) if gradient_norm >= 1e-2 * len(X): warnings.warn('Might not have found a local minimum! ' 'Norm of gradient is %f' % gradient_norm) if self._mcmc: dim, = res.x.shape n_walkers = 5*dim sampler = emcee.EnsembleSampler( nwalkers=n_walkers, ndim=dim, log_prob_fn=generalized_gamma_loss, args=args, ) mcmc_initial_noise = 1e-3 p0 = [result['map'] + mcmc_initial_noise * numpy.random.randn(dim) for i in range(n_walkers)] n_burnin = 100 n_steps = numpy.ceil(2000. / n_walkers) n_iterations = n_burnin + n_steps bar = progressbar.ProgressBar(max_value=n_iterations, widgets=[ progressbar.Percentage(), ' ', progressbar.Bar(), ' %d walkers [' % n_walkers, progressbar.AdaptiveETA(), ']']) for i, _ in enumerate(sampler.sample(p0, iterations=n_iterations)): bar.update(i+1) result['samples'] = sampler.chain[:, n_burnin:, :] .reshape((-1, dim)).T if self._fix_k: result['samples'][0, :] = log(self._fix_k) if self._fix_p: result['samples'][1, :] = log(self._fix_p) self.params = {k: { 'k': exp(data[0]), 'p': exp(data[1]), 'a': data[4], 'b': data[5], 'alpha': data[6:6+n_features].T, 'beta': data[6+n_features:6+2*n_features].T, } for k, data in result.items()} def _predict(self, params, x, t): lambd = exp(dot(x, params['alpha'].T) + params['a']) if self._flavor == 'logistic': c = expit(dot(x, params['beta'].T) + params['b']) elif self._flavor == 'linear': c = dot(x, params['beta'].T) + params['b'] M = c * gammainc( params['k'], (t*lambd)**params['p']) return M def predict_posteriori(self, x, t): x = numpy.array(x) t = numpy.array(t) assert self._mcmc params = self.params['samples'] t = numpy.expand_dims(t, -1) return self._predict(params, x, t)
MIT License
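A minimal usage sketch for `predict_ci` above, with synthetic data invented purely to illustrate the call shapes. In the code shown here the posterior sampling is switched on with `mcmc=True` (the older `ci=True` constructor flag is deprecated but still mapped to it):

import numpy

from convoys.regression import GeneralizedGamma

# Synthetic data, for illustration only: one feature column X,
# a binary conversion flag B and an observation time T per row.
n = 1000
X = numpy.random.randint(0, 2, size=(n, 1)).astype(float)
B = numpy.random.randint(0, 2, size=n)
T = numpy.random.uniform(1.0, 10.0, size=n)

# mcmc=True keeps posterior samples so predict_ci can form an interval.
model = GeneralizedGamma(mcmc=True)
model.fit(X, B, T)

# Mean conversion plus the 80% credible bounds at t=5 for a unit feature vector.
mean, lo, hi = model.predict_ci([1.0], 5.0, ci=0.8)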
hosford42/xcs
xcs/scenarios.py
ScenarioObserver.more
python
def more(self): more = self.wrapped.more() if not self.steps % 100: self.logger.info('Steps completed: %d', self.steps) self.logger.info('Average reward per step: %.5f', self.total_reward / (self.steps or 1)) if not more: self.logger.info('Run completed.') self.logger.info('Total steps: %d', self.steps) self.logger.info('Total reward received: %.5f', self.total_reward) self.logger.info('Average reward per step: %.5f', self.total_reward / (self.steps or 1)) return more
Return a Boolean indicating whether additional actions may be executed, per the reward program. Usage: while scenario.more(): situation = scenario.sense() selected_action = choice(possible_actions) reward = scenario.execute(selected_action) Arguments: None Return: A bool indicating whether additional situations remain in the current run.
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/scenarios.py#L563-L592
__author__ = 'Aaron Hosford' __all__ = [ 'HaystackProblem', 'MUXProblem', 'Scenario', 'ScenarioObserver', 'PreClassifiedData', 'UnclassifiedData', ] import logging import random from abc import ABCMeta, abstractmethod from . import numpy from . import bitstrings class Scenario(metaclass=ABCMeta): @property @abstractmethod def is_dynamic(self): raise NotImplementedError() @abstractmethod def get_possible_actions(self): raise NotImplementedError() @abstractmethod def reset(self): raise NotImplementedError() @abstractmethod def sense(self): raise NotImplementedError() @abstractmethod def execute(self, action): raise NotImplementedError() @abstractmethod def more(self): raise NotImplementedError() class MUXProblem(Scenario): def __init__(self, training_cycles=10000, address_size=3): assert isinstance(training_cycles, int) and training_cycles > 0 assert isinstance(address_size, int) and address_size > 0 self.address_size = address_size self.current_situation = None self.possible_actions = (True, False) self.initial_training_cycles = training_cycles self.remaining_cycles = training_cycles @property def is_dynamic(self): return False def get_possible_actions(self): return self.possible_actions def reset(self): self.remaining_cycles = self.initial_training_cycles def sense(self): self.current_situation = bitstrings.BitString([ random.randrange(2) for _ in range(self.address_size + (1 << self.address_size)) ]) return self.current_situation def execute(self, action): assert action in self.possible_actions self.remaining_cycles -= 1 index = int(bitstrings.BitString( self.current_situation[:self.address_size] )) bit = self.current_situation[self.address_size + index] return action == bit def more(self): return int(self.remaining_cycles > 0) class HaystackProblem(Scenario): def __init__(self, training_cycles=10000, input_size=500): assert isinstance(training_cycles, int) and training_cycles > 0 assert isinstance(input_size, int) and input_size > 0 self.input_size = input_size self.possible_actions = (True, False) self.initial_training_cycles = training_cycles self.remaining_cycles = training_cycles self.needle_index = random.randrange(input_size) self.needle_value = None @property def is_dynamic(self): return False def get_possible_actions(self): return self.possible_actions def reset(self): self.remaining_cycles = self.initial_training_cycles self.needle_index = random.randrange(self.input_size) def sense(self): haystack = bitstrings.BitString.random(self.input_size) self.needle_value = haystack[self.needle_index] return haystack def execute(self, action): assert action in self.possible_actions self.remaining_cycles -= 1 return action == self.needle_value def more(self): return self.remaining_cycles > 0 class ScenarioObserver(Scenario): def __init__(self, wrapped): assert isinstance(wrapped, Scenario) self.logger = logging.getLogger(__name__) self.wrapped = wrapped self.total_reward = 0 self.steps = 0 @property def is_dynamic(self): return self.wrapped.is_dynamic def get_possible_actions(self): possible_actions = self.wrapped.get_possible_actions() if len(possible_actions) <= 20: try: possible_actions = list(set(possible_actions)) except TypeError: possible_actions = list(possible_actions) try: possible_actions.sort() except TypeError: pass self.logger.info('Possible actions:') for action in possible_actions: self.logger.info(' %s', action) else: self.logger.info("%d possible actions.", len(possible_actions)) return possible_actions def reset(self): self.logger.info('Resetting scenario.') 
self.wrapped.reset() def sense(self): situation = self.wrapped.sense() self.logger.debug('Situation: %s', situation) return situation def execute(self, action): self.logger.debug('Executing action: %s', action) reward = self.wrapped.execute(action) if reward: self.total_reward += reward self.steps += 1 self.logger.debug('Reward received on this step: %.5f', reward or 0) self.logger.debug('Average reward per step: %.5f', self.total_reward / self.steps) return reward
BSD 3-Clause New or Revised License
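The loop in the docstring above can be exercised end to end by wrapping one of the bundled scenarios in `ScenarioObserver`. A small sketch; the cycle count and the logging setup are arbitrary choices:

import logging
from random import choice

from xcs.scenarios import MUXProblem, ScenarioObserver

logging.basicConfig(level=logging.INFO)

# Wrap a small multiplexer problem so progress is logged every 100 steps.
scenario = ScenarioObserver(MUXProblem(training_cycles=500))
actions = scenario.get_possible_actions()

while scenario.more():
    situation = scenario.sense()
    reward = scenario.execute(choice(actions))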
surrealai/surreal
surreal/launch/launcher.py
Launcher.launch
python
def launch(self, component_name): raise NotImplementedError
Launches the specified component Args: component_name(str): the process to launch
https://github.com/surrealai/surreal/blob/ae9e5f43bdd7d1bc6d39d0a4783b96b2c117fade/surreal/launch/launcher.py#L58-L64
import time import os import sys import subprocess from argparse import ArgumentParser import numpy as np from tensorplex import Loggerplex from tensorplex import Tensorplex from surreal.distributed import ShardedParameterServer from surreal.replay import ShardedReplay, ReplayLoadBalancer import surreal.utils as U import faulthandler faulthandler.enable() class Launcher: def main(self): argv = sys.argv[1:] parser_args = argv config_args = [] if '--' in argv: index = argv.index('--') parser_args = argv[:index] config_args = argv[index + 1:] parser = ArgumentParser(description='launch a surreal component') parser.add_argument('component_name', type=str, help='which component to launch') args = parser.parse_args(parser_args) self.config_args = config_args self.setup(config_args) self.launch(args.component_name)
MIT License
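`launch` is left abstract, so a concrete launcher has to override it (and `setup`, which `main` calls first). A hedged sketch; the component names and print statements are made up for illustration:

from surreal.launch.launcher import Launcher

class MyLauncher(Launcher):
    """Toy launcher; only the dispatch pattern matters here."""

    def setup(self, config_args):
        # Everything after `--` on the command line ends up here.
        self.args = config_args

    def launch(self, component_name):
        if component_name == 'learner':
            print('starting learner with', self.args)
        elif component_name == 'actor':
            print('starting actor with', self.args)
        else:
            raise ValueError('unknown component: %s' % component_name)

# `python run.py learner -- --lr 1e-3` would then dispatch to launch('learner')
# via MyLauncher().main().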
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/device_tracker/bt_home_hub_5.py
BTHomeHub5DeviceScanner.__init__
python
def __init__(self, config): _LOGGER.info("Initialising BT Home Hub 5") self.host = config.get(CONF_HOST, '192.168.1.254') self.last_results = {} self.url = 'http://{}/nonAuth/home_status.xml'.format(self.host) data = _get_homehub_data(self.url) self.success_init = data is not None
Initialise the scanner.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/device_tracker/bt_home_hub_5.py#L40-L49
import logging import re import xml.etree.ElementTree as ET import json from urllib.parse import unquote import requests import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA, DeviceScanner) from homeassistant.const import CONF_HOST _LOGGER = logging.getLogger(__name__) _MAC_REGEX = re.compile(r'(([0-9A-Fa-f]{1,2}\:){5}[0-9A-Fa-f]{1,2})') PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string }) def get_scanner(hass, config): scanner = BTHomeHub5DeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None class BTHomeHub5DeviceScanner(DeviceScanner):
MIT License
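Because `__init__` fetches `home_status.xml` straight away, instantiating the scanner only succeeds on a network with a reachable BT Home Hub 5. A sketch; the host is the default from the code above, and the import path assumes the vendored `homeassistant` package is importable:

from homeassistant.const import CONF_HOST
from homeassistant.components.device_tracker.bt_home_hub_5 import (
    BTHomeHub5DeviceScanner)

# success_init reflects whether the hub answered the status request.
scanner = BTHomeHub5DeviceScanner({CONF_HOST: '192.168.1.254'})
if scanner.success_init:
    print('hub reachable at', scanner.url)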
pckv/pcbot
plugins/brainfuck.py
brainfuck
python
async def brainfuck(message: discord.Message, code: Annotate.Code): program_input = "" if "," in code: await client.say(message, "**Input required, please type:**") reply = await client.wait_for_message(timeout=30, author=message.author, channel=message.channel) assert reply, "**You failed to reply.**" program_input = reply.clean_content await brainfuck_in_channel(message.channel, code, program_input)
Run the given brainfuck code and prompt for input if required. This implementation of brainfuck always returns a value for the , command, which means that whenever there is no input left to retrieve, a 0 is inserted in the pointer cell.
https://github.com/pckv/pcbot/blob/88d1243f22f2b00d6872b2c50468f26e2ba12965/plugins/brainfuck.py#L152-L166
import discord import plugins from pcbot import Annotate, Config client = plugins.client cfg = Config("brainfuck", data={}) max_iterations = 2 ** 17 brainfuck_chars = "+-><][.," class Loop: def __init__(self, start, end): self.start = start self.end = end self.pointer = None def set_pointer(self, pointer): self.pointer = (pointer.cursor, pointer.value) def compare_pointer(self, pointer): if self.pointer is None: return False return self.pointer == (pointer.cursor, pointer.value) class TooManyIterations(Exception): pass class InfiniteLoop(Exception): pass class Pointer: cells = 2 ** 15 cell_size = 2 ** 8 - 1 def __init__(self): self.array = [0] * self.cells self.cursor = 0 @property def value(self): return self.array[self.cursor] @value.setter def value(self, value): self.array[self.cursor] = value def add(self): self.value += 1 if self.value > self.cell_size: self.value = 0 def sub(self): self.value -= 1 if self.value < 0: self.value = self.cell_size def right(self): self.cursor += 1 if self.cursor >= self.cells: self.cursor = 0 def left(self): self.cursor -= 1 if self.cursor < 0: self.cursor = self.cells - 1 def find_loop_end(code: str, start: int): nest = 1 for i, c in enumerate(code): if c == "[": nest += 1 elif c == "]": nest -= 1 if nest == 0: return start + i raise SyntaxError("{}: Loop never ends!".format(start)) def run_brainfuck(code: str, for_input: str=""): pointer = Pointer() input_pointer = Pointer() input_pointer.array[:len(for_input)] = list(ord(c) for c in for_input) loops = [] i, iterations = 0, 0 output = "" while True: char = code[i] if char == "+": pointer.add() elif char == "-": pointer.sub() elif char == ">": pointer.right() elif char == "<": pointer.left() elif char == ".": output += chr(pointer.value) elif char == ",": pointer.value = input_pointer.value input_pointer.right() if loops: loops[-1].pointer = None elif char == "[": end = find_loop_end(code[i + 1:], i) loops.append(Loop(start=i, end=end)) if pointer.value == 0: i = end elif char == "]": if loops: if loops[-1].compare_pointer(pointer): raise InfiniteLoop("{}: Pointer value unchanged.".format(loops[-1].start)) if not pointer.value == 0: i = loops[-1].start loops[-1].set_pointer(pointer) else: del loops[-1] i += 1 if i >= len(code): return output or "Pointer value: {}".format(pointer.value) iterations += 1 if iterations >= max_iterations: raise TooManyIterations("Program exceeded maximum number of iterations ({})".format(max_iterations)) async def brainfuck_in_channel(channel: discord.Channel, code, program_input): try: output = run_brainfuck(code, program_input) except Exception as e: await client.send_message(channel, "```\n{}: {}```".format(type(e).__name__, str(e))) else: assert len(output) <= 2000, "**The output was too long.**" await client.send_message(channel, "```\n{}```".format(output)) @plugins.command(aliases="bf")
MIT License
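The command above delegates to `run_brainfuck` from the same plugin module, which can also be called directly. A small sketch; the import path assumes the bot's `plugins` package is importable:

from plugins.brainfuck import run_brainfuck

# 8 * 8 = 64 in the second cell, +1 gives 65, and `.` emits chr(65) == 'A'.
print(run_brainfuck('++++++++[>++++++++<-]>+.'))      # -> A

# `,` reads from the optional input string, so this echoes both characters;
# with no input left it would read 0 instead, as the docstring explains.
print(run_brainfuck(',.,.', for_input='hi'))          # -> hi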
gkno/gkno_launcher
src/networkx/readwrite/multiline_adjlist.py
generate_multiline_adjlist
python
def generate_multiline_adjlist(G, delimiter = ' '): if G.is_directed(): if G.is_multigraph(): for s,nbrs in G.adjacency_iter(): nbr_edges=[ (u,data) for u,datadict in nbrs.items() for key,data in datadict.items()] deg=len(nbr_edges) yield make_str(s)+delimiter+"%i"%(deg) for u,d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u)+delimiter+make_str(d) else: for s,nbrs in G.adjacency_iter(): deg=len(nbrs) yield make_str(s)+delimiter+"%i"%(deg) for u,d in nbrs.items(): if d is None: yield make_str(u) else: yield make_str(u)+delimiter+make_str(d) else: if G.is_multigraph(): seen=set() for s,nbrs in G.adjacency_iter(): nbr_edges=[ (u,data) for u,datadict in nbrs.items() if u not in seen for key,data in datadict.items()] deg=len(nbr_edges) yield make_str(s)+delimiter+"%i"%(deg) for u,d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u)+delimiter+make_str(d) seen.add(s) else: seen=set() for s,nbrs in G.adjacency_iter(): nbr_edges=[ (u,d) for u,d in nbrs.items() if u not in seen] deg=len(nbr_edges) yield make_str(s)+delimiter+"%i"%(deg) for u,d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u)+delimiter+make_str(d) seen.add(s)
Generate a single line of the graph G in multiline adjacency list format. Parameters ---------- G : NetworkX graph delimiter : string, optional Separator for node labels Returns ------- lines : string Lines of data in multiline adjlist format. Examples -------- >>> G = nx.lollipop_graph(4, 3) >>> for line in nx.generate_multiline_adjlist(G): ... print(line) 0 3 1 {} 2 {} 3 {} 1 2 2 {} 3 {} 2 1 3 {} 3 1 4 {} 4 1 5 {} 5 1 6 {} 6 0 See Also -------- write_multiline_adjlist, read_multiline_adjlist
https://github.com/gkno/gkno_launcher/blob/4210ede8448155d70bfbdbd658125a1d95ea8e95/src/networkx/readwrite/multiline_adjlist.py#L46-L136
__author__ = '\n'.join(['Aric Hagberg <hagberg@lanl.gov>', 'Dan Schult <dschult@colgate.edu>', 'Loïc Séguin-C. <loicseguin@gmail.com>']) __all__ = ['generate_multiline_adjlist', 'write_multiline_adjlist', 'parse_multiline_adjlist', 'read_multiline_adjlist'] from networkx.utils import make_str, open_file import networkx as nx
MIT License
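Beyond the doctest in the docstring, the generator output can be joined and written out by hand, which is essentially what `write_multiline_adjlist` does. A short sketch; the graph and file name are arbitrary:

import networkx as nx

G = nx.path_graph(4)

# Each node line carries the count of not-yet-seen neighbours, followed by
# one line per neighbour with that edge's data dict.
lines = nx.generate_multiline_adjlist(G, delimiter=' ')
with open('path_graph.adjlist', 'w') as fh:
    fh.write('\n'.join(lines) + '\n')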
clementpinard/flownetpytorch
models/FlowNetC.py
flownetc
python
def flownetc(data=None): model = FlowNetC(batchNorm=False) if data is not None: model.load_state_dict(data['state_dict']) return model
FlowNetC model architecture from the "FlowNet: Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852) Args: data : pretrained weights of the network. Will create a new one if not set
https://github.com/clementpinard/flownetpytorch/blob/8465a43b95d8776207fd1ba6801b8d1f2134742f/models/FlowNetC.py#L112-L122
import torch import torch.nn as nn from torch.nn.init import kaiming_normal_, constant_ from .util import conv, predict_flow, deconv, crop_like, correlate __all__ = [ 'flownetc', 'flownetc_bn' ] class FlowNetC(nn.Module): expansion = 1 def __init__(self,batchNorm=True): super(FlowNetC,self).__init__() self.batchNorm = batchNorm self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2) self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1) self.conv3_1 = conv(self.batchNorm, 473, 256) self.conv4 = conv(self.batchNorm, 256, 512, stride=2) self.conv4_1 = conv(self.batchNorm, 512, 512) self.conv5 = conv(self.batchNorm, 512, 512, stride=2) self.conv5_1 = conv(self.batchNorm, 512, 512) self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) self.conv6_1 = conv(self.batchNorm,1024, 1024) self.deconv5 = deconv(1024,512) self.deconv4 = deconv(1026,256) self.deconv3 = deconv(770,128) self.deconv2 = deconv(386,64) self.predict_flow6 = predict_flow(1024) self.predict_flow5 = predict_flow(1026) self.predict_flow4 = predict_flow(770) self.predict_flow3 = predict_flow(386) self.predict_flow2 = predict_flow(194) self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): kaiming_normal_(m.weight, 0.1) if m.bias is not None: constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): constant_(m.weight, 1) constant_(m.bias, 0) def forward(self, x): x1 = x[:,:3] x2 = x[:,3:] out_conv1a = self.conv1(x1) out_conv2a = self.conv2(out_conv1a) out_conv3a = self.conv3(out_conv2a) out_conv1b = self.conv1(x2) out_conv2b = self.conv2(out_conv1b) out_conv3b = self.conv3(out_conv2b) out_conv_redir = self.conv_redir(out_conv3a) out_correlation = correlate(out_conv3a,out_conv3b) in_conv3_1 = torch.cat([out_conv_redir, out_correlation], dim=1) out_conv3 = self.conv3_1(in_conv3_1) out_conv4 = self.conv4_1(self.conv4(out_conv3)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = crop_like(self.upsampled_flow6_to_5(flow6), out_conv5) out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) flow5 = self.predict_flow5(concat5) flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4) out_deconv4 = crop_like(self.deconv4(concat5), out_conv4) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) flow4 = self.predict_flow4(concat4) flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3) out_deconv3 = crop_like(self.deconv3(concat4), out_conv3) concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) flow3 = self.predict_flow3(concat3) flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2a) out_deconv2 = crop_like(self.deconv2(concat3), out_conv2a) concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1) flow2 = self.predict_flow2(concat2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return flow2 def weight_parameters(self): return [param for name, param in self.named_parameters() if 'weight' in name] def bias_parameters(self): return [param for name, param 
in self.named_parameters() if 'bias' in name]
MIT License
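A usage sketch for the factory: the checkpoint file name is a placeholder for weights saved as `{'state_dict': ...}`, the import path follows the repository layout, and running the model needs the correlation op wrapped by `models.util.correlate` to be installed:

import torch

from models.FlowNetC import flownetc

# Placeholder checkpoint; flownetc() expects a dict with a 'state_dict' entry.
checkpoint = torch.load('flownetc.pth', map_location='cpu')
model = flownetc(checkpoint).eval()

# Two RGB frames stacked along the channel axis give the 6 input channels
# that forward() splits back into x1 and x2.
pair = torch.randn(1, 6, 384, 512)
with torch.no_grad():
    flow = model(pair)      # in eval mode only the finest flow map is returned
print(flow.shape)           # quarter resolution: (1, 2, 96, 128)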
allenporter/python-google-nest-sdm
google_nest_sdm/google_nest_api.py
GoogleNestAPI.async_get_device
python
async def async_get_device(self, device_id: str) -> Optional[Device]: resp = await self._auth.get(f"{self._devices_url}/{device_id}") data = await resp.json() if NAME not in data: return None return Device.MakeDevice(data, self._auth)
Return a specific device.
https://github.com/allenporter/python-google-nest-sdm/blob/b3e41cbaa6961f4eda1b4375cebeaba34239ef91/google_nest_sdm/google_nest_api.py#L57-L63
from typing import List, Optional from .auth import AbstractAuth from .device import Device from .structure import Structure STRUCTURES = "structures" DEVICES = "devices" NAME = "name" class GoogleNestAPI: def __init__(self, auth: AbstractAuth, project_id: str): self._auth = auth self._project_id = project_id @property def _structures_url(self) -> str: return f"enterprises/{self._project_id}/structures" async def async_get_structures(self) -> List[Structure]: resp = await self._auth.get(self._structures_url) response_data = await resp.json() if STRUCTURES not in response_data: return [] structures = response_data[STRUCTURES] return [ Structure.MakeStructure(structure_data) for structure_data in structures ] async def async_get_structure(self, structure_id: str) -> Optional[Structure]: resp = await self._auth.get(f"{self._structures_url}/{structure_id}") data = await resp.json() if NAME not in data: return None return Structure.MakeStructure(data) @property def _devices_url(self) -> str: return f"enterprises/{self._project_id}/devices" async def async_get_devices(self) -> List[Device]: resp = await self._auth.get(self._devices_url) response_data = await resp.json() if DEVICES not in response_data: return [] devices = response_data[DEVICES] return [Device.MakeDevice(device_data, self._auth) for device_data in devices]
Apache License 2.0
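A hedged async sketch of the call: building a concrete `AbstractAuth` that attaches OAuth credentials is out of scope here, so `auth` is left as an assumption, and the project and device ids are placeholders:

import asyncio

from google_nest_sdm.google_nest_api import GoogleNestAPI

async def show_device(auth, project_id, device_id):
    api = GoogleNestAPI(auth, project_id)
    device = await api.async_get_device(device_id)
    if device is None:
        print('no such device')
    else:
        print(device.name)

# asyncio.run(show_device(auth, 'my-project-id', 'some-device-id'))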
danielfrg/jupyterhub-kubernetes_spawner
kubernetes_spawner/swagger_client/models/v1_node_spec.py
V1NodeSpec.provider_id
python
def provider_id(self): return self._provider_id
Gets the provider_id of this V1NodeSpec. ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> :return: The provider_id of this V1NodeSpec. :rtype: str
https://github.com/danielfrg/jupyterhub-kubernetes_spawner/blob/15a2b63ef719f8c3ff83221333f7de69c1495512/kubernetes_spawner/swagger_client/models/v1_node_spec.py#L103-L111
from pprint import pformat from six import iteritems class V1NodeSpec(object): def __init__(self): self.swagger_types = { 'pod_cidr': 'str', 'external_id': 'str', 'provider_id': 'str', 'unschedulable': 'bool' } self.attribute_map = { 'pod_cidr': 'podCIDR', 'external_id': 'externalID', 'provider_id': 'providerID', 'unschedulable': 'unschedulable' } self._pod_cidr = None self._external_id = None self._provider_id = None self._unschedulable = None @property def pod_cidr(self): return self._pod_cidr @pod_cidr.setter def pod_cidr(self, pod_cidr): self._pod_cidr = pod_cidr @property def external_id(self): return self._external_id @external_id.setter def external_id(self, external_id): self._external_id = external_id @property
Apache License 2.0
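Usage of the generated property is plain attribute access; the provider string below is just an example of the documented `<ProviderName>://<ProviderSpecificNodeID>` format:

from kubernetes_spawner.swagger_client.models.v1_node_spec import V1NodeSpec

spec = V1NodeSpec()
spec.provider_id = 'gce://my-project/us-central1-a/node-1'
print(spec.provider_id)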
pytorchrl/pytorchrl
pytorchrl/agent/algorithms/ddqn.py
DDQN.set_weights
python
def set_weights(self, weights): self.actor.load_state_dict(weights) self.iter += 1 self.update_target_networks() self.update_epsilon()
Update actor critic with the given weights. Update also target networks. Parameters ---------- weights: dict of tensors Dict containing actor weights to be set.
https://github.com/pytorchrl/pytorchrl/blob/1ea781580f31cf3bbe5b38b8cceca939ed713241/pytorchrl/agent/algorithms/ddqn.py#L378-L392
import random import numpy as np from copy import deepcopy import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import pytorchrl as prl from pytorchrl.agent.algorithms.base import Algorithm from pytorchrl.agent.algorithms.utils import get_gradients, set_gradients from pytorchrl.agent.algorithms.policy_loss_addons import PolicyLossAddOn class DDQN(Algorithm): def __init__(self, device, actor, lr=1e-4, gamma=0.99, polyak=0.995, num_updates=1, update_every=50, test_every=5000, max_grad_norm=0.5, start_steps=20000, mini_batch_size=64, num_test_episodes=5, initial_epsilon=1.0, epsilon_decay=0.999, target_update_interval=1, policy_loss_addons=[]): self._gamma = gamma self._start_steps = int(start_steps) self._num_epochs = 1 self._update_every = int(update_every) self._num_mini_batch = int(num_updates) self._mini_batch_size = int(mini_batch_size) self._test_every = int(test_every) self._num_test_episodes = int(num_test_episodes) self.iter = 0 self.device = device self.polyak = polyak self.epsilon = initial_epsilon self.actor = actor self.max_grad_norm = max_grad_norm self.epsilon_decay = epsilon_decay self.target_update_interval = target_update_interval assert hasattr(self.actor, "q1"), "DDPG requires q critic (num_critics=1)" self.actor_targ = deepcopy(actor) for p in self.actor_targ.parameters(): p.requires_grad = False self.q_optimizer = optim.Adam(self.actor.q1.parameters(), lr=lr) assert isinstance(policy_loss_addons, (PolicyLossAddOn, list)), "DDQN policy_loss_addons parameter should be a PolicyLossAddOn instance " "or a list of PolicyLossAddOn instances" if isinstance(policy_loss_addons, list): for addon in policy_loss_addons: assert isinstance(addon, PolicyLossAddOn), "DDQN policy_loss_addons parameter should be a PolicyLossAddOn" " instance or a list of PolicyLossAddOn instances" else: policy_loss_addons = [policy_loss_addons] self.policy_loss_addons = policy_loss_addons for addon in self.policy_loss_addons: addon.setup(self.device) @classmethod def create_factory(cls, lr=1e-4, gamma=0.99, polyak=0.995, num_updates=50, update_every=50, test_every=5000, start_steps=20000, max_grad_norm=0.5, mini_batch_size=64, num_test_episodes=5, epsilon_decay=0.999, initial_epsilon=1.0, target_update_interval=1, policy_loss_addons=[]): def create_algo_instance(device, actor): return cls(lr=lr, gamma=gamma, device=device, polyak=polyak, actor=actor, test_every=test_every, start_steps=start_steps, num_updates=num_updates, update_every=update_every, epsilon_decay=epsilon_decay, max_grad_norm=max_grad_norm, mini_batch_size=mini_batch_size, initial_epsilon=initial_epsilon, num_test_episodes=num_test_episodes, target_update_interval=target_update_interval, policy_loss_addons=policy_loss_addons) return create_algo_instance, prl.DDQN def acting_step(self, obs, rhs, done, deterministic=False): if random.random() > self.epsilon: with torch.no_grad(): q = self.actor.get_q_scores(obs).get("q1") action = clipped_action = torch.argmax(q, dim=1).unsqueeze(0) else: action = clipped_action = torch.tensor( [self.actor.action_space.sample()]).unsqueeze(0) other = {} return action, clipped_action, rhs, other def compute_loss(self, batch, n_step=1, per_weights=1): o, rhs, d = batch[prl.OBS], batch[prl.RHS], batch[prl.DONE] a, r = batch[prl.ACT], batch[prl.REW] o2, rhs2, d2 = batch[prl.OBS2], batch[prl.RHS2], batch[prl.DONE2] q_targ_vals = self.actor_targ.get_q_scores(o2, rhs2, d2).get("q1") q_targ_next = q_targ_vals.max(dim=1)[0].unsqueeze(1) q_targ = r + (self.gamma ** n_step) * (1 
- d2) * q_targ_next q_vals = self.actor.get_q_scores(o, rhs, d).get("q1") q_exp = q_vals.gather(1, a.long()) loss = F.mse_loss(q_targ, q_exp) errors = (q_exp - q_targ).abs().detach().cpu() return loss, errors def compute_gradients(self, batch, grads_to_cpu=True): if self.actor.is_recurrent: batch = self.actor.burn_in_recurrent_states(batch) n_step = batch["n_step"] if "n_step" in batch else 1.0 per_weights = batch["per_weights"] if "per_weights" in batch else 1.0 loss, errors = self.compute_loss(batch, n_step, per_weights) self.q_optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(self.actor.q1.parameters(), self.max_grad_norm) grads = get_gradients(self.actor.q1, grads_to_cpu=grads_to_cpu) info = { "loss_q": loss.detach().item(), "epsilon": self.epsilon, } if "per_weights" in batch: info.update({"errors": errors}) return grads, info def update_target_networks(self): if self.iter % self.target_update_interval == 0: with torch.no_grad(): for p, p_targ in zip(self.actor.parameters(), self.actor_targ.parameters()): p_targ.data.mul_(self.polyak) p_targ.data.add_((1 - self.polyak) * p.data) def update_epsilon(self): self.epsilon *= self.epsilon_decay self.epsilon = np.clip(self.epsilon, 0.05, 1.0) def apply_gradients(self, gradients=None): if gradients is not None: set_gradients( self.actor.q1, gradients=gradients, device=self.device) self.q_optimizer.step() self.iter += 1 self.update_target_networks() self.update_epsilon()
MIT License
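A sketch of the sync step `set_weights` is designed for: pushing a learner's actor parameters into a worker copy. Both objects are assumed to be DDQN instances built with the same actor architecture (for instance via the same `create_factory` call):

def sync_worker(learner, worker):
    # Pull the learner's current actor parameters...
    weights = learner.actor.state_dict()
    # ...and load them into the worker; set_weights() also bumps worker.iter,
    # polyak-updates the target network and decays epsilon, as shown above.
    worker.set_weights(weights)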
sphinx-toolbox/sphinx-toolbox
sphinx_toolbox/wikipedia.py
setup
python
def setup(app: Sphinx) -> SphinxExtMetadata: app.add_role("wikipedia", make_wikipedia_link) app.add_config_value("wikipedia_lang", "en", "env", [str]) return {"parallel_read_safe": True}
Setup :mod:`sphinx_toolbox.wikipedia`. .. versionadded:: 1.0.0 :param app: The Sphinx application.
https://github.com/sphinx-toolbox/sphinx-toolbox/blob/cee88c6bceac20a9ae0e381ada2fb2453ca3fc0b/sphinx_toolbox/wikipedia.py#L157-L169
import re from typing import Dict, List, Tuple from urllib.parse import quote from apeye.url import URL from docutils import nodes from docutils.nodes import system_message from docutils.parsers.rst.states import Inliner from sphinx.application import Sphinx from sphinx.util.nodes import split_explicit_title from sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version __all__ = ["make_wikipedia_link", "setup"] base_url = "https://%s.wikipedia.org/wiki" _wiki_lang_re = re.compile(":(.*?):(.*)") def _get_wikipedia_lang(inliner: Inliner): return inliner.document.settings.env.config.wikipedia_lang def make_wikipedia_link( name: str, rawtext: str, text: str, lineno: int, inliner: Inliner, options: Dict = {}, content: List[str] = [] ) -> Tuple[List[nodes.reference], List[system_message]]: text = nodes.unescape(text) has_explicit, title, target = split_explicit_title(text) m = _wiki_lang_re.match(target) if m: lang, target = m.groups() if not has_explicit: title = target else: lang = _get_wikipedia_lang(inliner) ref = URL(base_url % lang) / quote(target.replace(' ', '_'), safe='') node = nodes.reference(rawtext, title, refuri=str(ref), **options) return [node], [] @metadata_add_version
MIT License
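A minimal `conf.py` sketch for enabling the extension; the German default and the article titles in the comments are arbitrary examples:

# conf.py
extensions = [
    'sphinx_toolbox.wikipedia',
]

# Registered by setup() above with an 'en' default; override per project.
wikipedia_lang = 'de'

# In a document:
#   :wikipedia:`Python (Programmiersprache)`
#   :wikipedia:`:en:Python (programming language)`   (explicit language prefix)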
anhaidgroup/py_stringsimjoin
py_stringsimjoin/join/overlap_join_py.py
overlap_join_py
python
def overlap_join_py(ltable, rtable, l_key_attr, r_key_attr, l_join_attr, r_join_attr, tokenizer, threshold, comp_op='>=', allow_missing=False, l_out_attrs=None, r_out_attrs=None, l_out_prefix='l_', r_out_prefix='r_', out_sim_score=True, n_jobs=1, show_progress=True): validate_tokenizer(tokenizer) revert_tokenizer_return_set_flag = False if not tokenizer.get_return_set(): tokenizer.set_return_set(True) revert_tokenizer_return_set_flag = True overlap_filter = OverlapFilter(tokenizer, threshold, comp_op, allow_missing) output_table = overlap_filter.filter_tables(ltable, rtable, l_key_attr, r_key_attr, l_join_attr, r_join_attr, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, n_jobs, show_progress) if revert_tokenizer_return_set_flag: tokenizer.set_return_set(False) return output_table
Join two tables using overlap measure. For two sets X and Y, the overlap between them is given by: :math:`overlap(X, Y) = |X \\cap Y|` Finds tuple pairs from left table and right table such that the overlap between the join attributes satisfies the condition on input threshold. For example, if the comparison operator is '>=', finds tuple pairs whose overlap between the strings that are the values of the join attributes is greater than or equal to the input threshold, as specified in "threshold". Args: ltable (DataFrame): left input table. rtable (DataFrame): right input table. l_key_attr (string): key attribute in left table. r_key_attr (string): key attribute in right table. l_join_attr (string): join attribute in left table. r_join_attr (string): join attribute in right table. tokenizer (Tokenizer): tokenizer to be used to tokenize join attributes. threshold (float): overlap threshold to be satisfied. comp_op (string): comparison operator. Supported values are '>=', '>' and '=' (defaults to '>='). allow_missing (boolean): flag to indicate whether tuple pairs with missing value in at least one of the join attributes should be included in the output (defaults to False). If this flag is set to True, a tuple in ltable with missing value in the join attribute will be matched with every tuple in rtable and vice versa. l_out_attrs (list): list of attribute names from the left table to be included in the output table (defaults to None). r_out_attrs (list): list of attribute names from the right table to be included in the output table (defaults to None). l_out_prefix (string): prefix to be used for the attribute names coming from the left table, in the output table (defaults to 'l\_'). r_out_prefix (string): prefix to be used for the attribute names coming from the right table, in the output table (defaults to 'r\_'). out_sim_score (boolean): flag to indicate whether similarity score should be included in the output table (defaults to True). Setting this flag to True will add a column named '_sim_score' in the output table. This column will contain the similarity scores for the tuple pairs in the output. n_jobs (int): number of parallel jobs to use for the computation (defaults to 1). If -1 is given, all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used (where n_cpus is the total number of CPUs in the machine). Thus for n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel computing code will be used (i.e., equivalent to the default). show_progress (boolean): flag to indicate whether task progress should be displayed to the user (defaults to True). Returns: An output table containing tuple pairs that satisfy the join condition (DataFrame).
https://github.com/anhaidgroup/py_stringsimjoin/blob/fa11e9225ba772dfd45b1d055d8d2805fac9e649/py_stringsimjoin/join/overlap_join_py.py#L5-L110
from py_stringsimjoin.filter.overlap_filter import OverlapFilter from py_stringsimjoin.utils.validation import validate_tokenizer
BSD 3-Clause New or Revised License
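A usage sketch with two tiny DataFrames; the tokenizer is assumed to come from py_stringmatching, the tokenizer family this package is normally paired with:

import pandas as pd
from py_stringmatching import WhitespaceTokenizer

from py_stringsimjoin.join.overlap_join_py import overlap_join_py

A = pd.DataFrame({'id': [1, 2],
                  'name': ['data science handbook', 'deep learning book']})
B = pd.DataFrame({'id': [10, 11],
                  'name': ['the deep learning book', 'handbook of data']})

tok = WhitespaceTokenizer(return_set=True)

# Keep pairs whose token overlap is at least 2.
pairs = overlap_join_py(A, B, 'id', 'id', 'name', 'name', tok,
                        threshold=2,
                        l_out_attrs=['name'], r_out_attrs=['name'],
                        show_progress=False)
print(pairs)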
luno/luno-python
luno_python/base_client.py
BaseClient.__init__
python
def __init__(self, base_url='', timeout=0, api_key_id='', api_key_secret=''): self.set_auth(api_key_id, api_key_secret) self.set_base_url(base_url) self.set_timeout(timeout) self.session = requests.Session()
:type base_url: str :type timeout: float :type api_key_id: str :type api_key_secret: str
https://github.com/luno/luno-python/blob/95b742e35cdc5353b8a9771f39ded9daf629c852/luno_python/base_client.py#L22-L34
import json import platform import requests import six try: from json.decoder import JSONDecodeError except ImportError: JSONDecodeError = ValueError from . import VERSION from .error import APIError DEFAULT_BASE_URL = 'https://api.luno.com' DEFAULT_TIMEOUT = 10 PYTHON_VERSION = platform.python_version() SYSTEM = platform.system() ARCH = platform.machine() class BaseClient:
MIT License
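Constructing the base client directly is straightforward; the credentials below are placeholders, and in practice the library's higher-level `Client` subclass is what exposes the API calls:

from luno_python.base_client import BaseClient

c = BaseClient(
    base_url='https://api.luno.com',
    timeout=15,
    api_key_id='my_key_id',          # placeholder
    api_key_secret='my_key_secret',  # placeholder
)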
indeedeng/django-proctor
proctor/cache.py
SessionCacher._get_session_dict_key
python
def _get_session_dict_key(self): return 'proctorcache'
Return the key used for the request.session dict.
https://github.com/indeedeng/django-proctor/blob/b9f5ffc94b62072bd0cb2c9e22abfae738c329e3/proctor/cache.py#L187-L189
from __future__ import absolute_import, unicode_literals import logging import string import time import django.core.cache import six from . import api from . import groups logger = logging.getLogger('application.proctor.cache') class Cacher(object): def __init__(self, version_timeout_seconds=None): self.version_timeout_seconds = (version_timeout_seconds if version_timeout_seconds is not None else (5 * 60)) def get(self, request, params, allow_expired=False): latest_seen_version = self._get_latest_version() if latest_seen_version is None: logger.debug("Proctor cache MISS (version expired)") return None cache_dict = self._get_cache_dict(request, params) if cache_dict is None: logger.debug("Proctor cache MISS (absent)") return None group_dict = cache_dict['group_dict'] cache_params = api.ProctorParameters(**cache_dict['params']) cache_matrix_version = cache_dict['matrix_version'] valid = (cache_matrix_version == latest_seen_version and params == cache_params) if valid: logger.debug("Proctor cache HIT") return {key: groups.GroupAssignment(*val) for key, val in six.iteritems(group_dict)} else: logger.debug("Proctor cache MISS (invalidated)") self._del_cache_dict(request, params) return None def set(self, request, params, group_dict, api_response): latest_seen_version = self.update_matrix_version(api_response) cache_dict = {} cache_dict['group_dict'] = group_dict cache_dict['params'] = params.as_dict() cache_dict['matrix_version'] = latest_seen_version self._set_cache_dict(request, params, cache_dict) logger.debug("Proctor cache SET") def update_matrix_version(self, api_response): new_version = api_response['data']['audit']['version'] latest_seen_version = self._get_latest_version() self._set_latest_version(new_version) if latest_seen_version != new_version: logger.debug("Proctor test matrix version changed to %s.", new_version) latest_seen_version = new_version return latest_seen_version def _get_cache_dict(self, request, params): raise NotImplementedError("_get_cache_dict() must be overridden.") def _set_cache_dict(self, request, params, cache_dict): raise NotImplementedError("_set_cache_dict() must be overridden.") def _del_cache_dict(self, request, params): raise NotImplementedError("_del_cache_dict() must be overridden.") def _get_latest_version(self): raise NotImplementedError("_get_latest_version() must be overridden.") def _set_latest_version(self, version): raise NotImplementedError("_set_latest_version() must be overridden.") class SessionCacher(Cacher): def __init__(self, version_timeout_seconds=None): super(SessionCacher, self).__init__(version_timeout_seconds) self.seen_matrix_version = None self.version_expiry_time = time.time() def get(self, request, params, allow_expired=False): if allow_expired: self.version_expiry_time = time.time() + self.version_timeout_seconds return super(SessionCacher, self).get(request, params) def _get_cache_dict(self, request, params): return request.session.get(self._get_session_dict_key()) def _set_cache_dict(self, request, params, cache_dict): request.session[self._get_session_dict_key()] = cache_dict def _del_cache_dict(self, request, params): del request.session[self._get_session_dict_key()] def _get_latest_version(self): if time.time() >= self.version_expiry_time: return None else: return self.seen_matrix_version def _set_latest_version(self, version): self.seen_matrix_version = version self.version_expiry_time = time.time() + self.version_timeout_seconds
Apache License 2.0
danielnyga/pracmln
python2/pracmln/logic/common.py
Logic.copy
python
def copy(self, mln=None, idx=inherit): raise Exception('%s does not implement copy()' % str(type(self)))
Produces a deep copy of this formula. If `mln` is specified, the copied formula will be tied to `mln`. If not, it will be tied to the same MLN as the original formula is. If `idx` is None, the index of the original formula will be used. :param mln: the MLN that the new formula shall be tied to. :param idx: the index of the formula. If `None`, the index of this formula will be erased to `None`. If `idx` is `auto`, the formula will get a new index from the MLN. If `idx` is :class:`mln.constants.inherit`, the index from this formula will be inherited to the copy (default).
https://github.com/danielnyga/pracmln/blob/bbda65696fb8753b11ff007e991280ebe42d78f9/python2/pracmln/logic/common.py#L496-L509
import sys from dnutils import logs, ifnone from pracmln.mln.util import fstr, dict_union, colorize from pracmln.mln.errors import NoSuchDomainError, NoSuchPredicateError from collections import defaultdict import itertools from pracmln.mln.constants import HARD, auto, predicate_color, inherit from grammar import StandardGrammar, PRACGrammar logger = logs.getlogger(__name__) def latexsym(sym): return r'\textit{%s}' % str(sym) class Logic(object): def __init__(self, grammar, mln): if grammar not in ('StandardGrammar', 'PRACGrammar'): raise Exception('Invalid grammar: %s' % grammar) self.grammar = eval(grammar)(self) self.mln = mln def __getstate__(self): d = self.__dict__.copy() d['grammar'] = type(self.grammar).__name__ return d def __setstate__(self, d): self.__dict__ = d self.grammar = eval(d['grammar'])(self) class Constraint(object): def template_variants(self, mln): raise Exception("%s does not implement getTemplateVariants" % str(type(self))) def truth(self, world): raise Exception("%s does not implement truth" % str(type(self))) def islogical(self): raise Exception("%s does not implement islogical" % str(type(self))) def itergroundings(self, mrf, simplify=False, domains=None): raise Exception("%s does not implement itergroundings" % str(type(self))) def idx_gndatoms(self, l=None): raise Exception("%s does not implement idxgndatoms" % str(type(self))) def gndatoms(self, l=None): raise Exception("%s does not implement gndatoms" % str(type(self))) class Formula(Constraint): def __init__(self, mln=None, idx=None): self.mln = mln if idx == auto and mln is not None: self.idx = len(mln.formulas) else: self.idx = idx @property def idx(self): return self._idx @idx.setter def idx(self, idx): self._idx = idx @property def mln(self): return self._mln @mln.setter def mln(self, mln): if hasattr(self, 'children'): for child in self.children: child.mln = mln self._mln = mln @property def weight(self): return self.mln.weight(self.idx) @weight.setter def weight(self, w): if self.idx is None: raise Exception('%s does not have an index' % str(self)) self.mln.weight(self.idx, w) @property def ishard(self): return self.weight == HARD def contains_gndatom(self, gndatomidx): if not hasattr(self, "children"): return False for child in self.children: if child.contains_gndatom(gndatomidx): return True return False def gndatom_indices(self, l=None): if l == None: l = [] if not hasattr(self, "children"): return l for child in self.children: child.gndatom_indices(l) return l def gndatoms(self, l=None): if l is None: l = [] if not hasattr(self, "children"): return l for child in self.children: child.gndatoms(l) return l def templ_atoms(self): templ_atoms = [] for literal in self.literals(): for templ in literal.template_variants(): templ_atoms.append(templ) return templ_atoms def atomic_constituents(self, oftype=None): const = list(self.literals()) if oftype is None: return const else: return filter(lambda c: isinstance(c, oftype), const) def constituents(self, oftype=None, const=None): if const is None: const = [] if oftype is None or type(self) is oftype: const.append(self) if hasattr(self, 'children'): for child in self.children: child.constituents(oftype, const) return const def template_variants(self): uniqvars = list(self.mln._unique_templvars[self.idx]) vardoms = self.template_variables() uniqvars_ = defaultdict(set) for var in uniqvars: dom = vardoms[var] uniqvars_[dom].add(var) assignments = [] for domain, variables in uniqvars_.iteritems(): group = [] domvalues = self.mln.domains[domain] if not 
domvalues: logger.warning('Template variants cannot be constructed since the domain "{}" is empty.'.format(domain)) for values in itertools.combinations(domvalues, len(variables)): group.append(dict([(var, val) for var, val in zip(variables, values)])) assignments.append(group) for variable, domain in vardoms.iteritems(): if variable in uniqvars: continue group = [] domvalues = self.mln.domains[domain] if not domvalues: logger.warning('Template variants cannot be constructed since the domain "{}" is empty.'.format(domain)) for value in self.mln.domains[domain]: group.append(dict([(variable, value)])) assignments.append(group) def product(assign, result=[]): if len(assign) == 0: yield result return for a in assign[0]: for r in product(assign[1:], result+[a]): yield r for assignment in product(assignments): if assignment: for t in self._ground_template(reduce(lambda x, y: dict_union(x, y), itertools.chain(assignment))): yield t else: for t in self._ground_template({}): yield t def template_variables(self, variable=None): raise Exception("%s does not implement template_variables" % str(type(self))) def _ground_template(self, assignment): raise Exception("%s does not implement _ground_template" % str(type(self))) def itervargroundings(self, mrf, partial=None): variables = self.vardoms() if partial is not None: for v in [p for p in partial if p in variables]: del variables[v] for assignment in self._itervargroundings(mrf, variables, {}): yield assignment def _itervargroundings(self, mrf, variables, assignment): if variables == {}: yield assignment return variables = dict(variables) varname, domname = variables.popitem() domain = mrf.domains[domname] assignment = dict(assignment) for value in domain: assignment[varname] = value for assign in self._itervargroundings(mrf, dict(variables), assignment): yield assign def itergroundings(self, mrf, simplify=False, domains=None): try: variables = self.vardoms() except Exception, e: raise Exception("Error grounding '%s': %s" % (str(self), str(e))) for grounding in self._itergroundings(mrf, variables, {}, simplify, domains): yield grounding def iter_true_var_assignments(self, mrf, world=None, truth_thr=1.0, strict=False, unknown=False, partial=None): if world is None: world = list(mrf.evidence) if partial is None: partial = {} try: variables = self.vardoms() for var in partial: if var in variables: del variables[var] except Exception, e: raise Exception("Error grounding '%s': %s" % (str(self), str(e))) for assignment in self._iter_true_var_assignments(mrf, variables, partial, world, dict(variables), truth_thr=truth_thr, strict=strict, unknown=unknown): yield assignment def _iter_true_var_assignments(self, mrf, variables, assignment, world, allvars, truth_thr=1.0, strict=False, unknown=False): if variables == {}: gf = self.ground(mrf, assignment) truth = gf(world) if (((truth >= truth_thr) if not strict else (truth > truth_thr)) and truth is not None) or (truth is None and unknown): true_assignment = {} for v in allvars: true_assignment[v] = assignment[v] yield true_assignment return varname, domname = variables.popitem() assignment_ = dict(assignment) if domname not in mrf.domains: raise NoSuchDomainError('The domain %s does not exist, but is needed to ground the formula %s' % (domname, str(self))) for value in mrf.domains[domname]: assignment_[varname] = value for ass in self._iter_true_var_assignments(mrf, dict(variables), assignment_, world, allvars, truth_thr=truth_thr, strict=strict, unknown=unknown): yield ass def _itergroundings(self, mrf, 
variables, assignment, simplify=False, domains=None): if not variables: gf = self.ground(mrf, assignment, simplify, domains) yield gf return varname, domname = variables.popitem() domain = domains[varname] if domains is not None else mrf.domains[domname] for value in domain: assignment[varname] = value for gf in self._itergroundings(mrf, dict(variables), assignment, simplify, domains): yield gf def vardoms(self, variables=None, constants=None): raise Exception("%s does not implement vardoms()" % str(type(self))) def prednames(self, prednames=None): raise Exception('%s does not implement prednames()' % str(type(self))) def ground(self, mrf, assignment, simplify=False, partial=False): raise Exception("%s does not implement ground" % str(type(self)))
BSD 2-Clause Simplified License
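The pracmln context above enumerates variable groundings with a recursive generator (the nested product helper and _itervargroundings). Below is a minimal, self-contained sketch of that enumeration pattern using plain dicts; the names and domains are illustrative stand-ins, not pracmln's MRF API.

def iter_assignments(variables, domains, partial=None):
    # Recursively yield every assignment of domain values to the given variables.
    # variables: list of variable names still to assign
    # domains:   dict mapping variable name -> list of candidate values
    # partial:   assignment built up so far
    partial = dict(partial or {})
    if not variables:
        yield partial
        return
    var, rest = variables[0], variables[1:]
    for value in domains[var]:
        partial[var] = value
        # Recurse with a copy so sibling branches do not share state.
        yield from iter_assignments(rest, domains, dict(partial))

# Example: two variables over small domains -> 2 * 3 = 6 assignments.
doms = {"?person": ["Anna", "Bob"], "?place": ["home", "office", "lab"]}
for assignment in iter_assignments(["?person", "?place"], doms):
    print(assignment)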
aio-libs/aiokafka
aiokafka/consumer/fetcher.py
Fetcher._proc_offset_requests
python
async def _proc_offset_requests(self, timestamps): await self._client._maybe_wait_metadata() timestamps_by_node = collections.defaultdict( lambda: collections.defaultdict(list)) for partition, timestamp in timestamps.items(): node_id = self._client.cluster.leader_for_partition(partition) if node_id is None: self._client.add_topic(partition.topic) log.debug("Partition %s is unknown for fetching offset," " wait for metadata refresh", partition) raise Errors.StaleMetadata(partition) elif node_id == -1: log.debug("Leader for partition %s unavailable for fetching " "offset, wait for metadata refresh", partition) raise Errors.LeaderNotAvailableError(partition) else: timestamps_by_node[node_id][partition.topic].append( (partition.partition, timestamp) ) futs = [] for node_id, topic_data in timestamps_by_node.items(): futs.append( self._proc_offset_request(node_id, topic_data) ) offsets = {} res = await asyncio.gather(*futs) for partial_offsets in res: offsets.update(partial_offsets) return offsets
Fetch offsets for each partition in the timestamps dict. This may send requests to multiple nodes, depending on which node is the leader for each partition. Arguments: timestamps (dict): {TopicPartition: int} mapping of fetch timestamps. Returns: Future: resolves to a mapping of retrieved offsets
https://github.com/aio-libs/aiokafka/blob/901b2cb37291c8ee9cad93b6f3f4ee05518f0817/aiokafka/consumer/fetcher.py#L896-L941
import asyncio import collections import logging import random import time from itertools import chain from kafka.protocol.offset import OffsetRequest from aiokafka.protocol.fetch import FetchRequest import aiokafka.errors as Errors from aiokafka.errors import ( ConsumerStoppedError, RecordTooLargeError, KafkaTimeoutError) from aiokafka.record.memory_records import MemoryRecords from aiokafka.record.control_record import ControlRecord, ABORT_MARKER from aiokafka.structs import OffsetAndTimestamp, TopicPartition, ConsumerRecord from aiokafka.util import create_future, create_task log = logging.getLogger(__name__) UNKNOWN_OFFSET = -1 READ_UNCOMMITTED = 0 READ_COMMITTED = 1 class OffsetResetStrategy: LATEST = -1 EARLIEST = -2 NONE = 0 @classmethod def from_str(cls, name): name = name.lower() if name == "latest": return cls.LATEST if name == "earliest": return cls.EARLIEST if name == "none": return cls.NONE else: log.warning( 'Unrecognized ``auto_offset_reset`` config, using NONE') return cls.NONE @classmethod def to_str(cls, value): if value == cls.LATEST: return "latest" if value == cls.EARLIEST: return "earliest" if value == cls.NONE: return "none" else: return f"timestamp({value})" class FetchResult: def __init__( self, tp, *, assignment, partition_records, backoff): self._topic_partition = tp self._partition_records = partition_records self._created = time.monotonic() self._backoff = backoff self._assignment = assignment def calculate_backoff(self): lifetime = time.monotonic() - self._created if lifetime < self._backoff: return self._backoff - lifetime return 0 def check_assignment(self, tp): assignment = self._assignment return_result = True if assignment.active: tp = self._topic_partition tp_state = assignment.state_value(tp) if tp_state.paused: return_result = False else: position = tp_state.position if position != self._partition_records.next_fetch_offset: return_result = False else: return_result = False if not return_result: log.debug("Not returning fetched records for partition %s" " since it is no fetchable (unassigned or paused)", tp) self._partition_records = None return False return True def _update_position(self): state = self._assignment.state_value(self._topic_partition) state.consumed_to(self._partition_records.next_fetch_offset) def getone(self): tp = self._topic_partition if not self.check_assignment(tp) or not self.has_more(): return while True: try: msg = next(self._partition_records) except StopIteration: self._update_position() self._partition_records = None return else: self._update_position() return msg def getall(self, max_records=None): tp = self._topic_partition if not self.check_assignment(tp) or not self.has_more(): return [] ret_list = [] for msg in self._partition_records: ret_list.append(msg) if max_records is not None and len(ret_list) >= max_records: self._update_position() break else: self._update_position() self._partition_records = None return ret_list def has_more(self): return self._partition_records is not None def __repr__(self): return f"<FetchResult position={self._partition_records.next_fetch_offset!r}>" class FetchError: def __init__(self, *, error, backoff): self._error = error self._created = time.monotonic() self._backoff = backoff def calculate_backoff(self): lifetime = time.monotonic() - self._created if lifetime < self._backoff: return self._backoff - lifetime return 0 def check_raise(self): raise self._error def __repr__(self): return f"<FetchError error={self._error!r}>" class PartitionRecords: def __init__( self, tp, records, 
aborted_transactions, fetch_offset, key_deserializer, value_deserializer, check_crcs, isolation_level): self._tp = tp self._records = records self._aborted_transactions = sorted( aborted_transactions or [], key=lambda x: x[1]) self._aborted_producers = set() self._key_deserializer = key_deserializer self._value_deserializer = value_deserializer self._check_crcs = check_crcs self._isolation_level = isolation_level self.next_fetch_offset = fetch_offset self._records_iterator = self._unpack_records() def __iter__(self): return self def __next__(self): try: return next(self._records_iterator) except StopIteration: self._records_iterator = None raise def _unpack_records(self): tp = self._tp records = self._records while records.has_next(): next_batch = records.next_batch() if self._check_crcs and not next_batch.validate_crc(): raise Errors.CorruptRecordException( f"Invalid CRC - {tp}") if self._isolation_level == READ_COMMITTED and next_batch.producer_id is not None: self._consume_aborted_up_to(next_batch.base_offset) if next_batch.is_control_batch: if self._contains_abort_marker(next_batch): self._aborted_producers.discard(next_batch.producer_id) if next_batch.is_transactional and next_batch.producer_id in self._aborted_producers: log.debug( "Skipping aborted record batch from partition %s with" " producer_id %s and offsets %s to %s", tp, next_batch.producer_id, next_batch.base_offset, next_batch.next_offset - 1 ) self.next_fetch_offset = next_batch.next_offset continue if next_batch.is_control_batch: self.next_fetch_offset = next_batch.next_offset continue for record in next_batch: if record.offset < self.next_fetch_offset: continue consumer_record = self._consumer_record(tp, record) self.next_fetch_offset = record.offset + 1 yield consumer_record self.next_fetch_offset = next_batch.next_offset def _consume_aborted_up_to(self, batch_offset): aborted_transactions = self._aborted_transactions while aborted_transactions: producer_id, first_offset = aborted_transactions[0] if first_offset <= batch_offset: self._aborted_producers.add(producer_id) aborted_transactions.pop(0) else: break def _contains_abort_marker(self, next_batch): try: control_record = next(next_batch) except StopIteration: raise Errors.KafkaError( "Control batch did not contain any records") return ControlRecord.parse(control_record.key) == ABORT_MARKER def _consumer_record(self, tp, record): key_size = len(record.key) if record.key is not None else -1 value_size = len(record.value) if record.value is not None else -1 if self._key_deserializer: key = self._key_deserializer(record.key) else: key = record.key if self._value_deserializer: value = self._value_deserializer(record.value) else: value = record.value return ConsumerRecord( tp.topic, tp.partition, record.offset, record.timestamp, record.timestamp_type, key, value, record.checksum, key_size, value_size, tuple(record.headers)) class Fetcher: def __init__( self, client, subscriptions, *, key_deserializer=None, value_deserializer=None, fetch_min_bytes=1, fetch_max_bytes=52428800, fetch_max_wait_ms=500, max_partition_fetch_bytes=1048576, check_crcs=True, fetcher_timeout=0.2, prefetch_backoff=0.1, retry_backoff_ms=100, auto_offset_reset='latest', isolation_level="read_uncommitted"): self._client = client self._loop = client._loop self._key_deserializer = key_deserializer self._value_deserializer = value_deserializer self._fetch_min_bytes = fetch_min_bytes self._fetch_max_bytes = fetch_max_bytes self._fetch_max_wait_ms = fetch_max_wait_ms self._max_partition_fetch_bytes = 
max_partition_fetch_bytes self._check_crcs = check_crcs self._fetcher_timeout = fetcher_timeout self._prefetch_backoff = prefetch_backoff self._retry_backoff = retry_backoff_ms / 1000 self._subscriptions = subscriptions self._default_reset_strategy = OffsetResetStrategy.from_str( auto_offset_reset) if isolation_level == "read_uncommitted": self._isolation_level = READ_UNCOMMITTED elif isolation_level == "read_committed": self._isolation_level = READ_COMMITTED else: raise ValueError( f"Incorrect isolation level {isolation_level}") self._records = collections.OrderedDict() self._in_flight = set() self._pending_tasks = set() self._wait_consume_future = None self._fetch_waiters = set() self._subscriptions.register_fetch_waiters(self._fetch_waiters) if client.api_version >= (0, 11): req_version = 4 elif client.api_version >= (0, 10, 1): req_version = 3 elif client.api_version >= (0, 10): req_version = 2 else: req_version = 1 self._fetch_request_class = FetchRequest[req_version] self._fetch_task = create_task(self._fetch_requests_routine()) self._closed = False async def close(self): self._closed = True self._fetch_task.cancel() try: await self._fetch_task except asyncio.CancelledError: pass for waiter in self._fetch_waiters: self._notify(waiter) for x in self._pending_tasks: x.cancel() await x def _notify(self, future): if future is not None and not future.done(): future.set_result(None) def _create_fetch_waiter(self): fut = self._loop.create_future() self._fetch_waiters.add(fut) fut.add_done_callback( lambda f, waiters=self._fetch_waiters: waiters.remove(f)) return fut @property def error_future(self): return self._fetch_task async def _fetch_requests_routine(self): try: assignment = None def start_pending_task(coro, node_id, self=self): task = create_task(coro) self._pending_tasks.add(task) self._in_flight.add(node_id) def on_done(fut, self=self): self._in_flight.discard(node_id) task.add_done_callback(on_done) while True: if assignment is None or not assignment.active: for task in self._pending_tasks: if not task.done(): task.cancel() await task self._pending_tasks.clear() self._records.clear() subscription = self._subscriptions.subscription if subscription is None or subscription.assignment is None: try: waiter = self._subscriptions.wait_for_assignment() await waiter except Errors.KafkaError: continue assignment = self._subscriptions.subscription.assignment assert assignment is not None and assignment.active self._wait_consume_future = create_future() (fetch_requests, reset_requests, timeout, invalid_metadata, resume_futures) = self._get_actions_per_node(assignment) for node_id, request in fetch_requests: start_pending_task( self._proc_fetch_request(assignment, node_id, request), node_id=node_id) for node_id, tps in reset_requests.items(): start_pending_task( self._update_fetch_positions(assignment, node_id, tps), node_id=node_id) other_futs = [self._wait_consume_future, assignment.unassign_future] if invalid_metadata: fut = self._client.force_metadata_update() other_futs.append(fut) done_set, _ = await asyncio.wait( set( chain(self._pending_tasks, other_futs, resume_futures) ), timeout=timeout, return_when=asyncio.FIRST_COMPLETED) done_pending = self._pending_tasks.intersection(done_set) if done_pending: has_new_data = any(fut.result() for fut in done_pending) if has_new_data: for waiter in self._fetch_waiters: self._notify(waiter) self._pending_tasks -= done_pending except asyncio.CancelledError: pass except Exception: log.error("Unexpected error in fetcher routine", exc_info=True) raise 
Errors.KafkaError("Unexpected error during data retrieval") def _get_actions_per_node(self, assignment): fetchable = collections.defaultdict(list) awaiting_reset = collections.defaultdict(list) backoff_by_nodes = collections.defaultdict(list) resume_futures = [] invalid_metadata = False for tp in assignment.tps: tp_state = assignment.state_value(tp) node_id = self._client.cluster.leader_for_partition(tp) backoff = 0 if tp in self._records: record = self._records[tp] backoff = record.calculate_backoff() if backoff: backoff_by_nodes[node_id].append(backoff) elif node_id in self._in_flight: continue elif node_id is None or node_id == -1: log.debug("No leader found for partition %s." " Waiting metadata update", tp) invalid_metadata = True elif not tp_state.has_valid_position: awaiting_reset[node_id].append(tp) elif tp_state.paused: resume_futures.append(tp_state.resume_fut) else: position = tp_state.position fetchable[node_id].append((tp, position)) log.debug( "Adding fetch request for partition %s at offset %d", tp, position) fetch_requests = [] for node_id, partition_data in fetchable.items(): if node_id in backoff_by_nodes: continue if node_id in awaiting_reset: continue random.shuffle(partition_data) by_topics = collections.defaultdict(list) for tp, position in partition_data: by_topics[tp.topic].append(( tp.partition, position, self._max_partition_fetch_bytes)) klass = self._fetch_request_class if klass.API_VERSION > 3: req = klass( -1, self._fetch_max_wait_ms, self._fetch_min_bytes, self._fetch_max_bytes, self._isolation_level, list(by_topics.items())) elif klass.API_VERSION == 3: req = klass( -1, self._fetch_max_wait_ms, self._fetch_min_bytes, self._fetch_max_bytes, list(by_topics.items())) else: req = klass( -1, self._fetch_max_wait_ms, self._fetch_min_bytes, list(by_topics.items())) fetch_requests.append((node_id, req)) if backoff_by_nodes: backoff = min(map(max, backoff_by_nodes.values())) else: backoff = self._fetcher_timeout return ( fetch_requests, awaiting_reset, backoff, invalid_metadata, resume_futures ) async def _proc_fetch_request(self, assignment, node_id, request): needs_wakeup = False try: response = await self._client.send(node_id, request) except Errors.KafkaError as err: log.error("Failed fetch messages from %s: %s", node_id, err) await asyncio.sleep(self._retry_backoff) return False except asyncio.CancelledError: return False if not assignment.active: log.debug( "Discarding fetch response since the assignment changed during" " fetch") return False fetch_offsets = {} for topic, partitions in request.topics: for partition, offset, _ in partitions: fetch_offsets[TopicPartition(topic, partition)] = offset now_ms = int(1000 * time.time()) for topic, partitions in response.topics: for partition, error_code, highwater, *part_data in partitions: tp = TopicPartition(topic, partition) error_type = Errors.for_code(error_code) fetch_offset = fetch_offsets[tp] tp_state = assignment.state_value(tp) if not tp_state.has_valid_position or tp_state.position != fetch_offset: log.debug( "Discarding fetch response for partition %s " "since its offset %s does not match the current " "position", tp, fetch_offset) continue if error_type is Errors.NoError: if request.API_VERSION >= 4: aborted_transactions = part_data[-2] lso = part_data[-3] else: aborted_transactions = None lso = None tp_state.highwater = highwater tp_state.lso = lso tp_state.timestamp = now_ms records = MemoryRecords(part_data[-1]) if records.has_next(): log.debug( "Adding fetched record for partition %s with" " offset %d to 
buffered record list", tp, fetch_offset) partition_records = PartitionRecords( tp, records, aborted_transactions, fetch_offset, self._key_deserializer, self._value_deserializer, self._check_crcs, self._isolation_level) self._records[tp] = FetchResult( tp, partition_records=partition_records, assignment=assignment, backoff=self._prefetch_backoff) needs_wakeup = True elif records.size_in_bytes() > 0: err = RecordTooLargeError( "There are some messages at [Partition=Offset]: " "%s=%s whose size is larger than the fetch size %s" " and hence cannot be ever returned. " "Increase the fetch size, or decrease the maximum " "message size the broker will allow.", tp, fetch_offset, self._max_partition_fetch_bytes) self._set_error(tp, err) tp_state.consumed_to(tp_state.position + 1) needs_wakeup = True elif error_type in (Errors.NotLeaderForPartitionError, Errors.UnknownTopicOrPartitionError): self._client.force_metadata_update() elif error_type is Errors.OffsetOutOfRangeError: if self._default_reset_strategy != OffsetResetStrategy.NONE: tp_state.await_reset(self._default_reset_strategy) else: err = Errors.OffsetOutOfRangeError({tp: fetch_offset}) self._set_error(tp, err) needs_wakeup = True log.info( "Fetch offset %s is out of range for partition %s," " resetting offset", fetch_offset, tp) elif error_type is Errors.TopicAuthorizationFailedError: log.warning( "Not authorized to read from topic %s.", tp.topic) err = Errors.TopicAuthorizationFailedError(tp.topic) self._set_error(tp, err) needs_wakeup = True else: log.warning('Unexpected error while fetching data: %s', error_type.__name__) return needs_wakeup def _set_error(self, tp, error): assert tp not in self._records, self._records[tp] self._records[tp] = FetchError( error=error, backoff=self._prefetch_backoff) async def _update_fetch_positions(self, assignment, node_id, tps): log.debug("Updating fetch positions for partitions %s", tps) needs_wakeup = False for tp in tps: tp_state = assignment.state_value(tp) if tp_state.has_valid_position or tp_state.awaiting_reset: continue try: committed = await tp_state.fetch_committed() except asyncio.CancelledError: return needs_wakeup assert committed is not None if tp_state.has_valid_position or tp_state.awaiting_reset: continue if committed.offset == UNKNOWN_OFFSET: if self._default_reset_strategy != OffsetResetStrategy.NONE: tp_state.await_reset(self._default_reset_strategy) else: err = Errors.NoOffsetForPartitionError(tp) self._set_error(tp, err) needs_wakeup = True log.debug( "No committed offset found for %s", tp) else: log.debug("Resetting offset for partition %s to the " "committed offset %s", tp, committed) tp_state.reset_to(committed.offset) topic_data = collections.defaultdict(list) needs_reset = [] for tp in tps: tp_state = assignment.state_value(tp) if not tp_state.awaiting_reset: continue needs_reset.append(tp) strategy = tp_state.reset_strategy assert strategy is not None log.debug("Resetting offset for partition %s using %s strategy.", tp, OffsetResetStrategy.to_str(strategy)) topic_data[tp.topic].append((tp.partition, strategy)) if not topic_data: return needs_wakeup try: try: offsets = await self._proc_offset_request( node_id, topic_data) except Errors.KafkaError as err: log.error("Failed fetch offsets from %s: %s", node_id, err) await asyncio.sleep(self._retry_backoff) return needs_wakeup except asyncio.CancelledError: return needs_wakeup for tp in needs_reset: offset = offsets[tp][0] tp_state = assignment.state_value(tp) if tp_state.awaiting_reset: tp_state.reset_to(offset) return 
needs_wakeup async def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")): if not timestamps: return {} timeout = timeout_ms / 1000 start_time = time.monotonic() remaining = timeout while True: try: offsets = await asyncio.wait_for( self._proc_offset_requests(timestamps), timeout=None if remaining == float("inf") else remaining ) except asyncio.TimeoutError: break except Errors.KafkaError as error: if not error.retriable: raise error if error.invalid_metadata: self._client.force_metadata_update() elapsed = time.monotonic() - start_time remaining = max(0, remaining - elapsed) if remaining < self._retry_backoff: break await asyncio.sleep(self._retry_backoff) else: return offsets raise KafkaTimeoutError( "Failed to get offsets by times in %s ms" % timeout_ms)
Apache License 2.0
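A hedged sketch of the grouping step that _proc_offset_requests above performs: partitions are bucketed by the broker that leads them, one request is issued per node concurrently, and the partial results are merged. The leader lookup and request function here are stand-ins, not aiokafka's real client API.

import asyncio
import collections

async def fetch_offsets(leader_for, send_offset_request, timestamps):
    # Group (partition, timestamp) pairs by the node that leads each partition.
    by_node = collections.defaultdict(dict)
    for partition, ts in timestamps.items():
        node_id = leader_for(partition)
        if node_id is None:
            raise RuntimeError(f"no leader known for {partition}")
        by_node[node_id][partition] = ts

    # One request per node, issued concurrently, then merged into a single dict.
    results = await asyncio.gather(
        *(send_offset_request(node, parts) for node, parts in by_node.items())
    )
    offsets = {}
    for partial in results:
        offsets.update(partial)
    return offsets

# Tiny demo with fake leaders and a fake per-node request function.
async def _demo():
    leaders = {"t-0": 1, "t-1": 2}
    async def fake_request(node, parts):
        return {p: (ts, 100 + node) for p, ts in parts.items()}
    print(await fetch_offsets(leaders.get, fake_request, {"t-0": -1, "t-1": -2}))

asyncio.run(_demo())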
quantopian/zipline
tests/pipeline/test_international_markets.py
intervals_overlap
python
def intervals_overlap(a, b): a_strictly_before = a[1] < b[0] b_strictly_before = b[1] < a[0] return not (a_strictly_before or b_strictly_before)
Check whether a pair of datetime intervals overlap. Parameters ---------- a : (pd.Timestamp, pd.Timestamp) b : (pd.Timestamp, pd.Timestamp) Returns ------- have_overlap : bool Bool indicating whether there is a non-empty intersection between the intervals.
https://github.com/quantopian/zipline/blob/014f1fc339dc8b7671d29be2d85ce57d3daec343/tests/pipeline/test_international_markets.py#L465-L484
from itertools import cycle, islice from nose_parameterized import parameterized import numpy as np import pandas as pd from trading_calendars import get_calendar from zipline.assets.synthetic import make_rotating_equity_info from zipline.data.in_memory_daily_bars import InMemoryDailyBarReader from zipline.pipeline.domain import ( CA_EQUITIES, GB_EQUITIES, US_EQUITIES, ) from zipline.pipeline import Pipeline from zipline.pipeline.data import EquityPricing, USEquityPricing from zipline.pipeline.engine import SimplePipelineEngine from zipline.pipeline.loaders.equity_pricing_loader import EquityPricingLoader from zipline.pipeline.loaders.synthetic import NullAdjustmentReader from zipline.testing.predicates import assert_equal from zipline.testing.core import parameter_space, random_tick_prices import zipline.testing.fixtures as zf def T(s): return pd.Timestamp(s, tz='UTC') class WithInternationalDailyBarData(zf.WithAssetFinder): DAILY_BAR_START_DATE = zf.alias('START_DATE') DAILY_BAR_END_DATE = zf.alias('END_DATE') DAILY_BAR_LOOKBACK_DAYS = 0 INTERNATIONAL_PRICING_STARTING_PRICES = { 'XNYS': 100, 'XTSE': 50, 'XLON': 25, } INTERNATIONAL_PRICING_CURRENCIES = { 'XNYS': ['USD'], 'XTSE': ['CAD'], 'XLON': ['GBP', 'EUR', 'USD'], } assert ( INTERNATIONAL_PRICING_STARTING_PRICES.keys() == INTERNATIONAL_PRICING_CURRENCIES.keys() ) FX_RATES_CURRENCIES = ["USD", "CAD", "GBP", "EUR"] @classmethod def make_daily_bar_data(cls, assets, calendar, sessions): start = cls.INTERNATIONAL_PRICING_STARTING_PRICES[calendar.name] closes = random_tick_prices(start, len(sessions)) opens = closes - 0.05 highs = closes + 0.10 lows = closes - 0.10 volumes = np.arange(10000, 10000 + len(closes)) base_frame = pd.DataFrame({ 'close': closes, 'open': opens, 'high': highs, 'low': lows, 'volume': volumes, }, index=sessions) for asset in assets: sid = asset.sid yield sid, base_frame + sid @classmethod def make_currency_codes(cls, calendar, assets): currencies = cls.INTERNATIONAL_PRICING_CURRENCIES[calendar.name] return pd.Series( index=assets, data=list(islice(cycle(currencies), len(assets))) ) @classmethod def init_class_fixtures(cls): super(WithInternationalDailyBarData, cls).init_class_fixtures() cls.daily_bar_sessions = {} cls.daily_bar_data = {} cls.daily_bar_readers = {} cls.daily_bar_currency_codes = {} for calendar, assets, in cls.assets_by_calendar.items(): name = calendar.name start_delta = cls.DAILY_BAR_LOOKBACK_DAYS * calendar.day start_session = cls.DAILY_BAR_START_DATE - start_delta sessions = calendar.sessions_in_range( start_session, cls.DAILY_BAR_END_DATE, ) cls.daily_bar_sessions[name] = sessions cls.daily_bar_data[name] = dict(cls.make_daily_bar_data( assets=assets, calendar=calendar, sessions=sessions, )) panel = (pd.Panel.from_dict(cls.daily_bar_data[name]) .transpose(2, 1, 0)) cls.daily_bar_currency_codes[name] = cls.make_currency_codes( calendar, assets, ) cls.daily_bar_readers[name] = InMemoryDailyBarReader.from_panel( panel, calendar, currency_codes=cls.daily_bar_currency_codes[name], ) class WithInternationalPricingPipelineEngine(zf.WithFXRates, WithInternationalDailyBarData): @classmethod def init_class_fixtures(cls): (super(WithInternationalPricingPipelineEngine, cls) .init_class_fixtures()) adjustments = NullAdjustmentReader() cls.loaders = { GB_EQUITIES: EquityPricingLoader( cls.daily_bar_readers['XLON'], adjustments, cls.in_memory_fx_rate_reader, ), US_EQUITIES: EquityPricingLoader( cls.daily_bar_readers['XNYS'], adjustments, cls.in_memory_fx_rate_reader, ), CA_EQUITIES: EquityPricingLoader( 
cls.daily_bar_readers['XTSE'], adjustments, cls.in_memory_fx_rate_reader, ) } cls.engine = SimplePipelineEngine( get_loader=cls.get_loader, asset_finder=cls.asset_finder, ) @classmethod def get_loader(cls, column): return cls.loaders[column.domain] def run_pipeline(self, pipeline, start_date, end_date): return self.engine.run_pipeline(pipeline, start_date, end_date) class InternationalEquityTestCase(WithInternationalPricingPipelineEngine, zf.ZiplineTestCase): START_DATE = T('2014-01-02') END_DATE = T('2014-02-06') EXCHANGE_INFO = pd.DataFrame.from_records([ {'exchange': 'XNYS', 'country_code': 'US'}, {'exchange': 'XTSE', 'country_code': 'CA'}, {'exchange': 'XLON', 'country_code': 'GB'}, ]) @classmethod def make_equity_info(cls): out = pd.concat( [ make_rotating_equity_info( num_assets=20, first_start=cls.START_DATE, frequency=get_calendar(exchange).day, periods_between_starts=1, asset_lifetime=5, exchange=exchange, ) for exchange in cls.EXCHANGE_INFO.exchange ], ignore_index=True, ) assert_equal(out.end_date.max(), cls.END_DATE) return out @classmethod def make_exchanges_info(cls, equities, futures, root_symbols): return cls.EXCHANGE_INFO @parameter_space(domain=[CA_EQUITIES, US_EQUITIES, GB_EQUITIES]) def test_generic_pipeline_with_explicit_domain(self, domain): calendar = domain.calendar pipe = Pipeline({ 'open': EquityPricing.open.latest, 'high': EquityPricing.high.latest, 'low': EquityPricing.low.latest, 'close': EquityPricing.close.latest, 'volume': EquityPricing.volume.latest, }, domain=domain) sessions = self.daily_bar_sessions[calendar.name] start, end = sessions[[-17, -10]] result = self.run_pipeline(pipe, start, end) all_assets = self.assets_by_calendar[calendar] expected_assets = [ a for a in all_assets if alive_in_range(a, start, end, include_asset_start_date=False) ] expected_dates = sessions[-17:-9] for col in pipe.columns: result_data = result[col].unstack() assert_equal(pd.Index(expected_assets), result_data.columns) assert_equal(expected_dates, result_data.index) for asset in expected_assets: for date in expected_dates: value = result_data.at[date, asset] self.check_expected_latest_value( calendar, col, date, asset, value, ) @parameterized.expand([ ('US', US_EQUITIES, 'XNYS'), ('CA', CA_EQUITIES, 'XTSE'), ('GB', GB_EQUITIES, 'XLON'), ]) def test_currency_convert_prices(self, name, domain, calendar_name): pipe = Pipeline({ 'close': EquityPricing.close.latest, 'close_USD': EquityPricing.close.fx('USD').latest, 'close_CAD': EquityPricing.close.fx('CAD').latest, 'close_EUR': EquityPricing.close.fx('EUR').latest, 'close_GBP': EquityPricing.close.fx('GBP').latest, }, domain=domain) sessions = self.daily_bar_sessions[calendar_name] execution_sessions = sessions[-17:-9] start, end = execution_sessions[[0, -1]] result = self.run_pipeline(pipe, start, end) closes_2d = result['close'].unstack(fill_value=np.nan) all_currency_codes = self.daily_bar_currency_codes[calendar_name] currency_codes = all_currency_codes.loc[[ a.sid for a in closes_2d.columns ]] fx_reader = self.in_memory_fx_rate_reader for target in self.FX_RATES_CURRENCIES: result_2d = result['close_' + target].unstack(fill_value=np.nan) expected_rates = fx_reader.get_rates( rate='mid', quote=target, bases=np.array(currency_codes, dtype=object), dts=sessions[-18:-10], ) expected_result_2d = closes_2d * expected_rates assert_equal(result_2d, expected_result_2d) @parameterized.expand([ ('US', US_EQUITIES, 'XNYS'), ('CA', CA_EQUITIES, 'XTSE'), ('GB', GB_EQUITIES, 'XLON'), ]) def test_only_currency_converted_data(self, name, 
domain, calendar_name): pipe = Pipeline({ 'close_USD': EquityPricing.close.fx('USD').latest, 'close_EUR': EquityPricing.close.fx('EUR').latest, }, domain=domain) start, end = self.daily_bar_sessions[calendar_name][-2:] result = self.run_pipeline(pipe, start, end) calendar = get_calendar(calendar_name) daily_bars = self.daily_bar_data[calendar_name] currency_codes = self.daily_bar_currency_codes[calendar_name] for (dt, asset), row in result.iterrows(): price_date = dt - calendar.day expected_close = daily_bars[asset].loc[price_date, 'close'] expected_base = currency_codes.loc[asset] expected_rate_USD = self.in_memory_fx_rate_reader.get_rate_scalar( rate='mid', quote='USD', base=expected_base, dt=price_date.asm8, ) expected_price = expected_close * expected_rate_USD assert_equal(row.close_USD, expected_price) expected_rate_EUR = self.in_memory_fx_rate_reader.get_rate_scalar( rate='mid', quote='EUR', base=expected_base, dt=price_date.asm8, ) expected_price = expected_close * expected_rate_EUR assert_equal(row.close_EUR, expected_price) def test_explicit_specialization_matches_implicit(self): pipeline_specialized = Pipeline({ 'open': EquityPricing.open.latest, 'high': EquityPricing.high.latest, 'low': EquityPricing.low.latest, 'close': EquityPricing.close.latest, 'volume': EquityPricing.volume.latest, }, domain=US_EQUITIES) dataset_specialized = Pipeline({ 'open': USEquityPricing.open.latest, 'high': USEquityPricing.high.latest, 'low': USEquityPricing.low.latest, 'close': USEquityPricing.close.latest, 'volume': USEquityPricing.volume.latest, }) sessions = self.daily_bar_sessions['XNYS'] self.assert_identical_results( pipeline_specialized, dataset_specialized, sessions[1], sessions[-1], ) def test_cannot_convert_volume_data(self): with self.assertRaises(TypeError) as exc: EquityPricing.volume.fx('EUR') assert_equal( str(exc.exception), 'The .fx() method cannot be called on EquityPricing.volume ' 'because it does not produce currency-denominated data.', ) def check_expected_latest_value(self, calendar, col, date, asset, value): if np.isnan(value): self.assertTrue(date <= asset.start_date or date > asset.end_date) else: self.assertTrue(asset.start_date < date <= asset.end_date) bars = self.daily_bar_data[calendar.name] expected_value = bars[asset.sid].loc[date - calendar.day, col] assert_equal(value, expected_value) def assert_identical_results(self, left, right, start_date, end_date): left_result = self.run_pipeline(left, start_date, end_date) right_result = self.run_pipeline(right, start_date, end_date) assert_equal(left_result, right_result) def alive_in_range(asset, start, end, include_asset_start_date=False): if include_asset_start_date: asset_start = asset.start_date else: asset_start = asset.start_date + pd.Timedelta('1 day') return intervals_overlap((asset_start, asset.end_date), (start, end))
Apache License 2.0
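The intervals_overlap helper above relies on the usual trick: two closed intervals are disjoint exactly when one ends strictly before the other begins. A standalone sketch with plain integers (pandas Timestamps compare the same way):

def intervals_overlap(a, b):
    # a and b are (start, end) pairs with start <= end.
    a_strictly_before = a[1] < b[0]
    b_strictly_before = b[1] < a[0]
    return not (a_strictly_before or b_strictly_before)

assert intervals_overlap((1, 5), (5, 9))      # touching endpoints count as overlap
assert intervals_overlap((1, 10), (3, 4))     # containment
assert not intervals_overlap((1, 2), (3, 4))  # disjoint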
aws/aws-parallelcluster-node
src/slurm_plugin/clustermgtd.py
ClusterManager._handle_powering_down_nodes
python
def _handle_powering_down_nodes(self, slurm_nodes): powering_down_nodes = [node for node in slurm_nodes if node.is_powering_down_with_nodeaddr()] if powering_down_nodes: log.info("Resetting powering down nodes: %s", print_with_count(powering_down_nodes)) reset_nodes(nodes=[node.name for node in powering_down_nodes]) instances_to_terminate = [node.instance.id for node in powering_down_nodes if node.instance] log.info("Terminating instances that are backing powering down nodes") self._instance_manager.delete_instances( instances_to_terminate, terminate_batch_size=self._config.terminate_max_batch_size )
Handle nodes that are powering down. Terminate any instances backing the powering-down nodes. Reset the nodeaddr for each powering-down node. Node state is not changed.
https://github.com/aws/aws-parallelcluster-node/blob/00cee467f983b18075752ed3fbc3bfe52420147c/src/slurm_plugin/clustermgtd.py#L673-L688
import logging import os import time from configparser import ConfigParser from datetime import datetime, timezone from enum import Enum from logging.config import fileConfig import boto3 from boto3.dynamodb.conditions import Attr from botocore.config import Config from common.schedulers.slurm_commands import ( get_nodes_info, get_partition_info, reset_nodes, set_nodes_down, set_nodes_down_and_power_save, set_nodes_drain, update_all_partitions, update_partitions, ) from common.time_utils import seconds from common.utils import sleep_remaining_loop_time, time_is_up from retrying import retry from slurm_plugin.common import TIMESTAMP_FORMAT, log_exception, print_with_count, retrieve_instance_type_mapping from slurm_plugin.instance_manager import InstanceManager from slurm_plugin.slurm_resources import CONFIG_FILE_DIR, EC2InstanceHealthState, PartitionStatus, StaticNode LOOP_TIME = 60 log = logging.getLogger(__name__) class ComputeFleetStatus(Enum): STOPPED = "STOPPED" RUNNING = "RUNNING" STOPPING = "STOPPING" STARTING = "STARTING" STOP_REQUESTED = "STOP_REQUESTED" START_REQUESTED = "START_REQUESTED" PROTECTED = "PROTECTED" def __str__(self): return str(self.value) @staticmethod def is_stop_status(status): return status in {ComputeFleetStatus.STOP_REQUESTED, ComputeFleetStatus.STOPPING, ComputeFleetStatus.STOPPED} @staticmethod def is_start_in_progress(status): return status in {ComputeFleetStatus.START_REQUESTED, ComputeFleetStatus.STARTING} @staticmethod def is_stop_in_progress(status): return status in {ComputeFleetStatus.STOP_REQUESTED, ComputeFleetStatus.STOPPING} @staticmethod def is_protected_status(status): return status == ComputeFleetStatus.PROTECTED class ComputeFleetStatusManager: COMPUTE_FLEET_STATUS_KEY = "COMPUTE_FLEET" COMPUTE_FLEET_STATUS_ATTRIBUTE = "Status" LAST_UPDATED_TIME_ATTRIBUTE = "LastUpdatedTime" class ConditionalStatusUpdateFailed(Exception): pass def __init__(self, table_name, boto3_config, region): self._table_name = table_name self._boto3_config = boto3_config self.__region = region self._ddb_resource = boto3.resource("dynamodb", region_name=region, config=boto3_config) self._table = self._ddb_resource.Table(table_name) def get_status(self, fallback=None): try: compute_fleet_status = self._table.get_item(ConsistentRead=True, Key={"Id": self.COMPUTE_FLEET_STATUS_KEY}) if not compute_fleet_status or "Item" not in compute_fleet_status: raise Exception("COMPUTE_FLEET status not found in db table") return ComputeFleetStatus(compute_fleet_status["Item"][self.COMPUTE_FLEET_STATUS_ATTRIBUTE]) except Exception as e: log.error( "Failed when retrieving fleet status from DynamoDB with error %s, using fallback value %s", e, fallback ) return fallback def update_status(self, current_status, next_status): try: self._table.put_item( Item={ "Id": self.COMPUTE_FLEET_STATUS_KEY, self.COMPUTE_FLEET_STATUS_ATTRIBUTE: str(next_status), self.LAST_UPDATED_TIME_ATTRIBUTE: str(datetime.now(tz=timezone.utc)), }, ConditionExpression=Attr(self.COMPUTE_FLEET_STATUS_ATTRIBUTE).eq(str(current_status)), ) except self._ddb_resource.meta.client.exceptions.ConditionalCheckFailedException as e: raise ComputeFleetStatusManager.ConditionalStatusUpdateFailed(e) class ClustermgtdConfig: DEFAULTS = { "max_retry": 1, "loop_time": LOOP_TIME, "proxy": "NONE", "logging_config": os.path.join( os.path.dirname(__file__), "logging", "parallelcluster_clustermgtd_logging.conf" ), "instance_type_mapping": "/opt/slurm/etc/pcluster/instance_name_type_mappings.json", "launch_max_batch_size": 500, 
"update_node_address": True, "terminate_max_batch_size": 1000, "node_replacement_timeout": 1800, "terminate_drain_nodes": True, "terminate_down_nodes": True, "orphaned_instance_timeout": 120, "disable_ec2_health_check": False, "disable_scheduled_event_health_check": False, "disable_all_cluster_management": False, "health_check_timeout": 180, "hosted_zone": None, "dns_domain": None, "use_private_hostname": False, "protected_failure_count": 10, } def __init__(self, config_file_path): self._get_config(config_file_path) def __repr__(self): attrs = ", ".join(["{key}={value}".format(key=key, value=repr(value)) for key, value in self.__dict__.items()]) return "{class_name}({attrs})".format(class_name=self.__class__.__name__, attrs=attrs) def __eq__(self, other): if type(other) is type(self): return self._config == other._config and self.instance_name_type_mapping == other.instance_name_type_mapping return False def __ne__(self, other): return not self.__eq__(other) def _get_basic_config(self, config): self.region = config.get("clustermgtd", "region") self.cluster_name = config.get("clustermgtd", "cluster_name") self.dynamodb_table = config.get("clustermgtd", "dynamodb_table") self.head_node_private_ip = config.get("clustermgtd", "head_node_private_ip") self.head_node_hostname = config.get("clustermgtd", "head_node_hostname") instance_name_type_mapping_file = config.get( "clustermgtd", "instance_type_mapping", fallback=self.DEFAULTS.get("instance_type_mapping") ) self.instance_name_type_mapping = retrieve_instance_type_mapping(instance_name_type_mapping_file) self._boto3_retry = config.getint("clustermgtd", "boto3_retry", fallback=self.DEFAULTS.get("max_retry")) self._boto3_config = {"retries": {"max_attempts": self._boto3_retry, "mode": "standard"}} self.loop_time = config.getint("clustermgtd", "loop_time", fallback=self.DEFAULTS.get("loop_time")) self.disable_all_cluster_management = config.getboolean( "clustermgtd", "disable_all_cluster_management", fallback=self.DEFAULTS.get("disable_all_cluster_management"), ) self.heartbeat_file_path = config.get("clustermgtd", "heartbeat_file_path") proxy = config.get("clustermgtd", "proxy", fallback=self.DEFAULTS.get("proxy")) if proxy != "NONE": self._boto3_config["proxies"] = {"https": proxy} self.boto3_config = Config(**self._boto3_config) self.logging_config = config.get("clustermgtd", "logging_config", fallback=self.DEFAULTS.get("logging_config")) def _get_launch_config(self, config): self.launch_max_batch_size = config.getint( "clustermgtd", "launch_max_batch_size", fallback=self.DEFAULTS.get("launch_max_batch_size") ) self.update_node_address = config.getboolean( "clustermgtd", "update_node_address", fallback=self.DEFAULTS.get("update_node_address") ) def _get_health_check_config(self, config): self.disable_ec2_health_check = config.getboolean( "clustermgtd", "disable_ec2_health_check", fallback=self.DEFAULTS.get("disable_ec2_health_check") ) self.disable_scheduled_event_health_check = config.getboolean( "clustermgtd", "disable_scheduled_event_health_check", fallback=self.DEFAULTS.get("disable_scheduled_event_health_check"), ) self.health_check_timeout = config.getint( "clustermgtd", "health_check_timeout", fallback=self.DEFAULTS.get("health_check_timeout") ) self.disable_all_health_checks = config.getboolean( "clustermgtd", "disable_all_health_checks", fallback=(self.disable_ec2_health_check and self.disable_scheduled_event_health_check), ) def _get_terminate_config(self, config): self.terminate_max_batch_size = config.getint( "clustermgtd", 
"terminate_max_batch_size", fallback=self.DEFAULTS.get("terminate_max_batch_size") ) self.node_replacement_timeout = config.getint( "clustermgtd", "node_replacement_timeout", fallback=self.DEFAULTS.get("node_replacement_timeout") ) self.terminate_drain_nodes = config.getboolean( "clustermgtd", "terminate_drain_nodes", fallback=self.DEFAULTS.get("terminate_drain_nodes") ) self.terminate_down_nodes = config.getboolean( "clustermgtd", "terminate_down_nodes", fallback=self.DEFAULTS.get("terminate_down_nodes") ) self.orphaned_instance_timeout = config.getint( "clustermgtd", "orphaned_instance_timeout", fallback=self.DEFAULTS.get("orphaned_instance_timeout") ) self.protected_failure_count = config.getint( "clustermgtd", "protected_failure_count", fallback=self.DEFAULTS.get("protected_failure_count") ) def _get_dns_config(self, config): self.hosted_zone = config.get("clustermgtd", "hosted_zone", fallback=self.DEFAULTS.get("hosted_zone")) self.dns_domain = config.get("clustermgtd", "dns_domain", fallback=self.DEFAULTS.get("dns_domain")) self.use_private_hostname = config.getboolean( "clustermgtd", "use_private_hostname", fallback=self.DEFAULTS.get("use_private_hostname") ) @log_exception(log, "reading cluster manager configuration file", catch_exception=IOError, raise_on_error=True) def _get_config(self, config_file_path): log.info("Reading %s", config_file_path) self._config = ConfigParser() self._config.read_file(open(config_file_path, "r")) self._get_basic_config(self._config) self._get_health_check_config(self._config) self._get_launch_config(self._config) self._get_terminate_config(self._config) self._get_dns_config(self._config) class ClusterManager: class HealthCheckTypes(Enum): scheduled_event = "scheduled_events_check" ec2_health = "ec2_health_check" def __str__(self): return self.value class EC2InstancesInfoUnavailable(Exception): pass def __init__(self, config): self._static_nodes_in_replacement = set() self._partitions_protected_failure_count_map = {} self._compute_fleet_status = ComputeFleetStatus.RUNNING self._current_time = None self._config = None self._compute_fleet_status_manager = None self._instance_manager = None self.set_config(config) def set_config(self, config): if self._config != config: logging.info("Applying new clustermgtd config: %s", config) self._config = config self._compute_fleet_status_manager = self._initialize_compute_fleet_status_manager(config) self._instance_manager = self._initialize_instance_manager(config) @staticmethod def _initialize_instance_manager(config): return InstanceManager( config.region, config.cluster_name, config.boto3_config, table_name=config.dynamodb_table, hosted_zone=config.hosted_zone, dns_domain=config.dns_domain, use_private_hostname=config.use_private_hostname, head_node_private_ip=config.head_node_private_ip, head_node_hostname=config.head_node_hostname, instance_name_type_mapping=config.instance_name_type_mapping, ) @staticmethod def _initialize_compute_fleet_status_manager(config): return ComputeFleetStatusManager( table_name=config.dynamodb_table, boto3_config=config.boto3_config, region=config.region ) def _update_compute_fleet_status(self, status): log.info("Updating compute fleet status from %s to %s", self._compute_fleet_status, status) self._compute_fleet_status_manager.update_status(current_status=self._compute_fleet_status, next_status=status) self._compute_fleet_status = status @log_exception(log, "handling compute fleet status transitions", catch_exception=Exception, raise_on_error=False) def 
_manage_compute_fleet_status_transitions(self): self._compute_fleet_status = self._compute_fleet_status_manager.get_status(fallback=self._compute_fleet_status) log.info("Current compute fleet status: %s", self._compute_fleet_status) try: if ComputeFleetStatus.is_stop_status(self._compute_fleet_status): if self._compute_fleet_status == ComputeFleetStatus.STOP_REQUESTED: self._update_compute_fleet_status(ComputeFleetStatus.STOPPING) partitions_deactivated_successfully = update_all_partitions( PartitionStatus.INACTIVE, reset_node_addrs_hostname=True ) nodes_terminated = self._instance_manager.terminate_all_compute_nodes( self._config.terminate_max_batch_size ) if partitions_deactivated_successfully and nodes_terminated: if self._compute_fleet_status == ComputeFleetStatus.STOPPING: self._update_compute_fleet_status(ComputeFleetStatus.STOPPED) elif ComputeFleetStatus.is_start_in_progress(self._compute_fleet_status): if self._compute_fleet_status == ComputeFleetStatus.START_REQUESTED: self._update_compute_fleet_status(ComputeFleetStatus.STARTING) partitions_activated_successfully = update_all_partitions( PartitionStatus.UP, reset_node_addrs_hostname=False ) if partitions_activated_successfully: self._update_compute_fleet_status(ComputeFleetStatus.RUNNING) self._partitions_protected_failure_count_map = {} except ComputeFleetStatusManager.ConditionalStatusUpdateFailed: log.warning( "Cluster status was updated while handling a transition from %s. " "Status transition will be retried at the next iteration", self._compute_fleet_status, ) def _handle_successfully_launched_nodes(self, partitions_name_map): partitions_protected_failure_count_map = self._partitions_protected_failure_count_map.copy() for partition, failures_per_compute_resource in partitions_protected_failure_count_map.items(): partition_online_compute_resources = partitions_name_map[partition].get_online_node_by_type( self._config.terminate_drain_nodes, self._config.terminate_down_nodes ) for compute_resource in failures_per_compute_resource.keys(): if compute_resource in partition_online_compute_resources: self._reset_partition_failure_count(partition) break def manage_cluster(self): log.info("Managing cluster...") self._current_time = datetime.now(tz=timezone.utc) self._manage_compute_fleet_status_transitions() if not self._config.disable_all_cluster_management and self._compute_fleet_status in { None, ComputeFleetStatus.RUNNING, ComputeFleetStatus.PROTECTED, }: try: log.info("Retrieving nodes info from the scheduler") nodes = self._get_node_info_with_retry() log.debug("Nodes: %s", nodes) partitions_name_map = self._retrieve_scheduler_partitions(nodes) except Exception as e: log.error( "Unable to get partition/node info from slurm, no other action can be performed. Sleeping... " "Exception: %s", e, ) return try: cluster_instances = self._get_ec2_instances() except ClusterManager.EC2InstancesInfoUnavailable: log.error("Unable to get instances info from EC2, no other action can be performed. 
Sleeping...") return log.debug("Current cluster instances in EC2: %s", cluster_instances) partitions = list(partitions_name_map.values()) self._update_slurm_nodes_with_ec2_info(nodes, cluster_instances) self._clean_up_inactive_partition(partitions) if not self._config.disable_all_health_checks: self._perform_health_check_actions(partitions) self._maintain_nodes(partitions_name_map) self._terminate_orphaned_instances(cluster_instances) self._write_timestamp_to_file() def _write_timestamp_to_file(self): with open(os.open(self._config.heartbeat_file_path, os.O_WRONLY | os.O_CREAT, 0o644), "w") as timestamp_file: timestamp_file.write(datetime.now(tz=timezone.utc).strftime(TIMESTAMP_FORMAT)) @staticmethod @retry(stop_max_attempt_number=2, wait_fixed=1000) def _get_node_info_with_retry(nodes=""): return get_nodes_info(nodes) @staticmethod @retry(stop_max_attempt_number=2, wait_fixed=1000) def _get_partition_info_with_retry(): return {part.name: part for part in get_partition_info(get_all_nodes=True)} def _clean_up_inactive_partition(self, partitions): inactive_instance_ids, inactive_nodes = ClusterManager._get_inactive_instances_and_nodes(partitions) if inactive_nodes: try: log.info("Cleaning up INACTIVE partitions.") if inactive_instance_ids: log.info( "Clean up instances associated with nodes in INACTIVE partitions: %s", print_with_count(inactive_instance_ids), ) self._instance_manager.delete_instances( inactive_instance_ids, terminate_batch_size=self._config.terminate_max_batch_size ) self._reset_nodes_in_inactive_partitions(list(inactive_nodes)) except Exception as e: log.error("Failed to clean up INACTIVE nodes %s with exception %s", print_with_count(inactive_nodes), e) @staticmethod def _reset_nodes_in_inactive_partitions(inactive_nodes): nodes_to_reset = set() for node in inactive_nodes: if node.needs_reset_when_inactive(): nodes_to_reset.add(node.name) if nodes_to_reset: log.info( "Resetting nodeaddr/nodehostname and setting to down the following nodes: %s", print_with_count(nodes_to_reset), ) try: reset_nodes( nodes_to_reset, raise_on_error=False, state="down", reason="inactive partition", ) except Exception as e: log.error( "Encountered exception when resetting nodeaddr for INACTIVE nodes %s: %s", print_with_count(nodes_to_reset), e, ) def _get_ec2_instances(self): time.sleep(5) log.info("Retrieving list of EC2 instances associated with the cluster") try: return self._instance_manager.get_cluster_instances(include_head_node=False, alive_states_only=True) except Exception as e: log.error("Failed when getting instance info from EC2 with exception %s", e) raise ClusterManager.EC2InstancesInfoUnavailable @log_exception(log, "performing health check action", catch_exception=Exception, raise_on_error=False) def _perform_health_check_actions(self, partitions): log.info("Performing instance health check actions") instance_id_to_active_node_map = ClusterManager.get_instance_id_to_active_node_map(partitions) if not instance_id_to_active_node_map: return unhealthy_instances_status = self._instance_manager.get_unhealthy_cluster_instance_status( list(instance_id_to_active_node_map.keys()) ) log.debug("Cluster instances that might be considered unhealthy: %s", unhealthy_instances_status) if unhealthy_instances_status: if not self._config.disable_ec2_health_check: self._handle_health_check( unhealthy_instances_status, instance_id_to_active_node_map, health_check_type=ClusterManager.HealthCheckTypes.ec2_health, ) if not self._config.disable_scheduled_event_health_check: self._handle_health_check( 
unhealthy_instances_status, instance_id_to_active_node_map, health_check_type=ClusterManager.HealthCheckTypes.scheduled_event, ) def _get_nodes_failing_health_check( self, unhealthy_instances_status, instance_id_to_active_node_map, health_check_type ): log.info("Performing actions for health check type: %s", health_check_type) nodes_failing_health_check = [] for instance_status in unhealthy_instances_status: unhealthy_node = instance_id_to_active_node_map.get(instance_status.id) if unhealthy_node and self._is_instance_unhealthy(instance_status, health_check_type): nodes_failing_health_check.append(unhealthy_node) unhealthy_node.is_failing_health_check = True log.warning( "Node %s(%s) is associated with instance %s that is failing %s. EC2 health state: %s", unhealthy_node.name, unhealthy_node.nodeaddr, instance_status.id, health_check_type, [ instance_status.id, instance_status.state, instance_status.instance_status, instance_status.system_status, instance_status.scheduled_events, ], ) return nodes_failing_health_check def _is_instance_unhealthy(self, instance_status: EC2InstanceHealthState, health_check_type): is_instance_status_unhealthy = False if health_check_type == ClusterManager.HealthCheckTypes.scheduled_event: is_instance_status_unhealthy = instance_status.fail_scheduled_events_check() elif health_check_type == ClusterManager.HealthCheckTypes.ec2_health: is_instance_status_unhealthy = instance_status.fail_ec2_health_check( self._current_time, self._config.health_check_timeout ) return is_instance_status_unhealthy def _update_static_nodes_in_replacement(self, slurm_nodes): nodename_to_slurm_nodes_map = {node.name: node for node in slurm_nodes} nodes_still_in_replacement = set() for nodename in self._static_nodes_in_replacement: node = nodename_to_slurm_nodes_map.get(nodename) if node and not node.is_up(): nodes_still_in_replacement.add(nodename) self._static_nodes_in_replacement = nodes_still_in_replacement for node in slurm_nodes: node.is_static_nodes_in_replacement = node.name in self._static_nodes_in_replacement node._is_being_replaced = self._is_node_being_replaced(node) node._is_replacement_timeout = self._is_node_replacement_timeout(node) def _find_unhealthy_slurm_nodes(self, slurm_nodes): unhealthy_static_nodes = [] unhealthy_dynamic_nodes = [] for node in slurm_nodes: if not node.is_healthy(self._config.terminate_drain_nodes, self._config.terminate_down_nodes): if isinstance(node, StaticNode): unhealthy_static_nodes.append(node) else: unhealthy_dynamic_nodes.append(node) return ( unhealthy_dynamic_nodes, unhealthy_static_nodes, ) def _increase_partitions_protected_failure_count(self, bootstrap_failure_nodes): for node in bootstrap_failure_nodes: compute_resource = node.get_compute_resource_name() for p in node.partitions: if p in self._partitions_protected_failure_count_map: self._partitions_protected_failure_count_map[p][compute_resource] = ( self._partitions_protected_failure_count_map[p].get(compute_resource, 0) + 1 ) else: self._partitions_protected_failure_count_map[p] = {} self._partitions_protected_failure_count_map[p][compute_resource] = 1 @log_exception(log, "maintaining unhealthy dynamic nodes", raise_on_error=False) def _handle_unhealthy_dynamic_nodes(self, unhealthy_dynamic_nodes): instances_to_terminate = [node.instance.id for node in unhealthy_dynamic_nodes if node.instance] if instances_to_terminate: log.info("Terminating instances that are backing unhealthy dynamic nodes") self._instance_manager.delete_instances( instances_to_terminate, 
terminate_batch_size=self._config.terminate_max_batch_size ) log.info("Setting unhealthy dynamic nodes to down and power_down.") set_nodes_down_and_power_save( [node.name for node in unhealthy_dynamic_nodes], reason="Scheduler health check failed" ) @log_exception(log, "maintaining powering down nodes", raise_on_error=False)
Apache License 2.0
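A minimal sketch of the pattern used by _handle_powering_down_nodes above: select the affected nodes, reset them in the scheduler first, then terminate only the EC2 instances that actually back them, in bounded batches. The node type, reset_nodes, and terminate callbacks here are simplified stand-ins, not the real slurm_plugin API.

from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    name: str
    instance_id: Optional[str]          # None if no EC2 instance backs the node
    powering_down_with_addr: bool = False

def handle_powering_down(nodes, reset_nodes, terminate_instances, batch_size=1000):
    powering_down = [n for n in nodes if n.powering_down_with_addr]
    if not powering_down:
        return
    # Reset nodeaddr/hostname in the scheduler first...
    reset_nodes([n.name for n in powering_down])
    # ...then terminate only instances that actually exist, in bounded batches.
    instance_ids = [n.instance_id for n in powering_down if n.instance_id]
    for i in range(0, len(instance_ids), batch_size):
        terminate_instances(instance_ids[i:i + batch_size])

# Demo with print-based stand-ins for the scheduler and EC2 calls.
nodes = [Node("queue1-st-c5-1", "i-123", True), Node("queue1-st-c5-2", None, True)]
handle_powering_down(nodes, reset_nodes=print, terminate_instances=print, batch_size=500)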
kuri65536/python-for-android
python-modules/twisted/twisted/test/stdio_test_halfclose.py
HalfCloseProtocol.readConnectionLost
python
def readConnectionLost(self): self.exitCode = 0 reactor.stop()
This is the desired event. Once it has happened, stop the reactor so the process will exit.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/test/stdio_test_halfclose.py#L38-L44
import sys from zope.interface import implements from twisted.internet.interfaces import IHalfCloseableProtocol from twisted.internet import stdio, protocol from twisted.python import reflect, log class HalfCloseProtocol(protocol.Protocol): implements(IHalfCloseableProtocol) exitCode = None def connectionMade(self): self.transport.write("x")
Apache License 2.0
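For context on the readConnectionLost entry above, here is a small illustrative sketch of a half-closeable stdio protocol: the read side closing is treated as the success signal and stops the reactor. It mirrors the shape of the test helper shown in the context but is not the exact Twisted test harness; the class name and exit-code handling are assumptions.

from zope.interface import implementer
from twisted.internet import reactor, stdio, protocol
from twisted.internet.interfaces import IHalfCloseableProtocol

@implementer(IHalfCloseableProtocol)
class WaitForEOF(protocol.Protocol):
    # Exit cleanly once the parent closes our stdin (read-side half-close).
    exitCode = None

    def connectionMade(self):
        self.transport.write(b"x")      # signal readiness to the parent process

    def readConnectionLost(self):
        self.exitCode = 0               # the desired event: stdin was closed
        reactor.stop()

    def writeConnectionLost(self):
        pass

    def connectionLost(self, reason):
        if self.exitCode is None:
            self.exitCode = 1           # connection died before the half-close

if __name__ == "__main__":
    stdio.StandardIO(WaitForEOF())
    reactor.run()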
randolphvi/question-difficulty-prediction
TF/utils/data_helpers.py
get_out_dir
python
def get_out_dir(option, logger): if option == 'T': timestamp = str(int(time.time())) out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp)) logger.info("Writing to {0}\n".format(out_dir)) if option == 'R': MODEL = input("[Input] Please input the checkpoints model you want to restore, " "it should be like (1490175368): ") while not (MODEL.isdigit() and len(MODEL) == 10): MODEL = input("[Warning] The format of your input is illegal, please re-input: ") out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", MODEL)) logger.info("Writing to {0}\n".format(out_dir)) return out_dir
Get the output directory. Args: option: 'T' (train) or 'R' (restore) logger: The logger Returns: The output directory
https://github.com/randolphvi/question-difficulty-prediction/blob/77b4b83b5bc747c5074926d7a37545a5d46ed343/TF/utils/data_helpers.py#L87-L109
__author__ = 'Randolph' import os import math import time import gensim import logging import json import numpy as np from collections import OrderedDict from scipy import stats from texttable import Texttable from gensim.models import KeyedVectors from tflearn.data_utils import pad_sequences def option(pattern): if pattern == 0: OPTION = input("[Input] Train or Restore? (T/R): ") while not (OPTION.upper() in ['T', 'R']): OPTION = input("[Warning] The format of your input is illegal, please re-input: ") if pattern == 1: OPTION = input("Load Best or Latest Model? (B/L): ") while not (OPTION.isalpha() and OPTION.upper() in ['B', 'L']): OPTION = input("[Warning] The format of your input is illegal, please re-input: ") return OPTION.upper() def logger_fn(name, input_file, level=logging.INFO): logger = logging.getLogger(name) logger.setLevel(level) log_dir = os.path.dirname(input_file) if not os.path.exists(log_dir): os.makedirs(log_dir) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') fh = logging.FileHandler(input_file, mode='w') fh.setFormatter(formatter) logger.addHandler(fh) sh = logging.StreamHandler() sh.setFormatter(formatter) sh.setLevel(logging.WARNING) logger.addHandler(sh) return logger def tab_printer(args, logger): args = vars(args) keys = sorted(args.keys()) t = Texttable() t.add_rows([[k.replace("_", " ").capitalize(), args[k]] for k in keys]) t.add_rows([["Parameter", "Value"]]) logger.info('\n' + t.draw())
Apache License 2.0
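A small sketch of the directory-naming convention that get_out_dir above applies, with the interactive input() prompts factored out so it runs non-interactively and rejects invalid options instead of leaving out_dir unset. The function name and error messages are illustrative, not part of the original repository.

import os
import time

def build_out_dir(option, model_timestamp=None):
    # Return runs/<fresh timestamp> for training, or runs/<model_timestamp> for restoring.
    if option == 'T':
        stamp = str(int(time.time()))
    elif option == 'R':
        if not (model_timestamp and model_timestamp.isdigit() and len(model_timestamp) == 10):
            raise ValueError("expected a 10-digit checkpoint timestamp, e.g. '1490175368'")
        stamp = model_timestamp
    else:
        raise ValueError("option must be 'T' or 'R'")
    return os.path.abspath(os.path.join(os.path.curdir, "runs", stamp))

print(build_out_dir('T'))
print(build_out_dir('R', '1490175368'))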
pytorch/opacus
opacus/utils/uniform_sampler.py
DistributedPoissonBatchSampler.__len__
python
def __len__(self) -> int: return self.num_batches
Expected number of batches.
https://github.com/pytorch/opacus/blob/78e2f49c6498de206ca06c4da69e5e53763779f4/opacus/utils/uniform_sampler.py#L140-L144
from typing import Optional import torch from torch.utils.data import Sampler class UniformWithReplacementSampler(Sampler): def __init__(self, num_samples: int, sample_rate: float, generator=None): self.num_samples = num_samples self.sample_rate = sample_rate self.generator = generator if self.generator is None: generator = torch.Generator() generator.manual_seed( int(torch.empty((), dtype=torch.int64).random_().item()) ) if self.num_samples <= 0: raise ValueError( "num_samples should be a positive integer " "value, but got num_samples={}".format(self.num_samples) ) def __len__(self): return int(1 / self.sample_rate) def __iter__(self): num_batches = int(1 / self.sample_rate) while num_batches > 0: mask = ( torch.rand(self.num_samples, generator=self.generator) < self.sample_rate ) indices = mask.nonzero(as_tuple=False).reshape(-1).tolist() if len(indices) != 0: yield indices num_batches -= 1 class DistributedPoissonBatchSampler(Sampler): def __init__( self, total_size: int, sample_rate: float, num_replicas: Optional[int] = None, rank: Optional[int] = None, shuffle: bool = True, seed: int = 0, generator=None, ): self.total_size = total_size self.sample_rate = sample_rate self.generator = generator self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.shuffle = shuffle self.seed = seed if self.generator is None: generator = torch.Generator() generator.manual_seed( int(torch.empty((), dtype=torch.int64).random_().item()) ) if self.total_size <= 0: raise ValueError( "total_size should be a positive integer " "value, but got total_size={}".format(self.total_size) ) self.num_samples = self.total_size // self.num_replicas if self.rank < self.total_size % self.num_replicas: self.num_samples += 1 self.num_batches = int(1 / self.sample_rate) def __len__(self) -> int: return self.num_batches def __iter__(self): if self.shuffle: g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = torch.randperm(self.total_size, generator=g) else: indices = torch.arange(self.total_size) indices = indices[self.rank : self.total_size : self.num_replicas] assert len(indices) == self.num_samples for _ in range(self.num_batches): mask = ( torch.rand(self.num_samples, generator=self.generator) < self.sample_rate ) selected_examples = mask.nonzero(as_tuple=False).reshape(-1) if len(selected_examples) > 0: yield indices[selected_examples]
Apache License 2.0
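A hedged sketch of how the sampler above can drive a PyTorch DataLoader; __len__ reports the expected batch count int(1 / sample_rate), while the actual Poisson-sampled batch sizes vary from batch to batch.

import torch
from torch.utils.data import DataLoader, TensorDataset

# assumes DistributedPoissonBatchSampler from the record's context is importable
dataset = TensorDataset(torch.randn(1000, 8))
sampler = DistributedPoissonBatchSampler(
    total_size=len(dataset), sample_rate=0.01, num_replicas=1, rank=0)
loader = DataLoader(dataset, batch_sampler=sampler)
print(len(sampler))   # 100 == int(1 / 0.01), the expected number of batches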
yaworsw/sublime-scopealways
ScopeAlways.py
plugin_loaded
python
def plugin_loaded(): global settings update_settings() settings.add_on_change('extensions_path', update_settings)
When the plugin is loaded, load the plugin's settings.
https://github.com/yaworsw/sublime-scopealways/blob/71c911353f4e9de91ace8fb9ad15a17ffb0715a5/ScopeAlways.py#L9-L15
import sublime, sublime_plugin status_format = '%s' status_key = 'scope_always' on = False settings = None
MIT License
python-acoustics/python-acoustics
acoustics/_signal.py
Signal.peak
python
def peak(self, axis=-1): return acoustics.standards.iso_tr_25417_2007.peak_sound_pressure(self, axis=axis)
Peak sound pressure. :param axis: Axis. .. seealso:: :func:`acoustics.standards.iso_tr_25417_2007.peak_sound_pressure`
https://github.com/python-acoustics/python-acoustics/blob/fbc87454422c41e1a39e282d7680126a6d8014dd/acoustics/_signal.py#L400-L410
import itertools import matplotlib.pyplot as plt import numpy as np from scipy.io import wavfile from scipy.signal import detrend, lfilter, bilinear, spectrogram, filtfilt, resample, fftconvolve import acoustics from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE from acoustics.standards.iec_61672_1_2013 import WEIGHTING_SYSTEMS from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES, NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES) class Signal(np.ndarray): def __new__(cls, data, fs): obj = np.asarray(data).view(cls) obj.fs = fs return obj def __array_prepare__(self, array, context=None): try: a = context[1][0] b = context[1][1] except IndexError: return array if hasattr(a, 'fs') and hasattr(b, 'fs'): if a.fs == b.fs: return array else: raise ValueError("Sample frequencies do not match.") else: return array def __array_wrap__(self, out_arr, context=None): return np.ndarray.__array_wrap__(self, out_arr, context) def __array_finalize__(self, obj): if obj is None: return self.fs = getattr(obj, 'fs', None) def __reduce__(self): pickled_state = super(Signal, self).__reduce__() new_state = pickled_state[2] + (self.fs, ) return (pickled_state[0], pickled_state[1], new_state) def __setstate__(self, state): self.fs = state[-1] super(Signal, self).__setstate__(state[0:-1]) def __repr__(self): return "Signal({})".format(str(self)) def _construct(self, x): return Signal(x, self.fs) @property def samples(self): return self.shape[-1] @property def channels(self): if self.ndim > 1: return self.shape[-2] else: return 1 @property def duration(self): return float(self.samples / self.fs) @property def values(self): return np.array(self) def calibrate_to(self, decibel, inplace=False): decibel = decibel * np.ones(self.shape) gain = decibel - self.leq()[..., None] return self.gain(gain, inplace=inplace) def calibrate_with(self, other, decibel, inplace=False): if not isinstance(other, Signal): other = Signal(other, self.fs) gain = decibel - other.leq() return self.gain(gain, inplace=inplace) def decimate(self, factor, zero_phase=False, ftype='iir', order=None): return Signal( acoustics.signal.decimate(x=self, q=factor, n=order, ftype=ftype, zero_phase=zero_phase), self.fs / factor) def resample(self, nsamples, times=None, axis=-1, window=None): return Signal(resample(self, nsamples, times, axis, window), nsamples / self.samples * self.fs) def upsample(self, factor, axis=-1): return self.resample(int(self.samples * factor), axis=axis) def gain(self, decibel, inplace=False): factor = 10.0**(decibel / 20.0) if inplace: self *= factor return self else: return self * factor def pick(self, start=0.0, stop=None): if start is not None: start = int(np.floor(start * self.fs)) if stop is not None: stop = int(np.floor(stop * self.fs)) return self[..., start:stop] def times(self): return np.arange(0, self.samples) / self.fs def energy(self): return float((self * self).sum()) def power(self): return self.energy() / len(self) def ms(self): return acoustics.signal.ms(self) def rms(self): return acoustics.signal.rms(self) def weigh(self, weighting='A', zero_phase=False): num, den = WEIGHTING_SYSTEMS[weighting]() b, a = bilinear(num, den, self.fs) func = filtfilt if zero_phase else lfilter return self._construct(func(b, a, self)) def correlate(self, other=None, mode='full'): if other is None: other = self if self.fs != other.fs: raise ValueError("Cannot correlate. Sample frequencies are not the same.") if self.channels > 1 or other.channels > 1: raise ValueError("Cannot correlate. 
Not supported for multichannel signals.") return self._construct(fftconvolve(self, other[::-1], mode=mode)) def amplitude_envelope(self): return self._construct(acoustics.signal.amplitude_envelope(self, self.fs)) def instantaneous_frequency(self): return self._construct(acoustics.signal.instantaneous_frequency(self, self.fs)) def instantaneous_phase(self): return self._construct(acoustics.signal.instantaneous_phase(self, self.fs)) def detrend(self, **kwargs): return self._construct(detrend(self, **kwargs)) def unwrap(self): return self._construct(np.unwrap(self)) def complex_cepstrum(self, N=None): if N is not None: times = np.linspace(0.0, self.duration, N, endpoint=False) else: times = self.times() cepstrum, ndelay = acoustics.cepstrum.complex_cepstrum(self, n=N) return times, cepstrum, ndelay def real_cepstrum(self, N=None): if N is not None: times = np.linspace(0.0, self.duration, N, endpoint=False) else: times = self.times() return times, acoustics.cepstrum.real_cepstrum(self, n=N) def power_spectrum(self, N=None): return acoustics.signal.power_spectrum(self, self.fs, N=N) def angle_spectrum(self, N=None): return acoustics.signal.angle_spectrum(self, self.fs, N=N) def phase_spectrum(self, N=None): return acoustics.signal.phase_spectrum(self, self.fs, N=N)
BSD 3-Clause New or Revised License
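A hedged usage sketch for Signal.peak above; the import path is assumed to be the package-level export of the Signal class shown in the record's context, and peak() defers to the ISO/TR 25417 peak sound pressure helper.

import numpy as np
from acoustics import Signal   # assumed public import path for the class in the record

fs = 8000
t = np.arange(0, 1.0, 1.0 / fs)
s = Signal(0.2 * np.sin(2.0 * np.pi * 440.0 * t), fs)
print(s.peak())                # peak sound pressure along the last axis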
ing-bank/popmon
popmon/stats/numpy.py
std
python
def std(a, weights=None, axis=None, dtype=None, ddof=0, keepdims=False): if weights is None: return np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims) else: m = mean(a, weights=weights, axis=axis, keepdims=True) v = mean((a - m) ** 2, weights=weights, axis=axis, keepdims=keepdims, ddof=ddof) return np.sqrt(v)
Compute the weighted standard deviation along the specified axis. :param a: Array containing numbers whose standard deviation is desired. If `a` is not an array, a conversion is attempted. :param weights: Array containing weights for the elements of `a`. If `weights` is not an array, a conversion is attempted. :param axis: Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. Type is None or int or tuple of ints, optional. :param dtype: data type to use in computing the standard deviation. :param int ddof: Delta Degrees of Freedom. The divisor used in calculations is ``W - ddof``, where ``W`` is the sum of weights (or number of elements if `weights` is None). By default `ddof` is zero. :param bool keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. :return: np.ndarray
https://github.com/ing-bank/popmon/blob/b82ed51437fd19bc331e62ac28943fa630412bae/popmon/stats/numpy.py#L108-L131
import warnings import numpy as np import pandas as pd from scipy import stats def fraction_of_true(bin_labels, bin_entries): bin_labels = np.array(bin_labels) bin_entries = np.array(bin_entries) assert len(bin_labels) == len(bin_entries) def replace(bl): if bl in {"True", "true"}: return True elif bl in {"False", "false"}: return False return np.nan if len(bin_labels) == 0 or len(bin_labels) > 4 or np.sum(bin_entries) == 0: return np.nan if not np.all([isinstance(bl, (bool, np.bool_)) for bl in bin_labels]): if not np.all( [isinstance(bl, (str, np.str_, np.string_)) for bl in bin_labels] ): return np.nan n_true = (bin_labels == "True").sum() + (bin_labels == "true").sum() n_false = (bin_labels == "False").sum() + (bin_labels == "false").sum() n_nan = ( (bin_labels == "NaN").sum() + (bin_labels == "nan").sum() + (bin_labels == "None").sum() + (bin_labels == "none").sum() + (bin_labels == "Null").sum() + (bin_labels == "null").sum() ) if n_true + n_false + n_nan != len(bin_labels): return np.nan bin_labels = np.array([replace(bl) for bl in bin_labels]) sum_true = np.sum([be for bl, be in zip(bin_labels, bin_entries) if bl == True]) sum_false = np.sum([be for bl, be in zip(bin_labels, bin_entries) if bl == False]) sum_entries = sum_true + sum_false if sum_entries == 0: return np.nan return (1.0 * sum_true) / sum_entries def mean(a, weights=None, axis=None, dtype=None, keepdims=False, ddof=0): if weights is None: return np.mean(a, axis=axis, dtype=dtype, keepdims=keepdims) else: w = np.array(weights) return np.sum(w * np.array(a), axis=axis, dtype=dtype, keepdims=keepdims) / ( np.sum(w, axis=axis, dtype=dtype, keepdims=keepdims) - ddof )
MIT License
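A short, hedged example for the weighted std above; with no weights it falls back to np.std, and with weights the spread is computed around the weighted mean. The import path is simply the module path shown in the record.

import numpy as np
from popmon.stats.numpy import std   # module path taken from the record

a = np.array([1.0, 2.0, 3.0, 4.0])
w = np.array([1.0, 1.0, 1.0, 5.0])
print(std(a))               # unweighted: identical to np.std(a)
print(std(a, weights=w))    # weighted: spread measured around the weighted mean, pulled toward 4.0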
thenewboston-developers/validator
v1/status_updates/serializers/upgrade_request.py
UpgradeRequestSerializer.validate
python
def validate(self, data): self_configuration = get_self_configuration(exception_class=RuntimeError) self_node_identifier = self_configuration.node_identifier validator_node_identifier = data['validator_node_identifier'] if self_node_identifier != validator_node_identifier: raise serializers.ValidationError( f'self_node_identifier of {self_node_identifier} does not match ' f'validator_node_identifier of {validator_node_identifier}' ) return data
Check that this node's node_identifier matches validator_node_identifier; this ensures that the request was intended for this validator (self).
https://github.com/thenewboston-developers/validator/blob/7994f5ca6cef02e4e548a857e62162ac69ce5332/v1/status_updates/serializers/upgrade_request.py#L41-L57
from django.core.cache import cache from rest_framework import serializers from thenewboston.constants.network import CONFIRMATION_VALIDATOR, PRIMARY_VALIDATOR, VERIFY_KEY_LENGTH from v1.banks.helpers.most_trusted import get_most_trusted_bank from v1.banks.models.bank import Bank from v1.cache_tools.cache_keys import BLOCK_QUEUE from v1.cache_tools.queued_confirmation_blocks import delete_all_queued_confirmation_blocks from v1.self_configurations.helpers.self_configuration import get_self_configuration from v1.tasks.upgrade_notices import send_upgrade_notices class UpgradeRequestSerializer(serializers.Serializer): node_identifier = serializers.CharField(max_length=VERIFY_KEY_LENGTH) validator_node_identifier = serializers.CharField(max_length=VERIFY_KEY_LENGTH) def create(self, validated_data): self_configuration = get_self_configuration(exception_class=RuntimeError) if self_configuration.node_type == CONFIRMATION_VALIDATOR: self_configuration.node_type = PRIMARY_VALIDATOR self_configuration.save() cache.set(BLOCK_QUEUE, [], None) delete_all_queued_confirmation_blocks() send_upgrade_notices.delay(requesting_banks_node_identifier=validated_data['node_identifier']) return self_configuration def update(self, instance, validated_data): pass
MIT License
google/ashier
ashierlib/directive.py
Send.ExpandVariables
python
def ExpandVariables(self, bindings): parts = re.split(r'(\$\w+)', self._message) for i in range(len(parts)): if parts[i].startswith('$'): name = parts[i][1:] parts[i] = bindings[name] return ''.join(parts)
Expand variables in the message. Args: bindings: a dictionary of variable bindings. Returns: The message to be sent, with variables of the form $var replaced by the strings they map to in the dictionary argument.
https://github.com/google/ashier/blob/76710289b2c459ed1ef8ca822cfa1d5fbad05b89/ashierlib/directive.py#L353-L369
__author__ = 'cklin@google.com (Chuan-kai Lin)' import os import re import utils def CreateLines(filename): lineno = 1 lines = [] try: with open(filename) as f: for line in f: lines.append(Line(filename, lineno, line)) lineno += 1 return lines except IOError as err: utils.ReportError(str(err)) return '' class Line(object): def __init__(self, filename, lineno, content): self.lineno = lineno self.content = content self._header = '%s:%d ' % (filename, lineno) def GetIndent(self): expanded = self.content.expandtabs(8) return len(expanded)-len(expanded.lstrip()) def StrippedContent(self): return self.content.lstrip().rstrip('\n') def WithIdentHeader(self, mesg): return self._header+mesg def ReportError(self, mesg): utils.ReportError(self.WithIdentHeader(mesg)) def ParseDirective(line): source = line.StrippedContent() if source.startswith('#') or not source: return None elif '\t' in source: line.ReportError('unexpected TAB in directive') elif source.startswith('>'): return Template(line, source[1:]) elif source.startswith('?'): if source == '?': line.ReportError('empty marker directive') else: syntax = re.compile(r' *(\.+) *(\w+)? *(?:/(.+)/)? *$') matches = syntax.match(source[1:]) if matches: start, finish = matches.span(1) name = matches.group(2) regex = matches.group(3) or '' return Marker(line, start, finish, name, regex) else: line.ReportError('malformed marker directive') elif source.startswith('!'): if source == '!': line.ReportError('empty action directive') else: syntax = re.compile(r' *(\w+) +"(.*)" *$') matches = syntax.match(source[1:]) if matches: channel = matches.group(1) message = matches.group(2) return Send(line, channel, message) else: line.ReportError('malformed action directive') else: line.ReportError('unrecognized directive syntax') return None class Template(object): def __init__(self, line, sample): self.line = line self.sample = sample self.ReportError = line.ReportError def InferSkip(self, start, finish): regex = '' for ch in re.sub(r'\s+', ' ', self.sample[start:finish]): regex += r'\s+' if ch == ' ' else re.escape(ch) try: match = re.match(regex, self.sample[start:]) assert match and match.end() >= finish-start, ( self.line.WithIdentHeader( 'skip pattern matches too few characters')) if match.end() > finish-start: self.ReportError('invalid boundary at column %d' % finish) except re.error: assert False, self.line.WithIdentHeader( 'ill-formed regular expression') return regex class Marker(object): def __init__(self, line, start, finish, name, regex): self.line = line self.start = start self.finish = finish self.name = name self._regex = utils.RemoveRegexBindingGroups(regex) self.ReportError = line.ReportError def InferRegex(self, template): sample = template.sample assert self.finish <= len(sample), ( 'marker extends beyond template') if not self._regex: if len(sample) == self.finish: self._regex = '.+' elif len(sample) > self.finish: delimiter = sample[self.finish] if sample.count(delimiter, self.start, self.finish) == 0: self._regex = '[^%s]+' % ( r'\s' if delimiter.isspace() else delimiter,) else: self.ReportError('delimiter appears in the marker') if self._regex: try: match = re.match(self._regex, sample[self.start:]) if not match or match.end() != self.finish-self.start: self.ReportError('regex does not match marker') except re.error: self.ReportError('ill-formed regular expression') return self._regex class Send(object): def __init__(self, line, channel, message): self.line = line self._channel = channel self._message = message self.ReportError = 
line.ReportError if channel not in ('controller', 'terminal'): self.ReportError('invalid channel name: %s' % (channel,)) def References(self): names = set() parts = re.split(r'(\$\w+)', self._message) for segment in parts: if segment.startswith('$'): names.add(segment[1:]) return names
Apache License 2.0
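A small, hedged sketch for ExpandVariables above, reusing the Line and Send classes from the record's context; $-prefixed names in the message are replaced from the bindings dict. The file name, channel, and bindings below are illustrative values only.

# assumes Line and Send from the record's context (ashierlib.directive) are importable
line = Line('example.ashier', 1, '! terminal "hello $user"\n')
action = Send(line, 'terminal', 'hello $user')
print(action.ExpandVariables({'user': 'alice'}))   # -> 'hello alice'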
quantipy/quantipy
quantipy/core/helpers/functions.py
as_datetime64
python
def as_datetime64(data, date_format='dmy', date_sep='/', time_format='hm', time_sep=':'): has_time = ' ' in data[0] date_reorder = [date_format.index(p) for p in 'ymd'] if has_time: if 's' not in time_format: data = data + ':00' date_time = zip(*(data.str.split(' '))) time = pd.Series(date_time[1]) date = pd.Series(date_time[0]) date = date.str.split(date_sep).apply(lambda x: '-'.join([x[p] for p in date_reorder])) if has_time: date_time = (date +' '+ time).astype(np.datetime64) return date_time
Converts data, a dtype 'object' Series storing date or datetime as text, to a numpy.datetime64 Series object. The default argument values work for a Series in the format: "02/07/2014 16:58" as dtype object and will return: "2014-07-02 15:58:00" as dtype numpy.datetime64 There are still some issues with UTC that will need to be ironed out.
https://github.com/quantipy/quantipy/blob/82ef628d089ea9dbc47c91e842e50dd9e87bb29e/quantipy/core/helpers/functions.py#L845-L872
import pandas as pd import numpy as np import json import cPickle import re import copy import itertools import math import re, string from collections import OrderedDict, defaultdict from constants import DTYPE_MAP from constants import MAPPED_PATTERN from itertools import product import quantipy as qp def load_json(path_json, hook=OrderedDict): with open(path_json) as f: obj = json.load(f, object_pairs_hook=hook) return obj def loads_json(json_text, hook=OrderedDict): obj = json.loads(json_text, object_pairs_hook=hook) return obj def save_json(obj, path_json): with open(path_json, 'w+') as f: json.dump(obj, f) def df_to_browser(df, path_html='df.html', **kwargs): import webbrowser with open(path_html, 'w') as f: f.write(df.to_html(**kwargs)) webbrowser.open(path_html, new=2) def get_delimited_value_map(ds, ds_split=None, sep=';'): if ds_split is None: ds_split = ds.dropna().str.split(sep) delimited = pd.DataFrame(ds_split.tolist()) value_map = pd.unique(delimited.values.ravel()) value_map = np.sort(value_map[value_map.nonzero()]) return value_map def verify_dtypes_vs_meta(data, meta): dtypes = data.dtypes dtypes.name = 'dtype' var_types = pd.DataFrame({k: v['type'] for k, v in meta['columns'].iteritems()}, index=['meta']).T df = pd.concat([var_types, dtypes.astype(str)], axis=1) missing = df.loc[df['dtype'].isin([np.NaN])]['meta'] if missing.size>0: print '\nSome meta not paired to data columns was found (these may be special data types):\n', missing, '\n' df = df.dropna(how='any') df['verified'] = df.apply(lambda x: x['dtype'] in DTYPE_MAP[x['meta']], axis=1) return df def coerce_dtypes_from_meta(data, meta): data = data.copy() verified = verify_dtypes_vs_meta(data, meta) for idx in verified[~verified['verified']].index: meta = verified.loc[idx]['meta'] dtype = verified.loc[idx]['dtype'] if meta in ["int", "single"]: if dtype in ["object"]: data[idx] = data[idx].convert_objects(convert_numeric=True) data[idx] = data[idx].replace(np.NaN, 0).astype(int) return data def index_to_dict(index): if isinstance(index, pd.MultiIndex): levels = index.levels names = index.names index_dict = {names[i]: levels[i] for i in range(len(names))} else: index_dict = {None: index.tolist()} return index_dict def has_collapsed_axis(df, axis=0): agg_func = ('cbase', 'rbase', 'effbase', 'mean', 'net', 'promoters') if axis == 0: if df.index.get_level_values(1)[0].startswith(agg_func): return True else: if df.T.index.get_level_values(1)[0].startswith(agg_func): return True def get_view_slicer(meta, col, values=None): if values is None: slicer = [ (col, val['value']) for val in emulate_meta(meta, meta['columns'][col]['values'])] else: slicer = [ (col, val) for val in values] return slicer def paint_index(meta, index, text_key, display_names=False, transform_names=None, grp_text_map=None): single_row = len(index.values)==1 levels = get_index_levels(index) col = levels[0] values = list(levels[1]) if not col in meta['columns']: return index else: col_text = paint_col_text( meta, col, text_key, display_names, transform_names) values_text = paint_col_values_text( meta, col, values, text_key, grp_text_map) new_index = build_multiindex_from_tuples( col_text, values_text, ['Question', 'Values'], single_row) return new_index def paint_view(meta, view, text_key=None, display_names=None, transform_names=False, axes=['x', 'y']): if text_key is None: text_key = finish_text_key(meta, {}) if display_names is None: display_names = ['x', 'y'] is_array = any(view.meta()[axis]['is_array'] for axis in ['x', 'y']) if is_array: df = 
paint_array( meta, view, text_key, display_names, transform_names, axes) else: df = view.dataframe.copy() grp_text_map = view.meta()['agg']['grp_text_map'] df = paint_dataframe( meta, df, text_key, display_names, transform_names, axes, grp_text_map) return df def paint_dataframe(meta, df, text_key=None, display_names=None, transform_names=False, axes=['x', 'y'], grp_text_map=None): if text_key is None: text_key = finish_text_key(meta, {}) if display_names is None: display_names = ['x', 'y'] if 'x' in axes: display_x_names = 'x' in display_names if len(df.index.levels[0])>1: order = [] for x in df.index.labels[0]: if x not in order: order.append(x) levels = df.index.levels[0] it = sorted(zip(levels, order), key=lambda x: x[1]) df.index = pd.concat([ paint_dataframe( meta, df.ix[[level], :], text_key, display_names, transform_names, 'x', grp_text_map) for level, _ in it], axis=0).index else: df.index = paint_index( meta, df.index, text_key['x'], display_x_names, transform_names, grp_text_map) if 'y' in axes: display_y_names = 'y' in display_names if len(df.columns.levels[0])>1: df.columns = pd.concat([ paint_dataframe( meta, df.ix[:, [level]], text_key, display_names, transform_names, 'y', grp_text_map) for level in df.columns.levels[0]], axis=1).columns else: df.columns = paint_index( meta, df.columns, text_key['y'], display_y_names, transform_names) return df def paint_array(meta, view, text_key, display_names, transform_names, axes): df = view.dataframe.copy() grp_text_map = view.meta()['agg']['grp_text_map'] columns_on_x = view.meta()['x']['is_array'] axes_x = {True: 'x', False: 'y'} if 'x' in axes: display_x_names = axes_x.get(columns_on_x) in display_names index = paint_array_items_index( meta, df.index if columns_on_x else df.columns, text_key['x'], display_x_names) if 'y' in axes: display_y_names = axes_x.get(not columns_on_x) in display_names columns = paint_array_values_index( meta, df.columns if columns_on_x else df.index, text_key['y'], display_y_names, grp_text_map) df.index = index if columns_on_x else columns df.columns = columns if columns_on_x else index return df def get_index_levels(index): levels = [] idx_values = index.values single_row = len(idx_values)==1 if single_row: unzipped = [idx_values[0]] levels.append(unzipped[0][0]) levels.append([unzipped[0][1]]) else: unzipped = zip(*index.values) levels.append(unzipped[0][0]) levels.append(unzipped[1]) return levels def paint_col_text(meta, col, text_key, display_names, transform_names): col_meta = emulate_meta(meta, meta['columns'][col]) if display_names: try: col_name = col if transform_names: col_name = transform_names.get(col, col) col_text = '{}. {}'.format( col_name, get_text(col_meta['text'], text_key)) except UnicodeEncodeError: col_text = '{}. 
{}'.format( col_name, qp.core.tools.dp.io.unicoder( get_text(col_meta['text'], text_key), like_ascii=True)) else: col_text = get_text(col_meta['text'], text_key) return col_text def paint_add_text_map(meta, add_text_map, text_key): if add_text_map is None: add_text_map = {} else: try: add_text_map = { key: get_text(text, text_key) for key, text in add_text_map.iteritems()} except UnicodeEncodeError: add_text_map = { key: qp.core.tools.dp.io.unicoder( get_text(text, text_key, like_ascii=True)) for key, text in add_text_map.iteritems()} return add_text_map def paint_col_values_text(meta, col, values, text_key, add_text_map=None): add_text_map = paint_add_text_map(meta, add_text_map, text_key) num_col = meta['columns'][col]['type'] in ['int', 'float'] try: has_all = 'All' in values if has_all: values.remove('All') if not num_col: try: values_map = { val['value']: get_text(val['text'], text_key) for val in meta['columns'][col]['values']} except UnicodeEncodeError: values_map = { val['value']: qp.core.tools.dp.io.unicoder( get_text(val['text'], text_key, like_ascii=True)) for val in meta['columns'][col]['values']} else: values_map = {} values_map.update(add_text_map) values_text = [values_map[v] for v in values] except KeyError: values_text = values except ValueError: values_text = values if has_all: values_text = ['All'] + values_text return values_text def paint_mask_text(meta, mask, text_key, display_names): mask_meta = meta['masks'][mask] if display_names: try: mask_text = '{}. {}'.format( mask, get_text(mask_meta['text'], text_key)) except UnicodeEncodeError: mask_text = '{}. {}'.format( mask, qp.core.tools.dp.io.unicoder( get_text(mask_meta['text'], text_key), like_ascii=True)) else: mask_text = get_text(mask_meta['text'], text_key) return mask_text def paint_array_items_text(meta, mask, items, text_key): try: has_all = 'All' in items items = [i for i in items if not i=='All'] items_map = {} try: for item in meta['masks'][mask]['items']: if isinstance(item['text'], dict): text = get_text(item['text'], text_key) else: source = item['source'].split('@')[-1] text = get_text(meta['columns'][source]['text'], text_key) text = text.replace( '{} - '.format( get_text(meta['masks'][mask]['text'], text_key)), '') items_map.update({item['source'].split('@')[-1]: text}) except UnicodeEncodeError: for item in meta['masks'][mask]['items']: if isinstance(item['text'], dict): text = qp.core.tools.dp.io.unicoder( get_text(item['text'], text_key), like_ascii=True) else: source = item['source'].split('@')[-1] text = qp.core.tools.dp.io.unicoder( get_text(meta['columns'][source]['text'], text_key), like_ascii=True) text = qp.core.tools.dp.io.unicoder( text.replace( '{} - '.format( get_text(meta['masks'][mask]['text'], text_key)), ''), like_ascii=True) items_map.update({item['source'].split('@')[-1]: text}) items_text = [items_map[i] for i in items] if has_all: items_text = ['All'] + items_text except KeyError: items_text = items except ValueError: items_text = items return items_text def paint_array_values_text(meta, mask, values, text_key, add_text_map=None): add_text_map = paint_add_text_map(meta, add_text_map, text_key) values_meta = emulate_meta(meta, meta['masks'][mask]['values']) try: has_all = 'All' in values if has_all: values.remove('All') try: values_map = { val['value']: get_text(val['text'], text_key) for val in values_meta} except UnicodeEncodeError: values_map = { val['value']: qp.core.tools.dp.io.unicoder( get_text(val['text'], text_key, like_ascii=True)) for val in values_meta} 
values_map.update(add_text_map) values_text = [values_map[v] for v in values] if has_all: values_text = ['All'] + values_text except KeyError: values_text = values except ValueError: values_text = values return values_text def build_multiindex_from_tuples(l0_text, l1_text, names, single_row): if single_row: new_index = pd.MultiIndex.from_tuples( [(l0_text, l1_text[0])], names=names) else: new_index = pd.MultiIndex.from_product( [[l0_text], l1_text], names=names) return new_index def paint_array_items_index(meta, index, text_key, display_names): single_row = len(index.values)==1 levels = get_index_levels(index) mask = levels[0] items = levels[1] mask_text = paint_mask_text(meta, mask, text_key, display_names) items_text = paint_array_items_text(meta, mask, items, text_key) new_index = build_multiindex_from_tuples( mask_text, items_text, ['Array', 'Questions'], single_row) return new_index def paint_array_values_index(meta, index, text_key, display_names, grp_text_map=None): single_row = len(index.values)==1 levels = get_index_levels(index) mask = levels[0] values = levels[1] mask_text = paint_mask_text(meta, mask, text_key, display_names) values_text = paint_array_values_text( meta, mask, values, text_key, grp_text_map) new_index = build_multiindex_from_tuples( mask_text, values_text, ['Question', 'Values'], single_row) return new_index def get_rules(meta, col, axis): if col=='@': return None try: if col in meta['columns']: rules = meta['columns'][col]['rules'][axis] elif col in meta['masks']: rules = meta['masks'][col]['rules'][axis] return rules except: return None def get_rules_slicer(f, rules, copy=True): if copy: f = f.copy() if 'slicex' in rules: kwargs = rules['slicex'] values = kwargs.get('values', None) f = qp.core.tools.view.query.slicex(f, **kwargs) if 'sortx' in rules: kwargs = rules['sortx'] fixed = kwargs.get('fixed', None) sort_on = kwargs.get('sort_on', '@') f = qp.core.tools.view.query.sortx(f, **kwargs) if 'dropx' in rules: kwargs = rules['dropx'] values = kwargs.get('values', None) f = qp.core.tools.view.query.dropx(f, **kwargs) return f.index.values.tolist() def apply_rules(df, meta, rules): col_x = meta['columns'][df.index.levels[0][0]] col_y = meta['columns'][df.columns.levels[0][0]] if isinstance(rules, bool): rules = ['x', 'y'] if 'x' in rules and df.index.levels[1][0]!='@' and 'rules' in col_x: rx = col_x['rules'].get('x', None) if not rx is None: if 'slicex' in rx: kwargs = rx['slicex'] values = kwargs.get('values', None) if not values is None: kwargs['values'] = [str(v) for v in values] df = qp.core.tools.view.query.slicex(df, **kwargs) if 'sortx' in rx: kwargs = rx['sortx'] fixed = kwargs.get('fixed', None) if not fixed is None: kwargs['fixed'] = [str(f) for f in fixed] df = qp.core.tools.view.query.sortx(df, **kwargs) if 'dropx' in rx: kwargs = rx['dropx'] values = kwargs.get('values', None) if not values is None: kwargs['values'] = [str(v) for v in values] df = qp.core.tools.view.query.dropx(df, **kwargs) if 'y' in rules and df.columns.levels[1][0]!='@' and 'rules' in col_y: ry = col_y['rules'].get('y', None) if not ry is None: if 'slicex' in ry: kwargs = ry['slicex'] values = kwargs.get('values', None) if not values is None: kwargs['values'] = [str(v) for v in values] df = qp.core.tools.view.query.slicex(df.T, **kwargs).T if 'sortx' in ry: kwargs = ry['sortx'] fixed = kwargs.get('fixed', None) if not fixed is None: kwargs['fixed'] = [str(f) for f in fixed] df = qp.core.tools.view.query.sortx(df.T, **kwargs).T if 'dropx' in ry: kwargs = ry['dropx'] values = 
kwargs.get('values', None) if not values is None: kwargs['values'] = [str(v) for v in values] df = qp.core.tools.view.query.dropx(df.T, **kwargs).T return df def rule_viable_axes(meta, vk, x, y): viable_axes = ['x', 'y'] condensed_x = False condensed_y = False array_summary = (x in meta['masks'] and y == '@') transposed_summary = (y in meta['masks'] and x == '@') v_method = vk.split('|')[1] relation = vk.split('|')[2] s_name = vk.split('|')[-1] descriptive = v_method.startswith('.d') exp_net = '}+]' in relation array_sum_freqs = array_summary and s_name in ['counts', 'c%', 'r%'] if transposed_summary: x, y = y, x if (relation.split(":")[0].startswith('x') and not exp_net) or descriptive: if not array_summary: condensed_x = True elif relation.split(":")[1].startswith('y'): condensed_y = True else: if re.search('x\[.+:y$', relation) != None: condensed_x = True elif re.search('x:y\[.+', relation) != None: condensed_y = True if re.search('y\[.+:x$', relation) != None: condensed_y = True elif re.search('y:x\[.+', relation) != None: condensed_x = True if condensed_x or x=='@': viable_axes.remove('x') if condensed_y or (y=='@' and not array_sum_freqs): viable_axes.remove('y') return viable_axes def get_text(text, text_key, axis=None): if text is None: text = '' if isinstance(text, (str, unicode)): return text elif isinstance(text, (dict, OrderedDict)): if axis is None: if isinstance(text_key, (str, unicode)): if text_key in text: return text[text_key] else: for key in text_key: if key in text: return text[key] else: if axis in text_key.keys(): for key in text_key[axis]: if key in text: return text[key] raise KeyError( "No matching text key from the list {} was not found in the" " text object: {}".format(text_key, text) ) else: raise TypeError( "The value set into a 'text' object must either be" " <str> or <unicode>, or <dict> or <collections.OrderedDict>" " of <str> or <unicode>. 
Found: {}".format(text) ) def finish_text_key(meta, text_key): default_text = meta['lib'].get('default text', 'None') if text_key is None: text_key = {} for key in ['x', 'y']: if key in text_key.keys(): if isinstance(text_key[key], (str, unicode)): text_key[key] = [text_key[key], default_text] elif isinstance(text_key[key], list): text_key[key].append(default_text) else: raise TypeError( "text_key items must be <str> or <list>\n" "Found: %s" % (type(text_key[key])) ) else: text_key[key] = [default_text] return text_key def get_values(var_meta, meta): values = [] for value in var_meta['values']: if isinstance(value, dict): values.append(value) elif isinstance(value, (str, unicode)): values += get_mapped_meta(meta, value) return values def flatten_list(the_list, deep_flatten=False): if deep_flatten: flat = list(itertools.chain.from_iterable(the_list)) else: flat = [] for item in the_list: if isinstance(item, (list)): for subitem in item: flat.append(subitem) else: flat.append(item) return flat def is_mapped_meta(item): if isinstance(item, (str, unicode)): if item.split('@')[0] in ['lib', 'columns', 'masks', 'info', 'sets']: if re.match(MAPPED_PATTERN, item): return True return False def get_mapped_meta(meta, mapped): steps = mapped.split('@') key = steps.pop() for step in steps: if isinstance(meta, list): step = int(step) meta = meta[step] if key in meta: if isinstance(meta[key], (dict, OrderedDict)): meta = {key: meta[key]} else: meta = meta[key] return meta def create_multi_index(item_1, item_2, names=None): return pd.MultiIndex.from_product([[item_1], item_2], names=names) def apply_multi_index_items(view, multi_index_items=None, names=None): if multi_index_items is not None: for key in multi_index_items: item = multi_index_items[key] multi_index = create_multi_index(item[0], item[1], names=names) setattr(view, key, multi_index) def emulate_meta(meta, item): if is_mapped_meta(item): item = get_mapped_meta(meta, item) item = emulate_meta(meta, item) return item elif isinstance(item, (list, tuple, set)): for n, i in enumerate(item): item[n] = emulate_meta(meta, item[n]) item = flatten_list(item) return item elif isinstance(item, (dict, OrderedDict)): for k in item.keys(): item[k] = emulate_meta(meta, item[k]) return item else: return item
MIT License
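An illustrative, hedged call for as_datetime64 above; the record's context is Python 2-era pandas, so this is shown under that assumption, and the expected behaviour is taken from the docstring (which also notes unresolved UTC issues).

import pandas as pd
# assumes as_datetime64 from the record's context (quantipy.core.helpers.functions) is importable
raw = pd.Series(['02/07/2014 16:58', '03/07/2014 09:05'])
converted = as_datetime64(raw, date_format='dmy', date_sep='/', time_format='hm')
# converted is a numpy.datetime64 Series; per the docstring, the first entry corresponds to 2 July 2014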
etiennemd/alexa-ecovacs
Deploy/Crypto/PublicKey/pubkey.py
pubkey.verify
python
def verify (self, M, signature): if isinstance(M, bytes): M=bytes_to_long(M) return self._verify(M, signature)
Verify the validity of a signature. :Parameter M: The expected message. :Type M: byte string or long :Parameter signature: The signature to verify. :Type signature: tuple with two items, as returned by `sign` :Return: True if the signature is correct, False otherwise.
https://github.com/etiennemd/alexa-ecovacs/blob/d0ee083c3d0728ebbfda3f41ae84979c6aad36d7/Deploy/Crypto/PublicKey/pubkey.py#L114-L126
__revision__ = "$Id$" import types, warnings from Crypto.Util.number import * class pubkey: def __init__(self): pass def __getstate__(self): d=self.__dict__ for key in self.keydata: if key in d: d[key]=int(d[key]) return d def __setstate__(self, d): for key in self.keydata: if key in d: self.__dict__[key]=bignum(d[key]) def encrypt(self, plaintext, K): wasString=0 if isinstance(plaintext, bytes): plaintext=bytes_to_long(plaintext) ; wasString=1 if isinstance(K, bytes): K=bytes_to_long(K) ciphertext=self._encrypt(plaintext, K) if wasString: return tuple(map(long_to_bytes, ciphertext)) else: return ciphertext def decrypt(self, ciphertext): wasString=0 if not isinstance(ciphertext, tuple): ciphertext=(ciphertext,) if isinstance(ciphertext[0], bytes): ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1 plaintext=self._decrypt(ciphertext) if wasString: return long_to_bytes(plaintext) else: return plaintext def sign(self, M, K): if (not self.has_private()): raise TypeError('Private key not available in this object') if isinstance(M, bytes): M=bytes_to_long(M) if isinstance(K, bytes): K=bytes_to_long(K) return self._sign(M, K)
MIT License
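A hedged, self-contained sketch of the sign/verify flow defined by the pubkey base class above; ToyKey and its additive "signature" are purely hypothetical and exist only to show how verify() converts a byte-string message to a long before delegating to _verify().

from Crypto.PublicKey.pubkey import pubkey   # legacy PyCrypto module, per the record's path

class ToyKey(pubkey):
    # hypothetical key for illustration only: the "signature" is just message + secret
    def __init__(self, secret):
        self.secret = secret
    def has_private(self):
        return True
    def _sign(self, M, K):
        return (M + self.secret,)
    def _verify(self, M, sig):
        return sig[0] - self.secret == M

key = ToyKey(42)
signature = key.sign(b'hello', 0)          # bytes are converted to a long before _sign is called
print(key.verify(b'hello', signature))     # True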
ali5h/rules_pip
third_party/py/pip/_internal/utils/subprocess.py
call_subprocess
python
def call_subprocess( cmd, show_stdout=False, cwd=None, on_returncode='raise', extra_ok_returncodes=None, command_desc=None, extra_environ=None, unset_environ=None, spinner=None, log_failed_cmd=True ): if extra_ok_returncodes is None: extra_ok_returncodes = [] if unset_environ is None: unset_environ = [] if show_stdout: log_subprocess = subprocess_logger.info used_level = logging.INFO else: log_subprocess = subprocess_logger.debug used_level = logging.DEBUG showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level use_spinner = not showing_subprocess and spinner is not None if command_desc is None: command_desc = format_command_args(cmd) log_subprocess("Running command %s", command_desc) env = os.environ.copy() if extra_environ: env.update(extra_environ) for name in unset_environ: env.pop(name, None) try: proc = subprocess.Popen( reveal_command_args(cmd), stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, env=env, ) assert proc.stdin assert proc.stdout proc.stdin.close() except Exception as exc: if log_failed_cmd: subprocess_logger.critical( "Error %s while executing command %s", exc, command_desc, ) raise all_output = [] while True: line = console_to_str(proc.stdout.readline()) if not line: break line = line.rstrip() all_output.append(line + '\n') log_subprocess(line) if use_spinner: assert spinner spinner.spin() try: proc.wait() finally: if proc.stdout: proc.stdout.close() proc_had_error = ( proc.returncode and proc.returncode not in extra_ok_returncodes ) if use_spinner: assert spinner if proc_had_error: spinner.finish("error") else: spinner.finish("done") if proc_had_error: if on_returncode == 'raise': if not showing_subprocess and log_failed_cmd: msg = make_subprocess_output_error( cmd_args=cmd, cwd=cwd, lines=all_output, exit_status=proc.returncode, ) subprocess_logger.error(msg) exc_msg = ( 'Command errored out with exit status {}: {} ' 'Check the logs for full command output.' ).format(proc.returncode, command_desc) raise InstallationError(exc_msg) elif on_returncode == 'warn': subprocess_logger.warning( 'Command "%s" had error code %s in %s', command_desc, proc.returncode, cwd, ) elif on_returncode == 'ignore': pass else: raise ValueError('Invalid value: on_returncode={!r}'.format( on_returncode)) return ''.join(all_output)
Args: show_stdout: if true, use INFO to log the subprocess's stderr and stdout streams. Otherwise, use DEBUG. Defaults to False. extra_ok_returncodes: an iterable of integer return codes that are acceptable, in addition to 0. Defaults to None, which means []. unset_environ: an iterable of environment variable names to unset prior to calling subprocess.Popen(). log_failed_cmd: if false, failed commands are not logged, only raised.
https://github.com/ali5h/rules_pip/blob/fb02cb7bf5c03bc8cd4269679e4aea2e1839b501/third_party/py/pip/_internal/utils/subprocess.py#L111-L253
from __future__ import absolute_import import logging import os import subprocess from pip._vendor.six.moves import shlex_quote from pip._internal.cli.spinners import SpinnerInterface, open_spinner from pip._internal.exceptions import InstallationError from pip._internal.utils.compat import console_to_str, str_to_display from pip._internal.utils.logging import subprocess_logger from pip._internal.utils.misc import HiddenText, path_to_display from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Any, Callable, Iterable, List, Mapping, Optional, Text, Union CommandArgs = List[Union[str, HiddenText]] LOG_DIVIDER = '----------------------------------------' def make_command(*args): command_args = [] for arg in args: if isinstance(arg, list): command_args.extend(arg) else: command_args.append(arg) return command_args def format_command_args(args): return ' '.join( shlex_quote(str(arg)) if isinstance(arg, HiddenText) else shlex_quote(arg) for arg in args ) def reveal_command_args(args): return [ arg.secret if isinstance(arg, HiddenText) else arg for arg in args ] def make_subprocess_output_error( cmd_args, cwd, lines, exit_status, ): command = format_command_args(cmd_args) command_display = str_to_display(command, desc='command bytes') cwd_display = path_to_display(cwd) output = ''.join(lines) msg = ( u'Command errored out with exit status {exit_status}:\n' ' command: {command_display}\n' ' cwd: {cwd_display}\n' 'Complete output ({line_count} lines):\n{output}{divider}' ).format( exit_status=exit_status, command_display=command_display, cwd_display=cwd_display, line_count=len(lines), output=output, divider=LOG_DIVIDER, ) return msg
MIT License
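A hedged usage sketch for call_subprocess above; note that this is a pip-internal helper rather than a public API, so the import path below is simply the one shown in the record.

import sys
from pip._internal.utils.subprocess import call_subprocess   # internal pip module, per the record

output = call_subprocess([sys.executable, '--version'], show_stdout=False)
print(output)   # combined stdout/stderr as one string; a non-zero exit raises InstallationError by default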
facebookresearch/reagent
reagent/gym/policies/policy.py
Policy.act
python
def act( self, obs: Any, possible_actions_mask: Optional[torch.Tensor] = None ) -> rlt.ActorOutput: scorer_inputs = (obs,) if possible_actions_mask is not None: scorer_inputs += (possible_actions_mask,) scores = self.scorer(*scorer_inputs) actor_output = self.sampler.sample_action(scores) return actor_output.cpu().detach()
Performs the composition described above. These are the actions being put into the replay buffer, not necessarily the actions taken by the environment!
https://github.com/facebookresearch/reagent/blob/57b58a8b3a6b74bb87a197b73a6cd108ddad895e/reagent/gym/policies/policy.py#L24-L37
from typing import Any, Optional import reagent.core.types as rlt import torch from reagent.gym.types import Sampler, Scorer class Policy: def __init__(self, scorer: Scorer, sampler: Sampler): self.scorer = scorer self.sampler = sampler
BSD 3-Clause New or Revised License
eventbrite/pysoa
pysoa/client/expander.py
TypeNode.expansions
python
def expansions(self): return list(six.itervalues(self._expansions))
The type node's list of expansions.
https://github.com/eventbrite/pysoa/blob/8ab18b43f533574f2b235c734da309ab21176957/pysoa/client/expander.py#L193-L197
from __future__ import ( absolute_import, unicode_literals, ) from typing import ( Any, Dict, List, Optional, Union, cast, ) from conformity import fields from conformity.settings import ( Settings, SettingsSchema, ) import six __all__ = ( 'ExpansionConverter', 'ExpansionNode', 'Expansions', 'ExpansionSettings', 'TypeExpansions', 'TypeNode', 'TypeRoutes', ) class ExpansionSettings(Settings): schema = { 'type_routes': fields.SchemalessDictionary( key_type=fields.UnicodeString( description='The name of the expansion route, to be referenced from the `type_expansions` ' 'configuration', ), value_type=fields.Dictionary( { 'service': fields.UnicodeString( description='The name of the service to call to resolve this route', ), 'action': fields.UnicodeString( description='The name of the action to call to resolve this route, which must accept a single ' 'request field of type `List`, to which all the identifiers for matching candidate ' 'expansions will be passed, and which must return a single response field of type ' '`Dictionary`, from which all expansion objects will be obtained', ), 'request_field': fields.UnicodeString( description='The name of the `List` identifier field to place in the `ActionRequest` body when ' 'making the request to the named service and action', ), 'response_field': fields.UnicodeString( description='The name of the `Dictionary` field returned in the `ActionResponse`, from which ' 'the expanded objects will be extracted', ), }, description='The instructions for resolving this type route', ), description='The definition of all recognized types that can be expanded into and information about how ' 'to resolve objects of those types through action calls', ), 'type_expansions': fields.SchemalessDictionary( key_type=fields.UnicodeString( description='The name of the type for which the herein defined expansions can be sought, which will be ' "matched with a key from the `expansions` dict passed to one of `Client`'s `call_***` " 'methods, and which must also match the value of a `_type` field found on response objects ' 'on which extra data will be expanded', ), value_type=fields.SchemalessDictionary( key_type=fields.UnicodeString( description='The name of an expansion, which will be matched with a value from the `expansions` ' "dict passed to one of `Client`'s `call_***` methods corresponding to the type key in " 'that dict', ), value_type=fields.Dictionary( { 'type': fields.Nullable(fields.UnicodeString( description='The type of object this expansion yields, which must map back to a ' '`type_expansions` key in order to support nested/recursive expansions, and ' 'may be `None` if you do not wish to support nested/recursive expansions for ' 'this expansion', )), 'route': fields.UnicodeString( description='The route to use to resolve this expansion, which must match a key in the ' '`type_routes` configuration', ), 'source_field': fields.UnicodeString( description='The name of the field in the base object that contains the identifier used ' 'for obtaining the expansion object (the identifier will be passed to the ' '`request_field` in the route when resolving the expansion)', ), 'destination_field': fields.UnicodeString( description='The name of a not-already-existent field in the base object into which the ' 'expansion object will be placed after it is obtained from the route', ), 'raise_action_errors': fields.Boolean( description='Whether to raise action errors encountered when expanding objects these ' 'objects (by default, action errors are suppressed, which differs from 
the ' 'behavior of the `Client` to raise action errors during normal requests)', ), }, optional_keys=('raise_action_errors', ), description='The definition of one specific possible expansion for this object type', ), description='The definition of all possible expansions for this object type', ), description='The definition of all types that may contain identifiers that can be expanded into objects ' 'using the `type_routes` configurations', ), } class TypeNode(object): def __init__(self, node_type): self.type = node_type self._expansions = {} def add_expansion(self, expansion_node): existing_expansion_node = self.get_expansion(expansion_node.name) if existing_expansion_node: for child_expansion in expansion_node.expansions: existing_expansion_node.add_expansion(child_expansion) else: self._expansions[expansion_node.name] = expansion_node def get_expansion(self, expansion_name): return self._expansions.get(expansion_name) def find_objects(self, obj): objects = [] if isinstance(obj, dict): object_type = obj.get('_type') if object_type == self.type: objects.append(obj) else: for sub_object in six.itervalues(obj): objects.extend(self.find_objects(sub_object)) elif isinstance(obj, list): for sub_object in obj: objects.extend(self.find_objects(sub_object)) return objects @property
Apache License 2.0
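A hedged sketch using the TypeNode class from the record's context; find_objects() walks a response body and collects every dict whose _type matches, which is the basis on which expansions are later resolved. The body shown is an invented example payload.

# assumes TypeNode from the record's context (pysoa.client.expander) is importable
node = TypeNode('author')
body = {
    'posts': [
        {'_type': 'post', 'id': 1, 'author': {'_type': 'author', 'id': 7}},
        {'_type': 'post', 'id': 2},
    ],
}
print(node.find_objects(body))   # [{'_type': 'author', 'id': 7}]
print(node.expansions)           # [] until ExpansionNode instances are added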
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_role_binding.py
V1RoleBinding.role_ref
python
def role_ref(self, role_ref): if self.local_vars_configuration.client_side_validation and role_ref is None: raise ValueError("Invalid value for `role_ref`, must not be `None`") self._role_ref = role_ref
Sets the role_ref of this V1RoleBinding. :param role_ref: The role_ref of this V1RoleBinding. # noqa: E501 :type: V1RoleRef
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_role_binding.py#L152-L162
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1RoleBinding(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'role_ref': 'V1RoleRef', 'subjects': 'list[V1Subject]' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'role_ref': 'roleRef', 'subjects': 'subjects' } def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._role_ref = None self._subjects = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata self.role_ref = role_ref if subjects is not None: self.subjects = subjects @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property def metadata(self): return self._metadata @metadata.setter def metadata(self, metadata): self._metadata = metadata @property def role_ref(self): return self._role_ref @role_ref.setter
Apache License 2.0
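A hedged sketch for the role_ref setter above; V1RoleRef is assumed to be the companion model exported by the same generated client, and assigning None trips the client-side validation shown in the record.

from kubernetes_asyncio.client import V1RoleBinding, V1RoleRef   # assumed public exports of the generated client

binding = V1RoleBinding(
    role_ref=V1RoleRef(api_group='rbac.authorization.k8s.io', kind='Role', name='pod-reader'),
)
try:
    binding.role_ref = None          # rejected: role_ref must not be None
except ValueError as exc:
    print(exc)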
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_0/gallery/gallery_client.py
GalleryClient.update_extension_by_id
python
def update_extension_by_id(self, extension_id): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') response = self._send(http_method='PUT', location_id='a41192c8-9525-4b58-bc86-179fa549d80d', version='5.0-preview.2', route_values=route_values) return self._deserialize('PublishedExtension', response)
UpdateExtensionById. [Preview API] :param str extension_id: :rtype: :class:`<PublishedExtension> <azure.devops.v5_0.gallery.models.PublishedExtension>`
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_0/gallery/gallery_client.py#L848-L861
 from msrest import Serializer, Deserializer from ...client import Client from . import models class GalleryClient(Client): def __init__(self, base_url=None, creds=None): super(GalleryClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '69d21c00-f135-441b-b5ce-3626378e0819' def share_extension_by_id(self, extension_id, account_name): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') if account_name is not None: route_values['accountName'] = self._serialize.url('account_name', account_name, 'str') self._send(http_method='POST', location_id='1f19631b-a0b4-4a03-89c2-d79785d24360', version='5.0-preview.1', route_values=route_values) def unshare_extension_by_id(self, extension_id, account_name): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') if account_name is not None: route_values['accountName'] = self._serialize.url('account_name', account_name, 'str') self._send(http_method='DELETE', location_id='1f19631b-a0b4-4a03-89c2-d79785d24360', version='5.0-preview.1', route_values=route_values) def share_extension(self, publisher_name, extension_name, account_name): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if account_name is not None: route_values['accountName'] = self._serialize.url('account_name', account_name, 'str') self._send(http_method='POST', location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46', version='5.0-preview.1', route_values=route_values) def unshare_extension(self, publisher_name, extension_name, account_name): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if account_name is not None: route_values['accountName'] = self._serialize.url('account_name', account_name, 'str') self._send(http_method='DELETE', location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46', version='5.0-preview.1', route_values=route_values) def get_acquisition_options(self, item_id, installation_target, test_commerce=None, is_free_or_trial_install=None): route_values = {} if item_id is not None: route_values['itemId'] = self._serialize.url('item_id', item_id, 'str') query_parameters = {} if installation_target is not None: query_parameters['installationTarget'] = self._serialize.query('installation_target', installation_target, 'str') if test_commerce is not None: query_parameters['testCommerce'] = self._serialize.query('test_commerce', test_commerce, 'bool') if is_free_or_trial_install is not None: query_parameters['isFreeOrTrialInstall'] = self._serialize.query('is_free_or_trial_install', is_free_or_trial_install, 'bool') response = self._send(http_method='GET', location_id='9d0a0105-075e-4760-aa15-8bcf54d1bd7d', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('AcquisitionOptions', response) def request_acquisition(self, acquisition_request): content = 
self._serialize.body(acquisition_request, 'ExtensionAcquisitionRequest') response = self._send(http_method='POST', location_id='3adb1f2d-e328-446e-be73-9f6d98071c45', version='5.0-preview.1', content=content) return self._deserialize('ExtensionAcquisitionRequest', response) def get_asset_by_name(self, publisher_name, extension_name, version, asset_type, account_token=None, accept_default=None, account_token_header=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if version is not None: route_values['version'] = self._serialize.url('version', version, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') if accept_default is not None: query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool') additional_headers = {} if account_token_header is not None: additional_headers['X-Market-AccountToken'] = account_token_header response = self._send(http_method='GET', location_id='7529171f-a002-4180-93ba-685f358a0482', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters, additional_headers=additional_headers, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_asset(self, extension_id, version, asset_type, account_token=None, accept_default=None, account_token_header=None, **kwargs): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') if version is not None: route_values['version'] = self._serialize.url('version', version, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') if accept_default is not None: query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool') additional_headers = {} if account_token_header is not None: additional_headers['X-Market-AccountToken'] = account_token_header response = self._send(http_method='GET', location_id='5d545f3d-ef47-488b-8be3-f5ee1517856c', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters, additional_headers=additional_headers, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_asset_authenticated(self, publisher_name, extension_name, version, asset_type, account_token=None, account_token_header=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if version is not None: route_values['version'] = self._serialize.url('version', version, 'str') if asset_type is not None: 
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') additional_headers = {} if account_token_header is not None: additional_headers['X-Market-AccountToken'] = account_token_header response = self._send(http_method='GET', location_id='506aff36-2622-4f70-8063-77cce6366d20', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters, additional_headers=additional_headers, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def associate_azure_publisher(self, publisher_name, azure_publisher_id): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') query_parameters = {} if azure_publisher_id is not None: query_parameters['azurePublisherId'] = self._serialize.query('azure_publisher_id', azure_publisher_id, 'str') response = self._send(http_method='PUT', location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('AzurePublisher', response) def query_associated_azure_publisher(self, publisher_name): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') response = self._send(http_method='GET', location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d', version='5.0-preview.1', route_values=route_values) return self._deserialize('AzurePublisher', response) def get_categories(self, languages=None): query_parameters = {} if languages is not None: query_parameters['languages'] = self._serialize.query('languages', languages, 'str') response = self._send(http_method='GET', location_id='e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a', version='5.0-preview.1', query_parameters=query_parameters) return self._deserialize('[str]', self._unwrap_collection(response)) def get_category_details(self, category_name, languages=None, product=None): route_values = {} if category_name is not None: route_values['categoryName'] = self._serialize.url('category_name', category_name, 'str') query_parameters = {} if languages is not None: query_parameters['languages'] = self._serialize.query('languages', languages, 'str') if product is not None: query_parameters['product'] = self._serialize.query('product', product, 'str') response = self._send(http_method='GET', location_id='75d3c04d-84d2-4973-acd2-22627587dabc', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('CategoriesResult', response) def get_category_tree(self, product, category_id, lcid=None, source=None, product_version=None, skus=None, sub_skus=None): route_values = {} if product is not None: route_values['product'] = self._serialize.url('product', product, 'str') if category_id is not None: route_values['categoryId'] = self._serialize.url('category_id', category_id, 'str') query_parameters = {} if lcid is not None: query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int') if source is not None: query_parameters['source'] = self._serialize.query('source', source, 'str') if product_version is not None: query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 
'str') if skus is not None: query_parameters['skus'] = self._serialize.query('skus', skus, 'str') if sub_skus is not None: query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str') response = self._send(http_method='GET', location_id='1102bb42-82b0-4955-8d8a-435d6b4cedd3', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ProductCategory', response) def get_root_categories(self, product, lcid=None, source=None, product_version=None, skus=None, sub_skus=None): route_values = {} if product is not None: route_values['product'] = self._serialize.url('product', product, 'str') query_parameters = {} if lcid is not None: query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int') if source is not None: query_parameters['source'] = self._serialize.query('source', source, 'str') if product_version is not None: query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str') if skus is not None: query_parameters['skus'] = self._serialize.query('skus', skus, 'str') if sub_skus is not None: query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str') response = self._send(http_method='GET', location_id='31fba831-35b2-46f6-a641-d05de5a877d8', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ProductCategoriesResult', response) def get_certificate(self, publisher_name, extension_name, version=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if version is not None: route_values['version'] = self._serialize.url('version', version, 'str') response = self._send(http_method='GET', location_id='e905ad6a-3f1f-4d08-9f6d-7d357ff8b7d0', version='5.0-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_content_verification_log(self, publisher_name, extension_name, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') response = self._send(http_method='GET', location_id='c0f1c7c4-3557-4ffb-b774-1e48c4865e99', version='5.0-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def create_draft_for_edit_extension(self, publisher_name, extension_name): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') response = self._send(http_method='POST', location_id='02b33873-4e61-496e-83a2-59d1df46b7d8', version='5.0-preview.1', route_values=route_values) return self._deserialize('ExtensionDraft', response) def perform_edit_extension_draft_operation(self, draft_patch, publisher_name, extension_name, draft_id): 
route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') content = self._serialize.body(draft_patch, 'ExtensionDraftPatch') response = self._send(http_method='PATCH', location_id='02b33873-4e61-496e-83a2-59d1df46b7d8', version='5.0-preview.1', route_values=route_values, content=content) return self._deserialize('ExtensionDraft', response) def update_payload_in_draft_for_edit_extension(self, upload_stream, publisher_name, extension_name, draft_id, file_name=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') additional_headers = {} if file_name is not None: additional_headers['X-Market-UploadFileName'] = file_name if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='PUT', location_id='02b33873-4e61-496e-83a2-59d1df46b7d8', version='5.0-preview.1', route_values=route_values, additional_headers=additional_headers, content=content, media_type='application/octet-stream') return self._deserialize('ExtensionDraft', response) def add_asset_for_edit_extension_draft(self, upload_stream, publisher_name, extension_name, draft_id, asset_type, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='PUT', location_id='f1db9c47-6619-4998-a7e5-d7f9f41a4617', version='5.0-preview.1', route_values=route_values, content=content, media_type='application/octet-stream') return self._deserialize('ExtensionDraftAsset', response) def create_draft_for_new_extension(self, upload_stream, publisher_name, product, file_name=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') additional_headers = {} if product is not None: additional_headers['X-Market-UploadFileProduct'] = product if file_name is not None: additional_headers['X-Market-UploadFileName'] = file_name if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='POST', location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79', version='5.0-preview.1', route_values=route_values, additional_headers=additional_headers, content=content, media_type='application/octet-stream') return 
self._deserialize('ExtensionDraft', response) def perform_new_extension_draft_operation(self, draft_patch, publisher_name, draft_id): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') content = self._serialize.body(draft_patch, 'ExtensionDraftPatch') response = self._send(http_method='PATCH', location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79', version='5.0-preview.1', route_values=route_values, content=content) return self._deserialize('ExtensionDraft', response) def update_payload_in_draft_for_new_extension(self, upload_stream, publisher_name, draft_id, file_name=None, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') additional_headers = {} if file_name is not None: additional_headers['X-Market-UploadFileName'] = file_name if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='PUT', location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79', version='5.0-preview.1', route_values=route_values, additional_headers=additional_headers, content=content, media_type='application/octet-stream') return self._deserialize('ExtensionDraft', response) def add_asset_for_new_extension_draft(self, upload_stream, publisher_name, draft_id, asset_type, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='PUT', location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7', version='5.0-preview.1', route_values=route_values, content=content, media_type='application/octet-stream') return self._deserialize('ExtensionDraftAsset', response) def get_asset_from_edit_extension_draft(self, publisher_name, draft_id, asset_type, extension_name, **kwargs): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') query_parameters = {} if extension_name is not None: query_parameters['extensionName'] = self._serialize.query('extension_name', extension_name, 'str') response = self._send(http_method='GET', location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_asset_from_new_extension_draft(self, publisher_name, draft_id, asset_type, **kwargs): route_values = {} if publisher_name is not 
None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if draft_id is not None: route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') response = self._send(http_method='GET', location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7', version='5.0-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_extension_events(self, publisher_name, extension_name, count=None, after_date=None, include=None, include_property=None): route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') query_parameters = {} if count is not None: query_parameters['count'] = self._serialize.query('count', count, 'int') if after_date is not None: query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601') if include is not None: query_parameters['include'] = self._serialize.query('include', include, 'str') if include_property is not None: query_parameters['includeProperty'] = self._serialize.query('include_property', include_property, 'str') response = self._send(http_method='GET', location_id='3d13c499-2168-4d06-bef4-14aba185dcd5', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ExtensionEvents', response) def publish_extension_events(self, extension_events): content = self._serialize.body(extension_events, '[ExtensionEvents]') self._send(http_method='POST', location_id='0bf2bd3a-70e0-4d5d-8bf7-bd4a9c2ab6e7', version='5.0-preview.1', content=content) def query_extensions(self, extension_query, account_token=None, account_token_header=None): query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') additional_headers = {} if account_token_header is not None: additional_headers['X-Market-AccountToken'] = account_token_header content = self._serialize.body(extension_query, 'ExtensionQuery') response = self._send(http_method='POST', location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6', version='5.0-preview.1', query_parameters=query_parameters, additional_headers=additional_headers, content=content) return self._deserialize('ExtensionQueryResult', response) def create_extension(self, upload_stream, **kwargs): if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='POST', location_id='a41192c8-9525-4b58-bc86-179fa549d80d', version='5.0-preview.2', content=content, media_type='application/octet-stream') return self._deserialize('PublishedExtension', response) def delete_extension_by_id(self, extension_id, version=None): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') query_parameters = {} if version is not None: query_parameters['version'] = self._serialize.query('version', version, 'str') self._send(http_method='DELETE', location_id='a41192c8-9525-4b58-bc86-179fa549d80d', 
version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) def get_extension_by_id(self, extension_id, version=None, flags=None): route_values = {} if extension_id is not None: route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') query_parameters = {} if version is not None: query_parameters['version'] = self._serialize.query('version', version, 'str') if flags is not None: query_parameters['flags'] = self._serialize.query('flags', flags, 'str') response = self._send(http_method='GET', location_id='a41192c8-9525-4b58-bc86-179fa549d80d', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('PublishedExtension', response)
MIT License
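A hedged usage sketch for the GalleryClient wrappers shown in the context above. The constructor signature (base_url, creds) and the share/unshare method names come from that code; the marketplace URL, token handling, and import path are assumptions and are left commented out.

# Hypothetical sketch only -- publisher, extension, and account names are placeholders.
from msrest.authentication import BasicAuthentication

def share_then_unshare(gallery_client):
    # Share a published extension with an account, then undo it.
    gallery_client.share_extension(publisher_name='example-publisher',
                                   extension_name='example-extension',
                                   account_name='example-account')
    gallery_client.unshare_extension(publisher_name='example-publisher',
                                     extension_name='example-extension',
                                     account_name='example-account')

# creds = BasicAuthentication('', '<personal-access-token>')          # assumed auth style
# client = GalleryClient(base_url='https://marketplace.visualstudio.com', creds=creds)
# share_then_unshare(client)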
atmtools/typhon
typhon/files/handlers/common.py
FileInfo.__init__
python
def __init__(self, path=None, times=None, attr=None, fs=None): super(FileInfo, self).__init__() self._path = None self.path = path self._times = None self.times = times if attr is None: self.attr = {} else: self.attr = attr self.file_system = fs or LocalFileSystem()
Initialise a FileInfo object. Args: path: Absolute path to a file. times: A list or tuple of two datetime objects indicating start and end time of the file. attr: A dictionary with further attributes. fs: Implementation of fsspec file system
https://github.com/atmtools/typhon/blob/815dcb1d7cb2718ffe81cd08386739438e7782cc/typhon/files/handlers/common.py#L294-L317
from collections import defaultdict from copy import copy from datetime import datetime from functools import wraps import glob from inspect import signature, ismethod import os import pickle import warnings import netCDF4 import pandas as pd import xarray as xr import numpy as np from fsspec.implementations.local import LocalFileSystem pyhdf_is_installed = False try: from pyhdf import HDF, VS, V from pyhdf.SD import SD, SDC pyhdf_is_installed = True except ImportError: pass h5py_is_installed = False try: import h5py h5py_is_installed = True except ImportError: pass __all__ = [ 'CSV', 'FileHandler', 'FileInfo', 'HDF4', 'HDF5', 'NetCDF4', 'Plotter', 'expects_file_info', ] def parametrized(dec): def layer(*args, **kwargs): def repl(f): return dec(f, *args, **kwargs) return repl return layer @parametrized def expects_file_info(method, pos=None, key=None): if pos is None and key is None: pos = 1 @wraps(method) def wrapper(*args, **kwargs): args = list(args) if args and pos is not None: if not isinstance(args[pos], FileInfo): args[pos] = FileInfo(args[pos]) else: if not isinstance(kwargs[key], FileInfo): kwargs[key] = FileInfo(kwargs[key]) return method(*args, **kwargs) return wrapper def _xarray_rename_fields(dataset, mapping): if mapping is not None: names = set(dataset.dims.keys()) | set(dataset.variables.keys()) mapping = { old_name: new_name for old_name, new_name in mapping.items() if old_name in names } dataset = dataset.rename(mapping) return dataset class FileHandler: def __init__( self, reader=None, info=None, writer=None, **kwargs): self.reader = reader self.info = info self.writer = writer self.stack_dims = {} @expects_file_info() def get_info(self, filename, **kwargs): if self.info is not None: number_args = 1 + int(ismethod(self.info)) if len(signature(self.info).parameters) > number_args: return self.info(filename, **kwargs) else: return self.info(filename) raise NotImplementedError( "This file handler does not support reading data from a file. You " "should use a different file handler.") @expects_file_info() def read(self, filename, **kwargs): if self.reader is not None: number_args = 1 + int(ismethod(self.reader)) if len(signature(self.reader).parameters) > number_args: return self.reader(filename, **kwargs) else: return self.reader(filename) raise NotImplementedError( "This file handler does not support reading data from a file. You " "should use a different file handler.") @expects_file_info(pos=2) def write(self, data, filename, **kwargs): if self.writer is not None: if len(signature(self.writer).parameters) > 2: self.writer(data, filename, **kwargs) else: self.writer(data, filename) return None raise NotImplementedError( "This file handler does not support writing data to a file. You " "should use a different file handler.") def _ensure_local_filesystem(self, file_info): if not isinstance(file_info.file_system, LocalFileSystem): raise NotImplementedError( f"File handler {type(self).__name__:s} can only " "read from local file system, not from " f"{str(type(file_info.file_system).__name__)}") class FileInfo(os.PathLike):
MIT License
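A minimal usage sketch for the FileInfo constructor above, assuming the import path implied by the function_path field; the file path, time span, and attributes are illustrative only.

# Hedged sketch: build a FileInfo carrying a path, a (start, end) time span and extra attributes.
from datetime import datetime
from typhon.files.handlers.common import FileInfo

info = FileInfo(
    path="/data/example/granule_20190101.nc",                      # placeholder path
    times=(datetime(2019, 1, 1, 0), datetime(2019, 1, 1, 6)),      # (start, end)
    attr={"satellite": "example"},                                  # arbitrary extra metadata
)
print(info.path, info.times, info.attr)
# fs defaults to fsspec's LocalFileSystem when omitted, per the constructor above.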
pystage/pystage
src/pystage/en/stage.py
Stage.when_this_sprite_clicked
python
def when_this_sprite_clicked(self, generator_function, name='', no_refresh=False): return self._core.event_whenthisspriteclicked(generator_function, name, no_refresh)
when this sprite clicked Translation string: when this sprite clicked Engl. Translation for your reference: ... Engl. Documentation when available... Parameters ---------- generator_function : FILL name : FILL no_refresh : FILL Returns -------
https://github.com/pystage/pystage/blob/4a76e95f6de2df59736de17fe81219485fde1556/src/pystage/en/stage.py#L369-L388
from pystage.core.stage import CoreStage from pystage.en.sprite import Sprite class Stage(): def __init__(self): self._core = CoreStage() self._core.facade = self self._core.sprite_facade_class = Sprite def add_a_sprite(self, costume="default"): return self._core.pystage_createsprite(costume=costume) def play(self): self._core.pystage_play() def create_clone_of(self, sprite='_myself_'): return self._core.control_create_clone_of(sprite) def stop_all(self): return self._core.control_stop_all() def stop_other_scripts_in_sprite(self): return self._core.control_stop_other() def stop_this_script(self): return self._core.control_stop_this() def wait_seconds(self, secs): return self._core.control_wait(secs) def change_by(self, name, value): return self._core.data_changevariableby(name, value) def hide_variable(self, name): return self._core.data_hidevariable(name) def set_variable(self, name, value): return self._core.data_setvariableto(name, value) def show_variable(self, name): return self._core.data_showvariable(name) def get_variable(self, name): return self._core.data_variable(name) def broadcast(self, message): return self._core.event_broadcast(message) def broadcast_and_wait(self, message): return self._core.event_broadcastandwait(message) def when_backdrop_switches_to(self, backdrop, generator_function, name='', no_refresh=False): return self._core.event_whenbackdropswitchesto(backdrop, generator_function, name, no_refresh) def when_i_receive(self, message, generator_function, name='', no_refresh=False): return self._core.event_whenbroadcastreceived(message, generator_function, name, no_refresh) def when_GREENFLAG_clicked(self, generator_function, name='', no_refresh=False): return self._core.event_whenflagclicked(generator_function, name, no_refresh) def when_loudness_GREATERTHAN(self, value, generator_function, name='', no_refresh=False): return self._core.event_whengreaterthan_loudness(value, generator_function, name, no_refresh) def when_timer_GREATERTHAN(self, value, generator_function, name='', no_refresh=False): return self._core.event_whengreaterthan_timer(value, generator_function, name, no_refresh) def when_key_pressed(self, key, generator_function, name='', no_refresh=False): return self._core.event_whenkeypressed(key, generator_function, name, no_refresh)
MIT License
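A hedged sketch of registering the click handler documented above. Only Stage methods visible in the context (add_a_sprite, set_variable, change_by, when_this_sprite_clicked, play) are used; the handler signature, a generator function receiving the stage, is an assumption.

# Hedged pyStage sketch: count clicks in a stage variable.
from pystage.en.stage import Stage

stage = Stage()
stage.add_a_sprite()                       # default costume
stage.set_variable("clicks", 0)

def count_clicks(stage):
    # Generator-style script: bump the counter each time the sprite is clicked.
    stage.change_by("clicks", 1)
    yield

stage.when_this_sprite_clicked(count_clicks)
stage.play()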
virtuesecurity/aws-extender
BappModules/boto/ec2/instance.py
Instance.use_ip
python
def use_ip(self, ip_address, dry_run=False): if isinstance(ip_address, Address): ip_address = ip_address.public_ip return self.connection.associate_address( self.id, ip_address, dry_run=dry_run )
Associates an Elastic IP address with the instance. :type ip_address: Either an instance of :class:`boto.ec2.address.Address` or a string. :param ip_address: The IP address to associate with the instance. :rtype: bool :return: True if successful
https://github.com/virtuesecurity/aws-extender/blob/3029dd26bd7bdf7f4148e1e92adf9f8c547cafbe/BappModules/boto/ec2/instance.py#L471-L490
import boto from boto.ec2.ec2object import EC2Object, TaggedEC2Object from boto.resultset import ResultSet from boto.ec2.address import Address from boto.ec2.blockdevicemapping import BlockDeviceMapping from boto.ec2.image import ProductCodes from boto.ec2.networkinterface import NetworkInterface from boto.ec2.group import Group import base64 class InstanceState(object): def __init__(self, code=0, name=None): self.code = code self.name = name def __repr__(self): return '%s(%d)' % (self.name, self.code) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'code': self.code = int(value) elif name == 'name': self.name = value else: setattr(self, name, value) class InstancePlacement(object): def __init__(self, zone=None, group_name=None, tenancy=None): self.zone = zone self.group_name = group_name self.tenancy = tenancy def __repr__(self): return self.zone def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'availabilityZone': self.zone = value elif name == 'groupName': self.group_name = value elif name == 'tenancy': self.tenancy = value else: setattr(self, name, value) class Reservation(EC2Object): def __init__(self, connection=None): super(Reservation, self).__init__(connection) self.id = None self.owner_id = None self.groups = [] self.instances = [] def __repr__(self): return 'Reservation:%s' % self.id def startElement(self, name, attrs, connection): if name == 'instancesSet': self.instances = ResultSet([('item', Instance)]) return self.instances elif name == 'groupSet': self.groups = ResultSet([('item', Group)]) return self.groups else: return None def endElement(self, name, value, connection): if name == 'reservationId': self.id = value elif name == 'ownerId': self.owner_id = value else: setattr(self, name, value) def stop_all(self, dry_run=False): for instance in self.instances: instance.stop(dry_run=dry_run) class Instance(TaggedEC2Object): def __init__(self, connection=None): super(Instance, self).__init__(connection) self.id = None self.dns_name = None self.public_dns_name = None self.private_dns_name = None self.key_name = None self.instance_type = None self.launch_time = None self.image_id = None self.kernel = None self.ramdisk = None self.product_codes = ProductCodes() self.ami_launch_index = None self.monitored = False self.monitoring_state = None self.spot_instance_request_id = None self.subnet_id = None self.vpc_id = None self.private_ip_address = None self.ip_address = None self.requester_id = None self._in_monitoring_element = False self.persistent = False self.root_device_name = None self.root_device_type = None self.block_device_mapping = None self.state_reason = None self.group_name = None self.client_token = None self.eventsSet = None self.groups = [] self.platform = None self.interfaces = [] self.hypervisor = None self.virtualization_type = None self.architecture = None self.instance_profile = None self._previous_state = None self._state = InstanceState() self._placement = InstancePlacement() def __repr__(self): return 'Instance:%s' % self.id @property def state(self): return self._state.name @property def state_code(self): return self._state.code @property def previous_state(self): if self._previous_state: return self._previous_state.name return None @property def previous_state_code(self): if self._previous_state: return self._previous_state.code return 0 @property def placement(self): return self._placement.zone @property def placement_group(self): 
return self._placement.group_name @property def placement_tenancy(self): return self._placement.tenancy def startElement(self, name, attrs, connection): retval = super(Instance, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'monitoring': self._in_monitoring_element = True elif name == 'blockDeviceMapping': self.block_device_mapping = BlockDeviceMapping() return self.block_device_mapping elif name == 'productCodes': return self.product_codes elif name == 'stateReason': self.state_reason = SubParse('stateReason') return self.state_reason elif name == 'groupSet': self.groups = ResultSet([('item', Group)]) return self.groups elif name == "eventsSet": self.eventsSet = SubParse('eventsSet') return self.eventsSet elif name == 'networkInterfaceSet': self.interfaces = ResultSet([('item', NetworkInterface)]) return self.interfaces elif name == 'iamInstanceProfile': self.instance_profile = SubParse('iamInstanceProfile') return self.instance_profile elif name == 'currentState': return self._state elif name == 'previousState': self._previous_state = InstanceState() return self._previous_state elif name == 'instanceState': return self._state elif name == 'placement': return self._placement return None def endElement(self, name, value, connection): if name == 'instanceId': self.id = value elif name == 'imageId': self.image_id = value elif name == 'dnsName' or name == 'publicDnsName': self.dns_name = value self.public_dns_name = value elif name == 'privateDnsName': self.private_dns_name = value elif name == 'keyName': self.key_name = value elif name == 'amiLaunchIndex': self.ami_launch_index = value elif name == 'previousState': self.previous_state = value elif name == 'instanceType': self.instance_type = value elif name == 'rootDeviceName': self.root_device_name = value elif name == 'rootDeviceType': self.root_device_type = value elif name == 'launchTime': self.launch_time = value elif name == 'platform': self.platform = value elif name == 'kernelId': self.kernel = value elif name == 'ramdiskId': self.ramdisk = value elif name == 'state': if self._in_monitoring_element: self.monitoring_state = value if value == 'enabled': self.monitored = True self._in_monitoring_element = False elif name == 'spotInstanceRequestId': self.spot_instance_request_id = value elif name == 'subnetId': self.subnet_id = value elif name == 'vpcId': self.vpc_id = value elif name == 'privateIpAddress': self.private_ip_address = value elif name == 'ipAddress': self.ip_address = value elif name == 'requesterId': self.requester_id = value elif name == 'persistent': if value == 'true': self.persistent = True else: self.persistent = False elif name == 'groupName': if self._in_monitoring_element: self.group_name = value elif name == 'clientToken': self.client_token = value elif name == "eventsSet": self.events = value elif name == 'hypervisor': self.hypervisor = value elif name == 'virtualizationType': self.virtualization_type = value elif name == 'architecture': self.architecture = value elif name == 'ebsOptimized': self.ebs_optimized = (value == 'true') else: setattr(self, name, value) def _update(self, updated): self.__dict__.update(updated.__dict__) def update(self, validate=False, dry_run=False): rs = self.connection.get_all_reservations([self.id], dry_run=dry_run) if len(rs) > 0: r = rs[0] for i in r.instances: if i.id == self.id: self._update(i) elif validate: raise ValueError('%s is not a valid Instance ID' % self.id) return self.state def terminate(self, dry_run=False): rs = 
self.connection.terminate_instances([self.id], dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def stop(self, force=False, dry_run=False): rs = self.connection.stop_instances([self.id], force, dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def start(self, dry_run=False): rs = self.connection.start_instances([self.id], dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def reboot(self, dry_run=False): return self.connection.reboot_instances([self.id], dry_run=dry_run) def get_console_output(self, dry_run=False): return self.connection.get_console_output(self.id, dry_run=dry_run) def confirm_product(self, product_code, dry_run=False): return self.connection.confirm_product_instance( self.id, product_code, dry_run=dry_run )
MIT License
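A hedged boto 2 sketch of the use_ip call above; the region, instance ID, and addresses are placeholders, and obtaining the connection this way assumes EC2 credentials are already configured.

# Hedged sketch: bind an Elastic IP to a running instance with Instance.use_ip.
import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")                       # placeholder region
reservation = conn.get_all_reservations(["i-0123456789abcdef0"])[0]  # placeholder instance id
instance = reservation.instances[0]

address = conn.allocate_address()          # a boto.ec2.address.Address
instance.use_ip(address)                   # Address objects are unwrapped to their public_ip
instance.use_ip("203.0.113.10", dry_run=True)   # plain IP strings work as well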
diofant/diofant
diofant/sets/sets.py
Set._infimum_key
python
def _infimum_key(expr): try: infimum = expr.inf assert infimum.is_comparable except (NotImplementedError, AttributeError, AssertionError, ValueError): infimum = oo return infimum
Return infimum (if possible) else oo.
https://github.com/diofant/diofant/blob/05c50552b0e0533f1dbf2ec05e65b6c45b7e2c11/diofant/sets/sets.py#L48-L56
import itertools import typing from mpmath import mpf, mpi from ..core import Basic, Eq, Expr, Mul, S, nan, oo, zoo from ..core.compatibility import iterable from ..core.decorators import _sympifyit from ..core.evalf import EvalfMixin from ..core.evaluate import global_evaluate from ..core.singleton import Singleton from ..core.sympify import sympify from ..logic import And, Not, Or, false, true from ..utilities import ordered, subsets from .contains import Contains class Set(Basic): is_number = False is_iterable = False is_interval = False is_FiniteSet = False is_Interval = False is_ProductSet = False is_Union = False is_Intersection: typing.Optional[bool] = None is_EmptySet: typing.Optional[bool] = None is_UniversalSet: typing.Optional[bool] = None is_Complement: typing.Optional[bool] = None is_SymmetricDifference: typing.Optional[bool] = None @staticmethod
BSD 3-Clause New or Revised License
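A short sketch of what the static helper above is for: it supplies a sort key that falls back to oo when a set has no comparable infimum. The constructors used are standard diofant exports.

# Hedged sketch: order sets by their infimum via Set._infimum_key.
from diofant import FiniteSet, Interval, oo
from diofant.sets.sets import Set

sets = [Interval(3, 5), FiniteSet(-2, 7), Interval(0, oo)]
print(sorted(sets, key=Set._infimum_key))
# Sorted by inf: -2 (FiniteSet), 0 (Interval(0, oo)), 3 (Interval(3, 5)).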
zachchristensen28/ta-opnsense
bin/ta_opnsense/aob_py3/cloudconnectlib/core/ext.py
regex_not_match
python
def regex_not_match(pattern, source, flags=0): return not regex_match(pattern, source, flags)
Determine whether a string is not match a regex pattern. :param pattern: regex expression :param source: candidate to match regex :param flags: flags for regex match :return: `True` if candidate not match pattern else `False`
https://github.com/zachchristensen28/ta-opnsense/blob/fc736f4c6f0fa7866b4f6d2dcf9761b6b693d6cf/bin/ta_opnsense/aob_py3/cloudconnectlib/core/ext.py#L69-L78
from builtins import str from builtins import range import calendar import json import re import traceback from collections import Iterable from datetime import datetime import six from jsonpath_ng import parse from .exceptions import FuncException, StopCCEIteration, QuitJobError from .pipemgr import PipeManager from ..common import util, log _logger = log.get_cc_logger() def regex_search(pattern, source, flags=0): if not isinstance(source, six.string_types): _logger.warning('Cannot apply regex search on non-string: %s', type(source)) return {} try: matches = re.search(pattern=pattern, string=source, flags=flags) except Exception: _logger.warning('Unable to search pattern=%s and flags=%s in string, error=%s', pattern, flags, traceback.format_exc()) return {} else: return matches.groupdict() if matches else {} def regex_match(pattern, source, flags=0): try: return re.match(pattern, source, flags) is not None except Exception: _logger.warning( 'Unable to match source with pattern=%s, cause=%s', pattern, traceback.format_exc() ) return False
MIT License
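The helper above simply negates regex_match, so it is True whenever the candidate does not match (or cannot be matched at all). A quick illustrative sketch, assuming the vendored package is importable as cloudconnectlib:

# Hedged sketch for regex_not_match.
import re
from cloudconnectlib.core.ext import regex_not_match   # import path is an assumption

print(regex_not_match(r"^\d+$", "12345"))                        # False -- it matches
print(regex_not_match(r"^\d+$", "12a45"))                        # True  -- no match
print(regex_not_match(r"error", "ERROR", flags=re.IGNORECASE))   # False -- case-insensitive match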
elad661/picdescbot
picdescbot/common.py
NonClosingBytesIO.close
python
def close(self, really=False): if really: return super().close()
Close the BytesIO object, but only if you're really sure
https://github.com/elad661/picdescbot/blob/07d50d32201f4cd52ef1435e716ecf59624694f1/picdescbot/common.py#L329-L332
from __future__ import unicode_literals, absolute_import, print_function from wordfilter import Wordfilter import json import re import requests import time import lxml.html from . import logger from io import BytesIO log = logger.get("common") MEDIAWIKI_API = "https://commons.wikimedia.org/w/api.php" HEADERS = {"User-Agent": "picdescbot, http://github.com/elad661/picdescbot"} supported_formats = re.compile('\.(png|jpe?g|gif)$', re.I) word_filter = Wordfilter() word_filter.add_words(['nazi', 'hitler', 'reich']) extra_filter = {'ape', 'apes', 'monkey', 'monkeys', 'gun'} blacklisted_phrases = {'comic strip', 'logo', 'biblical illustration', 'church', 'historical document', 'donald trump'} category_blacklist = ['september 11', 'hitler', 'nazi', 'antisemit', 'libel', 'apartheid', 'racism', 'lynching', 'cartoons', 'holocaust', 'auschwitz', 'stereotypes', 'flags', 'porn', 'homophobia', 'transphobia', 'logos', 'scans from google books', 'little nemo', 'stolperstein', 'songbird specimens', 'terror', 'bible illustrations', 'jesuit symbols', 'christian symbols', 'symbols of religion', 'symbols of islam', 'jewish symbols', 'pistols', 'corpse', 'victim', 'ultrasound', 'donald trump', 'pascual marín'] gendered_words = {'woman': 'person', 'man': 'person', 'women': 'people', 'man\'s': 'person\'s', 'woman\'s': 'person\'s', 'mans': 'persons', 'womans': 'persons', 'men': 'people', 'guy': 'person', 'boy': 'person', 'girl': 'person', 'boys': 'people', 'girls': 'people', 'lady': 'person', 'ladies': 'people', 'gentleman': 'person', 'gentlemen': 'people', 'female': '', 'male': '', 'she': 'they', 'her': 'their', 'hers': 'theirs', 'herself': 'themself', 'he': 'they', 'him': 'them', 'his': 'their', 'himself': 'themself'} def gender_neutralize(phrase): neutralized = [] for word in phrase.lower().split(): if word in gendered_words: word = gendered_words[word] if word != '': neutralized.append(word) neutralized = ' '.join(neutralized) if neutralized != phrase: log.info('Gender neutralized: "{0}" => "{1}"'.format(phrase, neutralized)) return neutralized tags_blacklist = {'text', 'screenshot', 'military', 'church'} def tag_blacklisted(tags): for tag in tags: if tag in tags_blacklist: return True return False def is_blacklisted(caption): if "a suit and tie" in caption: return True if word_filter.blacklisted(caption): return True for word in caption.split(): if word in extra_filter: return True return False def remove_html_tags(text): return ' '.join(lxml.html.fromstring(text).itertext()) def log_discarded(url, reason, description=None): line = "Discarded {0} because of {1}".format(url, reason) if description is not None: line += ' - "{0}"'.format(description) log.warning(line) def get_picture(filename=None): params = {"action": "query", "prop": "imageinfo|categories|globalusage", "iiprop": "url|size|extmetadata|mediatype", "iiurlheight": "1080", "format": "json"} if filename is None: params['generator'] = 'random' params['grnnamespace'] = '6' else: params['titles'] = 'File:%s' % filename response = requests.get(MEDIAWIKI_API, params=params, headers=HEADERS).json() page = list(response['query']['pages'].values())[0] imageinfo = page['imageinfo'][0] url = imageinfo['url'] extra_metadata = imageinfo['extmetadata'] if imageinfo['mediatype'] != "BITMAP": return None if imageinfo['width'] <= 50 or imageinfo['height'] <= 50: return None if not supported_formats.search(url): return None if word_filter.blacklisted(page['title']): log_discarded(url, 'badword in page title: "{0}"'.format(page['title'])) return None if 
word_filter.blacklisted(extra_metadata['ObjectName']['value']): log_discarded(url, 'badword in picture title: "{0}"'.format(extra_metadata['ObjectName']['value'])) return None if word_filter.blacklisted(extra_metadata['Restrictions']['value']): log_discarded(url, 'badword in restrictions: "{0}"'.format(extra_metadata['Restrictions']['value'])) return None if 'ImageDescription' in extra_metadata: cleaned_description = remove_html_tags(extra_metadata['ImageDescription']['value']) if word_filter.blacklisted(cleaned_description): log_discarded(url, 'badword in image description: "{0}"'.format(cleaned_description)) return None for phrase in blacklisted_phrases: if phrase in cleaned_description.lower().strip(): log_discarded(url, 'blacklisted phrase "{0}" found in description "{1}"'.format(phrase, cleaned_description)) return None extra_categories = extra_metadata['Categories']['value'].lower() for blacklisted_category in category_blacklist: for category in page['categories']: if blacklisted_category in category['title'].lower(): log_discarded(url, 'blacklisted category "{0}"'.format(category['title'])) return None if blacklisted_category in extra_categories: log_discarded(url, 'blacklisted category "{0}" (in extra)'.format(blacklisted_category)) return None for wikipage in page['globalusage']: if word_filter.blacklisted(wikipage['title'].lower()): log_discarded(url, 'page usage "{0}"'.format(wikipage['title'])) return None for blacklisted_category in category_blacklist: if blacklisted_category in wikipage['title']: log_discarded(url, 'page usage "{0}"'.format(wikipage['title'])) return None return imageinfo class CVAPIClient(object): def __init__(self, apikey, endpoint): self.apikey = apikey self.endpoint = endpoint + '/analyze' def describe_picture(self, url): params = {'visualFeatures': 'Description,Adult'} json = {'url': url} headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': self.apikey} result = None retries = 0 while retries < 15 and not result: response = requests.post(self.endpoint, json=json, params=params, headers=headers) if response.status_code == 429: log.error("Error from mscognitive: %s" % (response.json())) if retries < 15: time.sleep(2) retries += 1 else: log.error('failed after retrying!') elif response.status_code == 200 or response.status_code == 201: result = response.json() if response.content else None else: log.error("Error code: %d" % (response.status_code)) log.error("url: %s" % url) try: log.error(response.json()) except: log.error(response.text) retries += 1 sleep = 20 + retries*4 log.info("attempt: {0}, sleeping for {1}".format(retries, sleep)) time.sleep(sleep) return result def get_picture_and_description(self, filename=None, max_retries=20): pic = None retries = 0 while retries <= max_retries: while pic is None: pic = get_picture(filename) if pic is None: time.sleep(1) url = pic['url'] if pic['size'] > 3000000 or pic['width'] > 8192 or pic['height'] > 8192: url = pic['thumburl'] result = self.describe_picture(url) if result is not None: description = result['description'] adult = result['adult'] if not adult['isAdultContent'] and not adult['isRacyContent']: if len(description['captions']) > 0: caption = description['captions'][0]['text'] caption = gender_neutralize(caption) if not is_blacklisted(caption): if not tag_blacklisted(description['tags']): return Result(caption, description['tags'], url, pic['descriptionshorturl']) else: log_discarded(url, "tag blacklist", caption) log.warning('tags: %s' % description['tags']) else: 
log_discarded(url, "caption blacklist", caption) else: log.warning("No caption for url: {0}".format(url)) else: log_discarded(url, "adult content", description['captions']) retries += 1 log.warning("Not good, retrying...") pic = None time.sleep(3) raise Exception("Maximum retries exceeded, no good picture") class NonClosingBytesIO(BytesIO):
MIT License
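A sketch of the guard above: close() is ignored unless really=True, so a library that closes the stream behind your back cannot invalidate the buffer. The import path follows the row's function_path.

# Hedged sketch: NonClosingBytesIO survives an ordinary close().
from picdescbot.common import NonClosingBytesIO

buf = NonClosingBytesIO(b"picture bytes")
buf.close()                  # ignored -- e.g. an uploader "helpfully" closing the stream
print(buf.getvalue())        # still accessible: b'picture bytes'
buf.close(really=True)       # now the buffer is actually released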
trp07/messages
tests/test_telegram.py
get_tgram
python
def get_tgram(): return TelegramBot(auth='34563:ABCDEFG', chat_id='123456', body='message', attachments=['https://url1.com', 'https://url2.com'])
Return a valid TelegramBot object.
https://github.com/trp07/messages/blob/1e347dfe4dc5bb3776334fbfc9b3a04c8ed7007b/tests/test_telegram.py#L16-L19
import pytest import httpx import messages.telegram from messages.telegram import TelegramBot from messages._exceptions import MessageSendError @pytest.fixture()
MIT License
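Since get_tgram is a pytest fixture (see the decorator at the end of the context above), tests receive the constructed bot by naming the fixture as a parameter. A minimal hedged sketch:

# Hedged sketch: consume the get_tgram fixture in a test.
from messages.telegram import TelegramBot

def test_get_tgram_returns_bot(get_tgram):
    # pytest injects the TelegramBot built by the fixture above.
    assert isinstance(get_tgram, TelegramBot)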
swizzlevixen/letterboxd
letterboxd/services/list.py
Lists.create_list
python
def create_list(self, list_creation_request=None): response = self._api.api_call( path="lists", params=list_creation_request, method="POST" ) list_create_response = response.json() logging.debug(list_create_response) return list_create_response
[POST] /lists Create a list. Calls to this endpoint must include the access token for an authenticated member. :param list_creation_request: dict - ListCreationRequest :return: dict - ListCreateResponse
https://github.com/swizzlevixen/letterboxd/blob/4a39ad57ce9dd31f95626f6a943378aaae9cde30/letterboxd/services/list.py#L247-L264
import logging logging.getLogger(__name__) class List(object): def __init__(self, api, list_id=None): self._api = api self._list_id = list_id def details(self, list_id=None): if list_id is None: list_id = self._list_id response = self._api.api_call(path=f"list/{list_id}") return response.json() def update(self, list_id=None, list_update_request=None): if list_id is None: list_id = self._list_id response = self._api.api_call( path=f"list/{list_id}", method="PATCH", params=list_update_request ) return response.json() def delete(self, list_id=None): response = self._api.api_call( path=f"list/{list_id}", method="DELETE", params={} ) if response.status_code is 204: return True else: return False def comments(self, list_id=None, comments_request=None): if list_id is None: list_id = self._list_id response = self._api.api_call(path=f"list/{list_id}/comments") list_comments_response = response.json() return list_comments_response def create_comment(self, list_id=None, comment_creation_request=None): if list_id is None: list_id = self._list_id response = self._api.api_call( path=f"list/{list_id}/comments", method="POST", params=comment_creation_request, ) list_comment = response.json() return list_comment def entries(self, list_id=None, list_entries_request=None): if list_id is None: list_id = self._list_id response = self._api.api_call( path=f"list/{list_id}/entries", params=list_entries_request ) list_entries_response = response.json() return list_entries_response def me(self, list_id=None): if list_id is None: list_id = self._list_id response = self._api.api_call(path=f"list/{list_id}/me") list_relationship = response.json() return list_relationship def me_update(self, list_id=None, list_relationship_update_request=None): if list_id is None: list_id = self._list_id response = self._api.api_call( path=f"list/{list_id}/me", method="PATCH", params=list_relationship_update_request, ) list_relationship_update_response = response.json() return list_relationship_update_response def report(self, list_id=None, report_list_request=None): response = self._api.api_call( path=f"list/{list_id}/report", params=report_list_request, method="POST" ) if response.status_code is 204: return True else: return False def statistics(self, list_id=None): if list_id is None: list_id = self._list_id response = self._api.api_call(path=f"list/{list_id}/statistics") list_statistics = response.json() return list_statistics class Lists(object): def __init__(self, api): self._api = api def lists(self, lists_request=None): response = self._api.api_call(path="lists", params=lists_request) lists_response = response.json() logging.debug(lists_response) return lists_response
MIT License
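A hedged sketch of calling create_list; how the authenticated api object is obtained and the exact ListCreationRequest fields are assumptions modeled on the Letterboxd API naming, not taken from the row.

# Hedged sketch for Lists.create_list; `api` is an authenticated Letterboxd API
# wrapper obtained elsewhere, and the request fields are assumptions.
from letterboxd.services.list import Lists

def create_example_list(api):
    lists_service = Lists(api=api)
    list_creation_request = {
        "name": "Example list",                  # assumed field names
        "published": False,
        "description": "Created from a script",
    }
    return lists_service.create_list(list_creation_request=list_creation_request)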
gmr/mikkoo
mikkoo/mcp.py
MasterControlProgram.__init__
python
def __init__(self, config): self.set_process_name() LOGGER.info('Mikkoo v%s initializing', __version__) super(MasterControlProgram, self).__init__() self.config = config self.last_poll_results = dict() self.poll_data = {'time': 0, 'processes': list()} self.poll_timer = None self.results_timer = None self.stats = dict() self.stats_queue = multiprocessing.Queue() self.polled = False self.workers = dict() for name in config.application.workers.keys(): self.workers[name] = Worker(config.application.workers[name], self.stats_queue) self.poll_interval = config.application.get('poll_interval', self.POLL_INTERVAL)
Initialize the Master Control Program :param helper.config.Config config: Mikkoo Configuration
https://github.com/gmr/mikkoo/blob/1809528f61ca70f222bd0785e7d85a866d27b0a6/mikkoo/mcp.py#L45-L70
import logging import multiprocessing import os import psutil try: import Queue as queue except ImportError: import queue import signal import sys import time from mikkoo import state from mikkoo import worker from mikkoo import __version__ LOGGER = logging.getLogger(__name__) class Worker(object): def __init__(self, config, stats_queue): self.config = config self.process = None self.stats_queue = stats_queue self.unresponsive = 0 class MasterControlProgram(state.State): MAX_UNRESPONSIVE = 3 MAX_SHUTDOWN_WAIT = 10 POLL_INTERVAL = 60.0 POLL_RESULTS_INTERVAL = 3.0 SHUTDOWN_WAIT = 1
BSD 3-Clause New or Revised License
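The constructor above only reads config.application.workers and an optional poll_interval, so a stand-in object with those two attributes is enough to sketch the shape it expects; the worker settings shown are placeholders, and in practice the object comes from mikkoo's helper config loader.

# Hedged sketch of the configuration shape MasterControlProgram.__init__ consumes.
class _FakeApplication(dict):
    # `workers` maps worker names to their per-worker settings (contents are placeholders).
    workers = {"example": {"postgres_url": "postgresql://localhost/example",
                           "rabbitmq_url": "amqp://guest:guest@localhost:5672/%2f"}}

class _FakeConfig:
    application = _FakeApplication(poll_interval=30)   # .get('poll_interval', ...) works on the dict

# from mikkoo.mcp import MasterControlProgram
# mcp = MasterControlProgram(_FakeConfig())            # would create Worker entries and queues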
elfi-dev/elfi
elfi/store.py
OutputPool.__getitem__
python
def __getitem__(self, batch_index): return self.get_batch(batch_index)
Return the batch.
https://github.com/elfi-dev/elfi/blob/07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c/elfi/store.py#L218-L220
import io import logging import os import pickle import shutil import numpy as np import numpy.lib.format as npformat logger = logging.getLogger(__name__) _default_prefix = 'pools' class OutputPool: _pkl_name = '_outputpool.pkl' def __init__(self, outputs=None, name=None, prefix=None): if outputs is None: stores = {} elif isinstance(outputs, dict): stores = outputs else: stores = dict.fromkeys(outputs) self.stores = stores self.batch_size = None self.seed = None self.name = name self.prefix = prefix or _default_prefix if self.path and os.path.exists(self.path): raise ValueError("A pool with this name already exists in {}. You can use " "OutputPool.open() to open it.".format(self.prefix)) @property def output_names(self): return list(self.stores.keys()) @property def has_context(self): return self.seed is not None and self.batch_size is not None def set_context(self, context): if self.has_context: raise ValueError('Context is already set') self.batch_size = context.batch_size self.seed = context.seed if self.name is None: self.name = "{}_{}".format(self.__class__.__name__.lower(), self.seed) def get_batch(self, batch_index, output_names=None): output_names = output_names or self.output_names batch = dict() for output in output_names: store = self.stores[output] if store is None: continue if batch_index in store: batch[output] = store[batch_index] return batch def add_batch(self, batch, batch_index): for node, values in batch.items(): if node not in self.stores: continue store = self._get_store_for(node) if batch_index in store: continue store[batch_index] = values def remove_batch(self, batch_index): for store in self.stores.values(): if batch_index in store: del store[batch_index] def has_store(self, node): return node in self.stores def get_store(self, node): return self.stores[node] def add_store(self, node, store=None): if node in self.stores and self.stores[node] is not None: raise ValueError("Store for '{}' already exists".format(node)) store = store if store is not None else self._make_store_for(node) self.stores[node] = store def remove_store(self, node): store = self.stores.pop(node) return store def _get_store_for(self, node): if self.stores[node] is None: self.stores[node] = self._make_store_for(node) return self.stores[node] def _make_store_for(self, node): return {} def __len__(self): largest = 0 for output, store in self.stores.items(): if store is None: continue largest = max(largest, len(store)) return largest
BSD 3-Clause New or Revised License
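A small sketch of the indexing sugar above: pool[i] is just get_batch(i). OutputPool, add_batch and get_batch all appear in the context field; the array values are arbitrary.

# Hedged sketch: store one batch and read it back by index.
import numpy as np
from elfi.store import OutputPool

pool = OutputPool(outputs=['sim', 'd'])
pool.add_batch({'sim': np.zeros(3), 'd': np.ones(3)}, batch_index=0)

print(pool[0])                                   # same as pool.get_batch(0)
print(pool.get_batch(0, output_names=['d']))     # restrict to a subset of outputs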
google/android-emulator-container-scripts
emu/android_release_zip.py
AndroidReleaseZip.copy
python
def copy(self, destination): try: return shutil.copy2(self.file_name, destination) except shutil.SameFileError: logging.warning("Will not copy to itself, ignoring..") return self.file_name
Copy the zipfile to the given destination. If the destination is the same as this zipfile, the current path is returned and no copy is made. Args: destination ({string}): The destination to copy this zip to. Returns: {string}: The path where this zip file was copied to
https://github.com/google/android-emulator-container-scripts/blob/022724c0733e23f4f200f0c11e01dd2246dc26ed/emu/android_release_zip.py#L74-L90
import collections import logging import os import shutil import zipfile from tqdm import tqdm from emu.utils import api_codename class AndroidReleaseZip(object): def __init__(self, file_name): self.file_name = file_name if not zipfile.is_zipfile(file_name): raise Exception("{} is not a zipfile!".format(file_name)) with zipfile.ZipFile(file_name, "r") as zip_file: self.props = collections.defaultdict(set) files = [x for x in zip_file.infolist() if "source.properties" in x.filename or "build.prop" in x.filename] for file in files: for key, value in self._unpack_properties(zip_file, file).items(): self.props[key] = value def _unpack_properties(self, zip_file, zip_info): prop = zip_file.read(zip_info).decode("utf-8").splitlines() res = dict([a.split("=") for a in prop if "=" in a]) return res def __str__(self): return "{}-{}".format(self.description(), self.revision()) def description(self): return self.props.get("Pkg.Desc") def revision(self): return self.props.get("Pkg.Revision") def build_id(self): if "Pkg.BuildId" in self.props: return self.props.get("Pkg.BuildId") return self.revision() def is_system_image(self): return "System Image" in self.description() or "Android SDK Platform" in self.description() def is_emulator(self): return "Android Emulator" in self.description()
Apache License 2.0
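A hedged sketch of the copy helper above; both paths are placeholders and the source must be a real SDK-style zip (it needs a source.properties entry for the constructor to accept it).

# Hedged sketch for AndroidReleaseZip.copy.
from emu.android_release_zip import AndroidReleaseZip

zip_file = AndroidReleaseZip("/tmp/emulator-linux_x64-1234567.zip")   # placeholder path
copied_to = zip_file.copy("/srv/images/")        # regular copy, returns the new path
same_path = zip_file.copy(zip_file.file_name)    # same file: warning logged, original path returned
print(copied_to, same_path)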
reliaqualassociates/ramstk
src/ramstk/views/gtk3/hazard_analysis/view.py
HazardsWorkView._do_set_record_id
python
def _do_set_record_id(self, attributes: Dict[str, Any]) -> None: self._record_id = attributes["hazard_id"]
Set the record ID when a hazard is selected. :param attributes: the hazard dict for the selected hazard ID. :return: None :rtype: None
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/hazard_analysis/view.py#L121-L128
from typing import Any, Dict from pubsub import pub from ramstk.configuration import RAMSTKUserConfiguration from ramstk.logger import RAMSTKLogManager from ramstk.views.gtk3 import Gtk, _ from ramstk.views.gtk3.widgets import RAMSTKPanel, RAMSTKWorkView from . import HazardsTreePanel class HazardsWorkView(RAMSTKWorkView): _tag: str = "hazard" _tablabel: str = _("HazOps") _tabtooltip: str = _("Displays the HazOps analysis for the selected Function.") def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__(configuration, logger) self._lst_callbacks.insert(0, super().do_request_insert_sibling) self._lst_callbacks.insert(1, self.do_request_delete) self._lst_callbacks.insert(2, self._do_request_calculate) self._lst_icons.insert(0, "add") self._lst_icons.insert(1, "remove") self._lst_icons.insert(2, "calculate") self._lst_mnu_labels = [ _("Add Hazard"), _("Delete Selected Hazard"), _("Calculate HazOp"), _("Save Selected Hazard"), _("Save All Hazards"), ] self._lst_tooltips = [ _("Add a new hazard to the HazOps analysis."), _("Delete the selected hazard from the selected function."), _("Calculate the HazOps analysis."), _("Save changes to the selected hazard."), _("Save changes to all hazards."), ] self._pnlPanel: RAMSTKPanel = HazardsTreePanel() self.__make_ui() pub.subscribe(self._do_set_record_id, "selected_hazard") pub.subscribe(self._on_select_function, "selected_function") def _do_request_calculate(self, __button: Gtk.ToolButton) -> None: super().do_set_cursor_busy() pub.sendMessage("request_calculate_fha", node_id=self._record_id)
BSD 3-Clause New or Revised License
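The handler above is wired to the selected_hazard pubsub topic in __init__, so a sketch of the message that drives it is enough to show the flow; the attribute dict below carries only the one key the handler reads.

# Hedged sketch: publishing "selected_hazard" updates the work view's record id.
from pubsub import pub

pub.sendMessage("selected_hazard", attributes={"hazard_id": 42})
# Inside a live HazardsWorkView this sets self._record_id = 42, which the
# calculate/save callbacks then pass along as the node id.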
pvlib/pvanalytics
pvanalytics/quality/time.py
spacing
python
def spacing(times, freq): if not isinstance(freq, pd.Timedelta): freq = pd.Timedelta(freq) delta = times.to_series().diff() delta.iloc[0] = freq return delta == freq
Check that the spacing between `times` conforms to `freq`.

Parameters
----------
times : DatetimeIndex
freq : string or Timedelta
    Expected frequency of `times`.

Returns
-------
Series
    True when the difference between one time and the time before it
    conforms to `freq`.

Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
https://github.com/pvlib/pvanalytics/blob/a3d3860ee848afd5cb7184b36beedcd14dbbdc60/pvanalytics/quality/time.py#L8-L38
import warnings

import pandas as pd
import numpy as np
from scipy import stats
MIT License
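A short usage sketch for spacing(), assuming pvanalytics is installed; the timestamps are made up, and the third one breaks an otherwise regular 1-minute index.

import pandas as pd
from pvanalytics.quality.time import spacing

times = pd.DatetimeIndex([
    "2020-01-01 00:00", "2020-01-01 00:01",
    "2020-01-01 00:03",  # 2-minute jump
    "2020-01-01 00:04",
])
flags = spacing(times, "1min")
print(flags.values)  # [ True  True False  True]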
executablebooks/mdformat
src/mdformat/renderer/_context.py
_wrap
python
def _wrap(text: str, *, width: int | Literal["no"]) -> str:
    text, replacements = _prepare_wrap(text)
    if width == "no":
        return _recover_preserve_chars(text, replacements)
    wrapper = textwrap.TextWrapper(
        break_long_words=False,
        break_on_hyphens=False,
        width=width,
        expand_tabs=False,
        replace_whitespace=False,
    )
    wrapped = wrapper.fill(text)
    wrapped = _recover_preserve_chars(wrapped, replacements)
    return " " + wrapped if text.startswith(" ") else wrapped
Wrap text at locations pointed by `WRAP_POINT`s.

Converts `WRAP_POINT`s to either a space or newline character, thus
wrapping the text. Already existing whitespace will be preserved as is.
https://github.com/executablebooks/mdformat/blob/07f0b51998201ed1877bda023f61141a89e9b481/src/mdformat/renderer/_context.py#L319-L339
from __future__ import annotations from collections import defaultdict from collections.abc import Generator, Iterable, Mapping, MutableMapping from contextlib import contextmanager import logging import re import textwrap from types import MappingProxyType from typing import TYPE_CHECKING, Any, NamedTuple from mdformat import codepoints from mdformat._compat import Literal from mdformat._conf import DEFAULT_OPTS from mdformat.renderer._util import ( RE_CHAR_REFERENCE, decimalify_leading, decimalify_trailing, escape_asterisk_emphasis, escape_underscore_emphasis, get_list_marker_type, is_text_inside_autolink, is_tight_list, is_tight_list_item, longest_consecutive_sequence, maybe_add_link_brackets, ) from mdformat.renderer.typing import Postprocess, Render if TYPE_CHECKING: from mdformat.renderer import RenderTreeNode LOGGER = logging.getLogger(__name__) WRAP_POINT = "\x00" PRESERVE_CHAR = "\x00" def make_render_children(separator: str) -> Render: def render_children( node: RenderTreeNode, context: RenderContext, ) -> str: return separator.join(child.render(context) for child in node.children) return render_children def hr(node: RenderTreeNode, context: RenderContext) -> str: thematic_break_width = 70 return "_" * thematic_break_width def code_inline(node: RenderTreeNode, context: RenderContext) -> str: code = node.content all_chars_are_whitespace = not code.strip() longest_backtick_seq = longest_consecutive_sequence(code, "`") if longest_backtick_seq: separator = "`" * (longest_backtick_seq + 1) return f"{separator} {code} {separator}" if code.startswith(" ") and code.endswith(" ") and not all_chars_are_whitespace: return f"` {code} `" return f"`{code}`" def html_block(node: RenderTreeNode, context: RenderContext) -> str: content = node.content.rstrip("\n") content = content.lstrip() return content def html_inline(node: RenderTreeNode, context: RenderContext) -> str: return node.content def _in_block(block_name: str, node: RenderTreeNode) -> bool: while node.parent: if node.parent.type == block_name: return True node = node.parent return False def hardbreak(node: RenderTreeNode, context: RenderContext) -> str: if _in_block("heading", node): return "<br /> " return "\\" + "\n" def softbreak(node: RenderTreeNode, context: RenderContext) -> str: if context.do_wrap and _in_block("paragraph", node): return WRAP_POINT return "\n" def text(node: RenderTreeNode, context: RenderContext) -> str: text = node.content if is_text_inside_autolink(node): return text text = text.replace("\\", "\\\\") text = escape_asterisk_emphasis(text) text = escape_underscore_emphasis(text) text = text.replace("[", "\\[") text = text.replace("]", "\\]") text = text.replace("<", "\\<") text = text.replace("`", "\\`") text = RE_CHAR_REFERENCE.sub(r"\\\1", text) text = text.replace("\n\n", "&#10;&#10;") next_sibling = node.next_sibling if text.endswith("!") and next_sibling and next_sibling.type == "link": text = text[:-1] + "\\!" 
if context.do_wrap and _in_block("paragraph", node): text = re.sub(r"\s+", WRAP_POINT, text) return text def fence(node: RenderTreeNode, context: RenderContext) -> str: info_str = node.info.strip() lang = info_str.split(maxsplit=1)[0] if info_str else "" code_block = node.content if "`" in info_str or "~" in info_str: fence_char = "~" else: fence_char = "`" if lang in context.options.get("codeformatters", {}): fmt_func = context.options["codeformatters"][lang] try: code_block = fmt_func(code_block, info_str) except Exception: assert node.map is not None, "A fence token must have `map` attribute set" LOGGER.warning( f"Failed formatting content of a {lang} code block " f"(line {node.map[0] + 1} before formatting)" ) fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1) fence_str = fence_char * fence_len return f"{fence_str}{info_str}\n{code_block}{fence_str}" def code_block(node: RenderTreeNode, context: RenderContext) -> str: return fence(node, context) def image(node: RenderTreeNode, context: RenderContext) -> str: description = _render_inline_as_text(node, context) if context.do_wrap: description = description.replace(WRAP_POINT, " ") ref_label = node.meta.get("label") if ref_label: context.env["used_refs"].add(ref_label) ref_label_repr = ref_label.lower() if description.lower() == ref_label_repr: return f"![{description}]" return f"![{description}][{ref_label_repr}]" uri = node.attrs["src"] assert isinstance(uri, str) uri = maybe_add_link_brackets(uri) title = node.attrs.get("title") if title is not None: return f'![{description}]({uri} "{title}")' return f"![{description}]({uri})" def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str: def text_renderer(node: RenderTreeNode, context: RenderContext) -> str: return node.content def image_renderer(node: RenderTreeNode, context: RenderContext) -> str: return _render_inline_as_text(node, context) inline_renderers: Mapping[str, Render] = defaultdict( lambda: make_render_children(""), { "text": text_renderer, "image": image_renderer, "link": link, "softbreak": softbreak, }, ) inline_context = RenderContext( inline_renderers, context.postprocessors, context.options, context.env ) return make_render_children("")(node, inline_context) def link(node: RenderTreeNode, context: RenderContext) -> str: if node.info == "auto": autolink_url = node.attrs["href"] assert isinstance(autolink_url, str) if autolink_url.startswith("mailto:") and not node.children[ 0 ].content.startswith("mailto:"): autolink_url = autolink_url[7:] return "<" + autolink_url + ">" text = "".join(child.render(context) for child in node.children) if context.do_wrap: text = text.replace(WRAP_POINT, " ") ref_label = node.meta.get("label") if ref_label: context.env["used_refs"].add(ref_label) ref_label_repr = ref_label.lower() if text.lower() == ref_label_repr: return f"[{text}]" return f"[{text}][{ref_label_repr}]" uri = node.attrs["href"] assert isinstance(uri, str) uri = maybe_add_link_brackets(uri) title = node.attrs.get("title") if title is None: return f"[{text}]({uri})" assert isinstance(title, str) title = title.replace('"', '\\"') return f'[{text}]({uri} "{title}")' def em(node: RenderTreeNode, context: RenderContext) -> str: text = make_render_children(separator="")(node, context) indicator = node.markup return indicator + text + indicator def strong(node: RenderTreeNode, context: RenderContext) -> str: text = make_render_children(separator="")(node, context) indicator = node.markup return indicator + text + indicator def 
heading(node: RenderTreeNode, context: RenderContext) -> str: text = make_render_children(separator="")(node, context) if node.markup == "=": prefix = "# " elif node.markup == "-": prefix = "## " else: prefix = node.markup + " " text = text.replace("\n", " ") if text.endswith("#"): text = text[:-1] + "\\#" return prefix + text def blockquote(node: RenderTreeNode, context: RenderContext) -> str: marker = "> " with context.indented(len(marker)): text = make_render_children(separator="\n\n")(node, context) lines = text.splitlines() if not lines: return ">" quoted_lines = (f"{marker}{line}" if line else ">" for line in lines) quoted_str = "\n".join(quoted_lines) return quoted_str
MIT License
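The non-obvious part of _wrap above is the TextWrapper configuration: breaking is disabled everywhere except at the whitespace derived from WRAP_POINT characters. Below is a self-contained sketch of just that step; the _prepare_wrap / _recover_preserve_chars bookkeeping is omitted and the sample text is invented.

import textwrap

WRAP_POINT = "\x00"

# Pretend the earlier pass already marked the allowed break locations.
text = "A paragraph whose words may only break at wrap points".replace(" ", WRAP_POINT)

wrapper = textwrap.TextWrapper(
    break_long_words=False,    # never split inside a word
    break_on_hyphens=False,    # hyphens are not break opportunities
    width=24,
    expand_tabs=False,
    replace_whitespace=False,  # keep pre-existing whitespace untouched
)
print(wrapper.fill(text.replace(WRAP_POINT, " ")))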
mitgcm/xmitgcm
xmitgcm/mds_store.py
open_mdsdataset
python
def open_mdsdataset(data_dir, grid_dir=None, iters='all', prefix=None, read_grid=True, delta_t=1, ref_date=None, calendar='gregorian', levels=None, geometry='sphericalpolar', grid_vars_to_coords=True, swap_dims=None, endian=">", chunks=None, ignore_unknown_vars=False, default_dtype=None, nx=None, ny=None, nz=None, llc_method="smallchunks", extra_metadata=None, extra_variables=None): frame = inspect.currentframe() _, _, _, arg_values = inspect.getargvalues(frame) del arg_values['frame'] function_name = inspect.getframeinfo(frame)[2] if swap_dims is None: if read_grid == False: swap_dims = False else: swap_dims = False if geometry in ( 'llc', 'cs', 'curvilinear') else True if swap_dims and not read_grid: raise ValueError("If swap_dims==True, read_grid must be True.") if type(prefix) in stringtypes: prefix = [prefix] else: pass if levels is not None and nz is not None: warnings.warn('levels has been set, nz will be ignored.') nz = None if isinstance(levels, slice): levels = np.arange(levels.start, levels.stop) if iters == 'all': iters = _get_all_iternums(data_dir, file_prefixes=prefix) if iters is None: iternum = None else: try: iternum = int(iters) except TypeError: if len(iters) == 1 and levels is None: iternum = int(iters[0]) else: first_prefixes = prefix or _get_all_matching_prefixes( data_dir, iters[0]) for iternum in iters: these_prefixes = _get_all_matching_prefixes( data_dir, iternum, prefix ) if set(these_prefixes) != set(first_prefixes): raise IOError("Could not find the expected file " "prefixes %s at iternum %g. (Instead " "found %s)" % (repr(first_prefixes), iternum, repr(these_prefixes))) chunks = chunks or {} kwargs = dict( grid_dir=grid_dir, delta_t=delta_t, swap_dims=False, prefix=prefix, ref_date=ref_date, calendar=calendar, geometry=geometry, grid_vars_to_coords=False, endian=endian, chunks=chunks, ignore_unknown_vars=ignore_unknown_vars, default_dtype=default_dtype, nx=nx, ny=ny, nz=nz, llc_method=llc_method, levels=levels, extra_metadata=extra_metadata, extra_variables=extra_variables) datasets = [open_mdsdataset( data_dir, iters=iternum, read_grid=False, **kwargs) for iternum in iters] if read_grid: if 'iters' in kwargs: kwargs.pop('iters') if 'read_grid' in kwargs: kwargs.pop('read_grid') if levels is not None: kwargs.pop('nz') kwargs.pop('levels') grid_dataset = open_mdsdataset(data_dir, iters=None, read_grid=True, **kwargs) if levels is not None: grid_dataset = grid_dataset.isel(**{coord: levels for coord in ['k', 'k_l', 'k_u', 'k_p1']}) datasets.insert(0, grid_dataset) if sys.version_info[0] < 3: ds = xr.auto_combine(datasets) elif xr.__version__ < '0.15.2': ds = xr.combine_by_coords(datasets) elif xr.__version__ < '0.18.0': ds = xr.combine_by_coords(datasets, compat='override', coords='minimal', combine_attrs='drop') else: ds = xr.combine_by_coords(datasets, compat='override', coords='minimal', combine_attrs='drop_conflicts') if swap_dims: ds = _swap_dimensions(ds, geometry) if grid_vars_to_coords: ds = _set_coords(ds) return ds store = _MDSDataStore(data_dir, grid_dir, iternum, delta_t, read_grid, prefix, ref_date, calendar, geometry, endian, ignore_unknown_vars=ignore_unknown_vars, default_dtype=default_dtype, nx=nx, ny=ny, nz=nz, llc_method=llc_method, levels=levels, extra_metadata=extra_metadata, extra_variables=extra_variables) ds = xr.Dataset.load_store(store) if swap_dims: ds = _swap_dimensions(ds, geometry) if grid_vars_to_coords: ds = _set_coords(ds) if 'time' in ds: ds['time'] = xr.decode_cf(ds[['time']])['time'] if chunks is not None: ds = 
ds.chunk(chunks) ds.attrs['Conventions'] = "CF-1.6" ds.attrs['title'] = "netCDF wrapper of MITgcm MDS binary data" ds.attrs['source'] = "MITgcm" arg_string = ', '.join(['%s=%s' % (str(k), repr(v)) for (k, v) in arg_values.items()]) ds.attrs['history'] = ('Created by calling ' '`%s(%s)`'% (function_name, arg_string)) return ds
Open MITgcm-style mds (.data / .meta) file output as an xarray Dataset.

Parameters
----------
data_dir : string
    Path to the directory where the mds .data and .meta files are stored
grid_dir : string, optional
    Path to the directory where the mds .data and .meta files are stored,
    if different from ``data_dir``.
iters : list, optional
    The iteration numbers of the files to be read. If ``None``, no data
    files will be read. If ``'all'`` (default), all iterations will be read.
prefix : list, optional
    List of different filename prefixes to read. Default (``None``) is to
    read all available files.
read_grid : bool, optional
    Whether to read the grid data
delta_t : number, optional
    The timestep used in the model. (Can't be inferred.)
ref_date : string, optional
    An ISO date string corresponding to the zero timestep,
    e.g. "1990-1-1 0:0:0" (See CF conventions [1]_)
calendar : string, optional
    A calendar allowed by CF conventions [1]_
levels : list or slice, optional
    A list or slice of the indexes of the grid levels to read.
    Same syntax as in the data.diagnostics file.
geometry : {'sphericalpolar', 'cartesian', 'llc', 'curvilinear', 'cs'}
    MITgcm grid geometry specifier
grid_vars_to_coords : boolean, optional
    Whether to promote grid variables to coordinate status
swap_dims : boolean, optional
    Whether to swap the logical dimensions for physical ones. If ``None``,
    will be set to ``False`` for ``geometry==llc`` and ``True`` otherwise.
endian : {'=', '>', '<'}, optional
    Endianness of variables. Default for MITgcm is ">" (big endian)
chunks : int or dict, optional
    If chunks is provided, it is used to load the new dataset into dask
    arrays.
ignore_unknown_vars : boolean, optional
    Don't raise an error if unknown variables are encountered while
    reading the dataset.
default_dtype : numpy.dtype, optional
    A datatype to fall back on if the metadata can't be read.
nx, ny, nz : int, optional
    The numerical dimensions of the model. These will be inferred from
    ``XC.meta`` and ``RC.meta`` if they are not specified. If
    ``geometry==llc``, ``ny`` does not have to be specified.
llc_method : {"smallchunks", "bigchunks"}, optional
    Which routine to use for reading LLC data. "smallchunks" splits the
    file into an individual dask chunk of size (nx x nx) for each face of
    each level (i.e. the total number of chunks is 13 * nz). "bigchunks"
    loads the whole raw data file (either into memory or as a
    numpy.memmap), splits it into faces, and concatenates those faces
    together using ``dask.array.concatenate``. The different methods will
    have different memory and i/o performance depending on the details of
    the system configuration.
extra_metadata : dict, optional
    Allows passing information on llc-type grids (global or regional).
    The additional metadata typically looks like:

    aste = {'has_faces': True, 'ny': 1350, 'nx': 270,
            'ny_facets': [450, 0, 270, 180, 450],
            'pad_before_y': [90, 0, 0, 0, 0],
            'pad_after_y': [0, 0, 0, 90, 90],
            'face_facets': [0, 0, 2, 3, 4, 4],
            'facet_orders': ['C', 'C', 'C', 'F', 'F'],
            'face_offsets': [0, 1, 0, 0, 0, 1],
            'transpose_face': [False, False, False, True, True, True]}

    For global llc grids, no extra metadata is required and the code will
    fall back to the global llc default configuration.
extra_variables : dict, optional
    Allows passing variables not listed in variables.py or in
    available_diagnostics.log. extra_variables must be a dict containing
    the variable names as keys, with the corresponding values being a dict
    with the keys being dims and attrs.

    Syntax:

    extra_variables = dict(varname=dict(dims=list_of_dims,
                                        attrs=dict(optional_attrs)))

    where optional_attrs can contain standard_name, long_name, units as
    keys.

    Example:

    extra_variables = dict(
        ADJtheta=dict(dims=['k', 'j', 'i'],
                      attrs=dict(standard_name='Sensitivity_to_theta',
                                 long_name='Sensitivity of cost function to theta',
                                 units='[J]/degC'))
    )

Returns
-------
dset : xarray.Dataset
    Dataset object containing all coordinates and variables.

References
----------
.. [1] http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/ch04s04.html
https://github.com/mitgcm/xmitgcm/blob/d500eec80c75d3e2dfc6bb52db32ca6d9c4819c5/xmitgcm/mds_store.py#L53-L304
from __future__ import print_function, division
from glob import glob
import os
import re
import numpy as np
import warnings
from io import StringIO
import inspect
import xarray as xr
import dask.array as da
import sys

from .variables import (dimensions, horizontal_coordinates_spherical,
                        horizontal_coordinates_cartesian,
                        horizontal_coordinates_curvcart,
                        horizontal_coordinates_llc,
                        horizontal_coordinates_cs, vertical_coordinates,
                        horizontal_grid_variables, vertical_grid_variables,
                        volume_grid_variables, state_variables, aliases,
                        package_state_variables, extra_grid_variables,
                        mask_variables)
from .utils import (parse_meta_file, read_mds, parse_available_diagnostics,
                    get_extra_metadata)
from .file_utils import (listdir, listdir_startswith, listdir_endswith,
                         listdir_startsandendswith, listdir_fnmatch)

if (sys.version_info > (3, 0)):
    stringtypes = [str]
else:
    stringtypes = [str, unicode]

try:
    from xarray.core.pycompat import OrderedDict
except ImportError:
    from collections import OrderedDict

LLC_NUM_FACES = 13
CS_NUM_FACES = 6
FACE_DIMNAME = 'face'
MIT License
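A hedged usage sketch for open_mdsdataset(). The run directory, prefixes and timestep below are hypothetical; they depend entirely on how the MITgcm experiment was configured.

import xmitgcm

ds = xmitgcm.open_mdsdataset(
    "./run",                    # directory holding the .data/.meta pairs (hypothetical)
    prefix=["T", "S"],          # hypothetical diagnostic prefixes
    iters="all",
    delta_t=900,                # model timestep in seconds; cannot be inferred
    ref_date="2000-1-1 0:0:0",  # maps iteration numbers to real dates
    geometry="sphericalpolar",
)
print(ds)                       # an xarray.Dataset with grid coordinates and variables
ds = ds.chunk({"time": 1})      # optional: re-chunk lazily with dask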
jpstrydom/crypto-trading-bot
src/trader.py
Trader.initialise
python
def initialise(self):
    try:
        if len(self.Database.app_data["coinPairs"]) < 1:
            self.Database.store_coin_pairs(self.get_markets("BTC"))
        self.Messenger.print_header(len(self.Database.app_data["coinPairs"]))
    except ConnectionError as exception:
        self.Messenger.print_error("connection", [], True)
        logger.exception(exception)
        exit()
Fetch the initial coin pairs to track and print the header line.
https://github.com/jpstrydom/crypto-trading-bot/blob/94b5aab261a35d99bc044267baf4735f0ee3f89a/src/trader.py#L23-L34
import pydash as py_
import time

from bittrex import Bittrex
from messenger import Messenger
from database import Database
from logger import logger


class Trader(object):
    def __init__(self, secrets, settings):
        self.trade_params = settings["tradeParameters"]
        self.pause_params = settings["pauseParameters"]
        self.Bittrex = Bittrex(secrets)
        self.Messenger = Messenger(secrets, settings)
        self.Database = Database()
MIT License
rebiocoder/bioforum
venv/Lib/site-packages/django/core/serializers/xml_serializer.py
Serializer._start_relational_field
python
def _start_relational_field(self, field):
    self.indent(2)
    self.xml.startElement('field', {
        'name': field.name,
        'rel': field.remote_field.__class__.__name__,
        'to': str(field.remote_field.model._meta),
    })
Output the <field> element for relational fields.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/core/serializers/xml_serializer.py#L139-L146
from xml.dom import pulldom from xml.sax import handler from xml.sax.expatreader import ExpatParser as _ExpatParser from django.apps import apps from django.conf import settings from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.xmlutils import ( SimplerXMLGenerator, UnserializableContentError, ) class Serializer(base.Serializer): def indent(self, level): if self.options.get('indent') is not None: self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level) def start_serialization(self): self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET)) self.xml.startDocument() self.xml.startElement("django-objects", {"version": "1.0"}) def end_serialization(self): self.indent(0) self.xml.endElement("django-objects") self.xml.endDocument() def start_object(self, obj): if not hasattr(obj, "_meta"): raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj)) self.indent(1) attrs = {'model': str(obj._meta)} if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): obj_pk = obj.pk if obj_pk is not None: attrs['pk'] = str(obj_pk) self.xml.startElement("object", attrs) def end_object(self, obj): self.indent(1) self.xml.endElement("object") def handle_field(self, obj, field): self.indent(2) self.xml.startElement('field', { 'name': field.name, 'type': field.get_internal_type(), }) if getattr(obj, field.name) is not None: try: self.xml.characters(field.value_to_string(obj)) except UnserializableContentError: raise ValueError("%s.%s (pk:%s) contains unserializable characters" % ( obj.__class__.__name__, field.name, obj.pk)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_fk_field(self, obj, field): self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): related = getattr(obj, field.name) related = related.natural_key() for key_value in related: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_m2m_field(self, obj, field): if field.remote_field.through._meta.auto_created: self._start_relational_field(field) if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): def handle_m2m(value): natural = value.natural_key() self.xml.startElement("object", {}) for key_value in natural: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") self.xml.endElement("object") else: def handle_m2m(value): self.xml.addQuickElement("object", attrs={ 'pk': str(value.pk) }) for relobj in getattr(obj, field.name).iterator(): handle_m2m(relobj) self.xml.endElement("field")
MIT License
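For context, the relational <field> element produced by _start_relational_field appears whenever a model with a foreign key is run through Django's XML serializer. A hedged sketch, assuming a configured Django project; the library app and Book model are hypothetical.

from django.core import serializers
from library.models import Book  # hypothetical app with a ForeignKey to Author

xml = serializers.serialize("xml", Book.objects.all(), indent=2)
print(xml)
# Each foreign key is rendered roughly as:
#   <field name="author" rel="ManyToOneRel" to="library.author">1</field>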
klavinslab/coral
coral/design/_sequence_generation/random_sequences.py
random_dna
python
def random_dna(n):
    return coral.DNA(''.join([random.choice('ATGC') for i in range(n)]))
Generate a random DNA sequence.

:param n: Output sequence length.
:type n: int
:returns: Random DNA sequence of length n.
:rtype: coral.DNA
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_sequence_generation/random_sequences.py#L7-L16
import random

import coral
from coral.constants.molecular_bio import CODON_FREQ_BY_AA
MIT License
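Usage sketch for random_dna(). If coral is not installed, the same sequence logic can be reproduced with the standard library alone; the seed below is there only to make the example repeatable.

import random

random.seed(0)  # repeatability only
seq = ''.join(random.choice('ATGC') for _ in range(20))
print(seq, len(seq))  # a 20-base string drawn uniformly from A/T/G/C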
google/personfinder
app/tasks.py
CleanUpInTestMode.schedule_next_task
python
def schedule_next_task(self, cursor, utcnow):
    self.add_task_for_repo(
        self.repo,
        self.task_name(),
        self.ACTION,
        utcnow=str(calendar.timegm(utcnow.utctimetuple())),
        cursor=cursor,
        queue_name='clean_up_in_test_mode')
Schedule the next task to carry on with this query.
https://github.com/google/personfinder/blob/475f4c0ce916036d39bae2d480cde07126550875/app/tasks.py#L80-L89
import calendar import copy import datetime import logging import time import StringIO from google.appengine import runtime from google.appengine.api import datastore_errors from google.appengine.api import quota from google.appengine.api import taskqueue from google.appengine.ext import db import cloud_storage import config import const import model import photo import pfif import record_writer import utils CPU_MEGACYCLES_PER_REQUEST = 1000 FETCH_LIMIT = 100 PFIF = pfif.PFIF_VERSIONS[pfif.PFIF_DEFAULT_VERSION] class CleanUpInTestMode(utils.BaseHandler): repo_required = False ACTION = 'tasks/clean_up_in_test_mode' https_required = False DELETION_AGE_SECONDS = 24 * 3600 def __init__(self, request, response, env): utils.BaseHandler.__init__(self, request, response, env) self.__listener = None def task_name(self): return 'clean-up-in-test-mode'
Apache License 2.0
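A small worked example of the timestamp encoding used when schedule_next_task re-queues itself above: calendar.timegm() interprets the struct_time as UTC, so the value survives a round trip through utcfromtimestamp() down to the second. The datetime below is arbitrary.

import calendar
import datetime

utcnow = datetime.datetime(2024, 5, 1, 12, 30, 15)
stamp = str(calendar.timegm(utcnow.utctimetuple()))
print(stamp)                                           # '1714566615'
print(datetime.datetime.utcfromtimestamp(int(stamp)))  # 2024-05-01 12:30:15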