repository_name | function_path | function_identifier | language | function | docstring | function_url | context | license
---|---|---|---|---|---|---|---|---|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py
|
state._get_preempt_delay
|
python
|
def _get_preempt_delay(self):
return self.__preempt_delay
|
Getter method for preempt_delay, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/preempt_delay (uint16)
YANG Description: Set the delay the higher priority router waits
before preempting
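A minimal usage sketch, assuming pyangbind and its dependencies (six, bitarray) are importable; instantiating this generated container directly is only for illustration, as the class is normally reached through the full openconfig binding:
from napalm_yang.models.openconfig.interfaces.interface.routed_vlan.ipv6.addresses.address.vrrp.vrrp_group.state import state
s = state()
s._get_preempt_delay()  # -> 0, the uint16 default defined for preempt-delay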
|
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py#L609-L616
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
__slots__ = (
"_path_helper",
"_extmethods",
"__virtual_router_id",
"__virtual_address",
"__priority",
"__preempt",
"__preempt_delay",
"__accept_mode",
"__advertisement_interval",
"__current_priority",
"__virtual_link_local",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__virtual_router_id = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
self.__virtual_address = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=False,
)
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
self.__preempt = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=False,
)
self.__preempt_delay = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..3600"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
0
),
is_leaf=True,
yang_name="preempt-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=False,
)
self.__accept_mode = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="accept-mode",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=False,
)
self.__advertisement_interval = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["1..4095"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
100
),
is_leaf=True,
yang_name="advertisement-interval",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=False,
)
self.__current_priority = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="current-priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
self.__virtual_link_local = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="virtual-link-local",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"routed-vlan",
"ipv6",
"addresses",
"address",
"vrrp",
"vrrp-group",
"state",
]
def _get_virtual_router_id(self):
return self.__virtual_router_id
def _set_virtual_router_id(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """virtual_router_id must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""",
}
)
self.__virtual_router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_virtual_router_id(self):
self.__virtual_router_id = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
def _get_virtual_address(self):
return self.__virtual_address
def _set_virtual_address(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """virtual_address must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ip-address', is_config=False)""",
}
)
self.__virtual_address = t
if hasattr(self, "_set"):
self._set()
def _unset_virtual_address(self):
self.__virtual_address = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=False,
)
def _get_priority(self):
return self.__priority
def _set_priority(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
def _get_preempt(self):
return self.__preempt
def _set_preempt(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """preempt must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""",
}
)
self.__preempt = t
if hasattr(self, "_set"):
self._set()
def _unset_preempt(self):
self.__preempt = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=False,
)
|
Apache License 2.0
|
margitaii/pydeequ
|
src/pydeequ/checks.py
|
Check.isGreaterThan
|
python
|
def isGreaterThan(self, columnA, columnB, assertion = is_one):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.isGreaterThan(
columnA,
columnB,
function,
getattr(self.jvmCheck, "isGreaterThan$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
|
Asserts that, in each row, the value of columnA is greater than the value of columnB
@param columnA Column to run the assertion on
@param columnB Column to run the assertion on
@param assertion Function that receives a double input parameter and returns a boolean
@param hint A hint to provide additional context why a constraint could have failed
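A hypothetical usage sketch (it assumes a SparkSession whose JVM classpath includes the deequ jars; the column names are made up). Note that, as the code shows, this wrapper does not expose the hint parameter mentioned above and always passes the Scala default instead:
check = Check(spark, level='error', description='colA should exceed colB')
check = check.isGreaterThan('colA', 'colB', assertion=lambda fraction: fraction == 1.0)
# Each call returns a new Check wrapping the added JVM constraint, so calls can be chained.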
|
https://github.com/margitaii/pydeequ/blob/ca50132ac63d3be7f7ccc477b0d59db403e7d707/src/pydeequ/checks.py#L722-L744
|
import py4j.java_gateway as jg
from pydeequ.exceptions import JavaClassNotFoundException
import pydeequ.jvm_conversions as jc
import pdb
def is_one(x):
return x == 1
class Check(object):
def __init__(self, SparkSession, level='error', description=None,
jvmCheck=None):
self.spark = SparkSession
self._level = level
self._description = description
if jvmCheck:
self.jvmCheck = jvmCheck
else:
deequ_check = self._jvm.com.amazon.deequ.checks.Check
if not isinstance(deequ_check, jg.JavaClass):
raise JavaClassNotFoundException("com.amazon.deequ.checks.Check")
self.jvmCheck = deequ_check(
self._jvm_level,
self._description,
getattr(deequ_check, "apply$default$3")()
)
@property
def _jvm(self):
return self.spark.sparkContext._jvm
@property
def level(self):
return self._level
@property
def description(self):
return self._description
@property
def _jvm_level(self):
if self._level == 'error':
return self._jvm.com.amazon.deequ.checks.CheckLevel.Error()
elif self._level == 'warning':
return self._jvm.com.amazon.deequ.checks.CheckLevel.Warning()
else:
raise ValueError("Invalid 'level'")
def hasSize(self, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasSize(
function,
getattr(self.jvmCheck, "hasSize$default$2")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def isUnique(self, column):
jvmConstraint = self.jvmCheck.isUnique(
column,
getattr(self.jvmCheck, "isUnique$default$2")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasCompleteness(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasCompleteness(
column,
function,
getattr(self.jvmCheck, "hasCompleteness$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasUniqueness(self, columns, assertion):
if (not isinstance(columns, list)):
columns = [columns]
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasUniqueness(
jc.iterable_to_scala_seq(self._jvm, columns),
function
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasDistinctness(self, columns, assertion):
if (not isinstance(columns, list)):
columns = [columns]
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasDistinctness(
jc.iterable_to_scala_seq(self._jvm, columns),
function,
getattr(self.jvmCheck, "hasDistinctness$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasUniqueValueRatio(self, columns, assertion):
if (not isinstance(columns, list)):
columns = [columns]
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasUniqueValueRatio(
jc.iterable_to_scala_seq(self._jvm, columns),
function,
getattr(self.jvmCheck, "hasUniqueValueRatio$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasNumberOfDistinctValues(self, column, assertion,
binningUdf = None, maxBins = None):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasNumberOfDistinctValues(
column,
function,
getattr(self.jvmCheck, "hasNumberOfDistinctValues$default$3")(),
getattr(self.jvmCheck, "hasNumberOfDistinctValues$default$4")(),
getattr(self.jvmCheck, "hasNumberOfDistinctValues$default$5")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasHistogramValues(self, column, assertion,
binningUdf = None, maxBins = None):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasHistogramValues(
column,
function,
getattr(self.jvmCheck, "hasHistogramValues$default$3")(),
getattr(self.jvmCheck, "hasHistogramValues$default$4")(),
getattr(self.jvmCheck, "hasHistogramValues$default$5")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasEntropy(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasEntropy(
column,
function,
getattr(self.jvmCheck, "hasEntropy$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMutualInformation(self, columnA, columnB, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMutualInformation(
columnA,
columnB,
function,
getattr(self.jvmCheck, "hasMutualInformation$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasApproxQuantile(self, column, quantile, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasApproxQuantile(
column,
quantile,
function,
getattr(self.jvmCheck, "hasApproxQuantile$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMinLength(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMinLength(
column,
function,
getattr(self.jvmCheck, "hasMinLength$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMaxLength(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMaxLength(
column,
function,
getattr(self.jvmCheck, "hasMaxLength$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMin(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMin(
column,
function,
getattr(self.jvmCheck, "hasMin$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMax(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMax(
column,
function,
getattr(self.jvmCheck, "hasMax$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasMean(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasMean(
column,
function,
getattr(self.jvmCheck, "hasMean$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasSum(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasSum(
column,
function,
getattr(self.jvmCheck, "hasSum$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasStandardDeviation(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasStandardDeviation(
column,
function,
getattr(self.jvmCheck, "hasStandardDeviation$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasApproxCountDistinct(self, column, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasApproxCountDistinct(
column,
function,
getattr(self.jvmCheck, "hasApproxCountDistinct$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasCorrelation(self, columnA, columnB, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasCorrelation(
columnA,
columnB,
function,
getattr(self.jvmCheck, "hasCorrelation$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def satisfies(self, columnCondition, constraintName, assertion):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.satisfies(
columnCondition,
constraintName,
function,
getattr(self.jvmCheck, "satisfies$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def hasPattern(self, column, pattern, assertion = is_one):
pass
def hasDataType(self, column, dataType, assertion):
_jconstDataTypes = self._jvm.com.amazon.deequ.constraints.ConstrainableDataTypes
dataTypes = {
'null': _jconstDataTypes.Null(),
'boolean': _jconstDataTypes.Boolean(),
'string': _jconstDataTypes.String(),
'numeric': _jconstDataTypes.Numeric(),
'fractional': _jconstDataTypes.Fractional(),
'integer': _jconstDataTypes.Integral()
}
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.hasDataType(
column,
dataTypes[dataType],
function,
getattr(self.jvmCheck, "hasDataType$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def isPositive(self, column, assertion = is_one):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.isPositive(
column,
function,
getattr(self.jvmCheck, "isPositive$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def isNonNegative(self, column, assertion = is_one):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.isNonNegative(
column,
function,
getattr(self.jvmCheck, "isNonNegative$default$3")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def isLessThan(self, columnA, columnB, assertion = is_one):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.isLessThan(
columnA,
columnB,
function,
getattr(self.jvmCheck, "isLessThan$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
def isLessThanOrEqualTo(self, columnA, columnB, assertion = is_one):
function = jc.scala_function1(self.spark.sparkContext._gateway,
assertion)
jvmConstraint = self.jvmCheck.isLessThanOrEqualTo(
columnA,
columnB,
function,
getattr(self.jvmCheck, "isLessThanOrEqualTo$default$4")()
)
return Check(
self.spark,
self.level,
self.description,
jvmConstraint
)
|
Apache License 2.0
|
creativecommons/cc-licenses
|
licenses/transifex.py
|
TransifexHelper.check_for_translation_updates
|
python
|
def check_for_translation_updates(
self,
update_repo=False,
):
pass
|
This function wraps
check_for_translation_updates_with_repo_and_legal_codes() to make
testing easier. Otherwise, there's no need or reason for it.
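The body is elided in this row; a minimal sketch of how such a wrapper could be shaped (the repository and legal-code lookup below are assumptions for illustration, not taken from the source):
def check_for_translation_updates(self, update_repo=False):
    # Hypothetical: gather inputs and delegate to the helper that does the real work.
    repo = git.Repo(settings.DATA_REPOSITORY_DIR)
    legal_codes = licenses.models.LegalCode.objects.valid().translated()
    return self.check_for_translation_updates_with_repo_and_legal_codes(
        repo, legal_codes, update_repo=update_repo
    )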
|
https://github.com/creativecommons/cc-licenses/blob/c4a85d22673c21777a302321207538b7f1ff27bd/licenses/transifex.py#L995-L1015
|
import logging
from typing import Iterable
import dateutil.parser
import git
import polib
import requests
from django.conf import settings
from transifex.api import transifex_api
import licenses.models
from i18n.utils import (
get_pofile_content,
get_pofile_creation_date,
get_pofile_path,
get_pofile_revision_date,
map_django_to_transifex_language_code,
)
LEGALCODES_KEY = "__LEGALCODES__"
def _empty_branch_object():
return {LEGALCODES_KEY: []}
class TransifexHelper:
def __init__(self, dryrun: bool = True, logger: logging.Logger = None):
self.dryrun = dryrun
self.nop = "<NOP> " if dryrun else ""
self.log = logger if logger else logging.getLogger()
self.organization_slug = settings.TRANSIFEX["ORGANIZATION_SLUG"]
self.project_slug = settings.TRANSIFEX["PROJECT_SLUG"]
self.team_id = settings.TRANSIFEX["TEAM_ID"]
self.project_id = f"o:{self.organization_slug}:p:{self.project_slug}"
self.api = transifex_api
self.api.setup(auth=settings.TRANSIFEX["API_TOKEN"])
self.api_organization = self.api.Organization.get(
slug=self.organization_slug
)
for project in self.api_organization.fetch(
"projects"
):
if project.attributes["slug"] == self.project_slug:
self.api_project = project
break
for i18n_format in self.api.I18nFormat.filter(
organization=self.api_organization
):
if i18n_format.id == "PO":
self.api_i18n_format = i18n_format
break
def get_transifex_resource_stats(self):
stats = {}
resources = sorted(
self.api_project.fetch("resources").all(), key=lambda x: x.id
)
for resource in resources:
r_slug = resource.attributes["slug"]
stats[r_slug] = resource.attributes
return stats
def get_transifex_translation_stats(self):
stats = {}
languages_stats = sorted(
self.api.ResourceLanguageStats.filter(
project=self.api_project
).all(),
key=lambda x: x.id,
)
for l_stats in languages_stats:
resource_slug = l_stats.related["resource"].id.split(":")[-1]
transifex_code = l_stats.related["language"].id.split(":")[-1]
if resource_slug in ["cc-search", "deeds-choosers"]:
continue
if resource_slug not in stats:
stats[resource_slug] = {}
stats[resource_slug][transifex_code] = l_stats.attributes
return stats
@property
def resource_stats(self):
if not hasattr(self, "_resource_stats"):
self._resource_stats = self.get_transifex_resource_stats()
return self._resource_stats
@property
def translation_stats(self):
if not hasattr(self, "_translation_stats"):
self._translation_stats = self.get_transifex_translation_stats()
return self._translation_stats
def clear_transifex_stats(self):
if hasattr(self, "_resource_stats"):
delattr(self, "_resource_stats")
if hasattr(self, "_translation_stats"):
delattr(self, "_translation_stats")
def transifex_get_pofile_content(
self, resource_slug, transifex_code
) -> bytes:
resource = self.api.Resource.get(
project=self.api_project, slug=resource_slug
)
i18n_type = resource.attributes["i18n_type"]
if i18n_type != "PO":
raise ValueError(
f"Transifex {resource_slug} file format is not 'PO'. It is:"
f" {i18n_type}"
)
if transifex_code == settings.LANGUAGE_CODE:
url = self.api.ResourceStringsAsyncDownload.download(
resource=resource,
content_encoding="text",
file_type="default",
)
else:
language = self.api.Language.get(code=transifex_code)
url = self.api.ResourceTranslationsAsyncDownload.download(
resource=resource,
language=language,
mode="translator",
)
pofile_content = requests.get(url).content
return pofile_content
def build_local_data(
self, legal_codes: Iterable["licenses.models.LegalCode"]
):
local_data = {}
resource_name = settings.DEEDS_UX_RESOURCE_NAME
resource_slug = settings.DEEDS_UX_RESOURCE_SLUG
pofile_path = get_pofile_path(
locale_or_legalcode="locale",
language_code=settings.LANGUAGE_CODE,
translation_domain="django",
)
pofile_obj = polib.pofile(pofile_path)
creation_date = get_pofile_creation_date(pofile_obj)
revision_date = get_pofile_revision_date(pofile_obj)
local_data[resource_slug] = {
"name": resource_name,
"pofile_path": pofile_path,
"pofile_obj": pofile_obj,
"creation_date": creation_date,
"revision_date": revision_date,
"translations": {},
}
for (
language_code,
language_data,
) in settings.DEEDS_UX_PO_FILE_INFO.items():
if language_code == settings.LANGUAGE_CODE:
continue
pofile_path = get_pofile_path(
locale_or_legalcode="locale",
language_code=language_code,
translation_domain="django",
)
pofile_obj = polib.pofile(pofile_path)
creation_date = language_data["creation_date"]
revision_date = language_data["revision_date"]
local_data[resource_slug]["translations"][language_code] = {
"pofile_path": pofile_path,
"pofile_obj": pofile_obj,
"creation_date": creation_date,
"revision_date": revision_date,
}
for legal_code in legal_codes:
resource_name = legal_code.license.identifier()
resource_slug = legal_code.license.resource_slug
if resource_slug in local_data:
continue
pofile_path = legal_code.get_english_pofile_path()
pofile_obj = polib.pofile(pofile_path)
local_data[resource_slug] = {
"name": resource_name,
"pofile_path": pofile_path,
"pofile_obj": pofile_obj,
"creation_date": creation_date,
"revision_date": revision_date,
"translations": {},
}
for legal_code in legal_codes:
resource_slug = legal_code.license.resource_slug
language_code = legal_code.language_code
if language_code == settings.LANGUAGE_CODE:
continue
pofile_path = legal_code.translation_filename()
pofile_obj = polib.pofile(pofile_path)
creation_date = get_pofile_creation_date(pofile_obj)
revision_date = get_pofile_revision_date(pofile_obj)
local_data[resource_slug]["translations"][language_code] = {
"pofile_path": pofile_path,
"pofile_obj": pofile_obj,
"creation_date": creation_date,
"revision_date": revision_date,
}
return local_data
def add_resource_to_transifex(
self,
language_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
transifex_code = map_django_to_transifex_language_code(language_code)
if resource_slug in self.resource_stats.keys():
self.log.debug(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: Transifex already contains resource."
)
return
self.log.warning(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
f" Transifex does not yet contain resource. Creating using"
f" {pofile_path}."
)
if self.dryrun:
return
self.api.Resource.create(
name=resource_name,
slug=resource_slug,
relationships={
"i18n_format": self.api_i18n_format,
"project": self.api_project,
},
)
resource = self.api.Resource.get(
project=self.api_project, slug=resource_slug
)
for entry in pofile_obj:
if entry.msgid == entry.msgstr:
entry.msgstr = ""
pofile_content = get_pofile_content(pofile_obj)
self.api.ResourceStringsAsyncUpload.upload(
resource=resource,
content=pofile_content,
)
self.clear_transifex_stats()
def add_translation_to_transifex_resource(
self,
language_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
transifex_code = map_django_to_transifex_language_code(language_code)
if language_code == settings.LANGUAGE_CODE:
raise ValueError(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: This function,"
" add_translation_to_transifex_resource(), is for"
" translations, not sources."
)
elif resource_slug not in self.resource_stats.keys():
raise ValueError(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: Transifex does not yet contain resource."
" The add_resource_to_transifex() function must be called"
" before this one: add_translation_to_transifex_resource()."
)
elif (
resource_slug in self.translation_stats
and transifex_code in self.translation_stats[resource_slug]
and self.translation_stats[resource_slug][transifex_code].get(
"translated_strings", 0
)
> 0
):
self.log.debug(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: Transifex already contains translation."
)
return
pofile_content = get_pofile_content(pofile_obj)
language = self.api.Language.get(code=transifex_code)
resource = self.api.Resource.get(
project=self.api_project, slug=resource_slug
)
if not self.dryrun:
self.api.ResourceTranslationsAsyncUpload.upload(
resource=resource,
content=pofile_content,
language=language.id,
)
self.clear_transifex_stats()
self.log.info(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: Transifex does not yet contain"
f" translation. Added using {pofile_path}."
)
def normalize_pofile_language(
self,
language_code,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
keys = {
"Language": transifex_code,
"Language-Django": language_code,
"Language-Transifex": transifex_code,
}
all_present_and_correct = True
for key, value in keys.items():
if pofile_obj.metadata.get(key, "") != value:
all_present_and_correct = False
if all_present_and_correct:
return pofile_obj
for key, value in keys.items():
if pofile_obj.metadata.get(key, "") != value:
self.log.info(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}:"
f" Correcting PO file '{key}':"
f"\n{pofile_path}: New Value: '{transifex_code}'"
)
if not self.dryrun:
pofile_obj.metadata[key] = value
if self.dryrun:
return pofile_obj
pofile_obj.save(pofile_path)
return pofile_obj
def normalize_pofile_language_team(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
key = "Language-Team"
if transifex_code == settings.LANGUAGE_CODE:
translation_team = (
f"https://www.transifex.com/{self.organization_slug}/"
f"{self.project_slug}/"
)
else:
translation_team = (
f"https://www.transifex.com/{self.organization_slug}/teams/"
f"{self.team_id}/{transifex_code}/"
)
if (
key in pofile_obj.metadata
and pofile_obj.metadata[key] == translation_team
):
return pofile_obj
self.log.info(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
f" Correcting PO file '{key}':"
f"\n{pofile_path}: New Value: '{translation_team}'"
)
if self.dryrun:
return pofile_obj
pofile_obj.metadata[key] = translation_team
pofile_obj.save(pofile_path)
return pofile_obj
def normalize_pofile_last_translator(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
key = "Last-Translator"
filler_data = "FULL NAME <EMAIL@ADDRESS>"
if key not in pofile_obj.metadata:
return pofile_obj
last_translator = pofile_obj.metadata[key]
if last_translator != filler_data:
return pofile_obj
self.log.info(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
f" Correcting PO file '{key}':"
f"\n{pofile_path}: Removing: '{filler_data}'"
)
if self.dryrun:
return pofile_obj
del pofile_obj.metadata[key]
pofile_obj.save(pofile_path)
return pofile_obj
def normalize_pofile_project_id(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
key = "Project-Id-Version"
if pofile_obj.metadata.get(key, None) == resource_slug:
return pofile_obj
self.log.info(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
f" Correcting PO file '{key}':"
f"\n{pofile_path}: New value: '{resource_slug}'"
)
if self.dryrun:
return pofile_obj
pofile_obj.metadata[key] = resource_slug
pofile_obj.save(pofile_path)
return pofile_obj
def normalize_pofile_metadata(
self,
language_code,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
pofile_obj = self.normalize_pofile_language(
language_code,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
pofile_obj = self.normalize_pofile_language_team(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
pofile_obj = self.normalize_pofile_last_translator(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
pofile_obj = self.normalize_pofile_project_id(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
return pofile_obj
def update_pofile_creation_to_match_transifex(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
pofile_creation,
transifex_creation,
):
pad = len(pofile_path)
label = f"Transifex {resource_slug} {transifex_code}"
self.log.info(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
" Correcting PO file 'POT-Creation-Date' to match Transifex:"
f"\n{pofile_path}: {pofile_creation}"
f"\n{label:>{pad}}: {transifex_creation}"
)
if self.dryrun:
return pofile_obj
pofile_obj.metadata["POT-Creation-Date"] = str(transifex_creation)
pofile_obj.save(pofile_path)
return pofile_obj
def update_pofile_revision_to_match_transifex(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
pofile_revision,
transifex_revision,
):
pad = len(pofile_path)
label = f"Transifex {resource_slug} {transifex_code}"
self.log.info(
f"{self.nop}{resource_name} ({resource_slug}) {transifex_code}:"
" Correcting PO file 'PO-Revision-Date' to match Transifex:"
f"\n{label:>{pad}}: {transifex_revision}"
f"\n{pofile_path}: {pofile_revision}"
)
if self.dryrun:
return pofile_obj
pofile_obj.metadata["PO-Revision-Date"] = str(transifex_revision)
pofile_obj.save(pofile_path)
return pofile_obj
def normalize_pofile_dates(
self,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
):
pad = len(pofile_path)
pofile_creation = get_pofile_creation_date(pofile_obj)
pofile_revision = get_pofile_revision_date(pofile_obj)
transifex_creation = dateutil.parser.parse(
self.resource_stats[resource_slug]["datetime_created"]
)
transifex_revision = dateutil.parser.parse(
self.resource_stats[resource_slug]["datetime_modified"]
)
transifex_label = f"{transifex_code} Transifex {resource_slug}.po"
if pofile_creation is None or transifex_creation < pofile_creation:
pofile_obj = self.update_pofile_creation_to_match_transifex(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
pofile_creation,
transifex_creation,
)
elif transifex_creation != pofile_creation:
self.log.error(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: 'POT-Creation-Date' mismatch:"
f"\n{transifex_label:>{pad}}: {transifex_creation}"
f"\n{pofile_path}: {pofile_creation}"
)
if pofile_revision is None:
pofile_obj = self.update_pofile_revision_to_match_transifex(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
pofile_revision,
transifex_revision,
)
elif pofile_revision != transifex_revision:
transifex_pofile_content = self.transifex_get_pofile_content(
resource_slug, transifex_code
)
transifex_pofile_obj = polib.pofile(
pofile=transifex_pofile_content.decode(), encoding="utf-8"
)
po_entries_are_the_same = True
for index, entry in enumerate(pofile_obj):
if pofile_obj[index] != transifex_pofile_obj[index]:
po_entries_are_the_same = False
break
if po_entries_are_the_same:
pofile_obj = self.update_pofile_revision_to_match_transifex(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
pofile_revision,
transifex_revision,
)
else:
self.log.error(
f"{self.nop}{resource_name} ({resource_slug})"
f" {transifex_code}: 'PO-Revision-Date' mismatch:"
f"\n{transifex_label:>{pad}}: {transifex_revision}"
f"\n{pofile_path}: {pofile_revision}"
)
return pofile_obj
def normalize_translations(self):
legal_codes = (
licenses.models.LegalCode.objects.valid()
.translated()
.exclude(language_code=settings.LANGUAGE_CODE)
)
repo = git.Repo(settings.DATA_REPOSITORY_DIR)
if repo.is_dirty():
self.log.warning(f"{self.nop}Repository is dirty.")
local_data = self.build_local_data(legal_codes)
for resource_slug, resource in local_data.items():
language_code = settings.LANGUAGE_CODE
transifex_code = map_django_to_transifex_language_code(
language_code
)
resource_name = resource["name"]
pofile_path = resource["pofile_path"]
pofile_obj = resource["pofile_obj"]
pofile_obj = self.normalize_pofile_metadata(
language_code,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
self.add_resource_to_transifex(
language_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
pofile_obj = self.normalize_pofile_dates(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
for language_code, translation in resource["translations"].items():
transifex_code = map_django_to_transifex_language_code(
language_code
)
pofile_path = translation["pofile_path"]
pofile_obj = translation["pofile_obj"]
pofile_obj = self.normalize_pofile_metadata(
language_code,
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
self.add_translation_to_transifex_resource(
language_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
if transifex_code not in self.translation_stats[resource_slug]:
self.log.critical(
f"{resource_name} ({resource_slug}) {transifex_code}:"
" Language not yet supported by Transifex. Aborting"
" translation language processing."
)
continue
pofile_obj = self.normalize_pofile_dates(
transifex_code,
resource_slug,
resource_name,
pofile_path,
pofile_obj,
)
def check_for_translation_updates_with_repo_and_legal_codes(
self,
repo: git.Repo,
legal_codes: Iterable["licenses.models.LegalCode"],
update_repo=False,
):
pass
|
MIT License
|
flora-network/flora-blockchain
|
tests/core/fixtures.py
|
empty_blockchain
|
python
|
async def empty_blockchain(request):
bc1, connection, db_path = await create_blockchain(test_constants)
yield bc1
await connection.close()
bc1.shut_down()
db_path.unlink()
|
Provides a list of 10 valid blocks, as well as a blockchain with 9 blocks added to it.
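A hypothetical test built on this fixture (pytest-asyncio style assumed):
@pytest.mark.asyncio
async def test_blockchain_starts_empty(empty_blockchain):
    # create_blockchain() asserts the same invariant before the fixture yields.
    assert empty_blockchain.get_peak() is None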
|
https://github.com/flora-network/flora-blockchain/blob/8557b3bd6f71a0d45160e155b3a4731bc5cfd0aa/tests/core/fixtures.py#L40-L49
|
import pickle
from os import path
from pathlib import Path
from typing import List
import aiosqlite
import pytest
from chia.consensus.blockchain import Blockchain
from chia.consensus.constants import ConsensusConstants
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.hint_store import HintStore
from chia.types.full_block import FullBlock
from chia.util.db_wrapper import DBWrapper
from chia.util.path import mkdir
from tests.setup_nodes import bt, test_constants
blockchain_db_counter: int = 0
async def create_blockchain(constants: ConsensusConstants):
global blockchain_db_counter
db_path = Path(f"blockchain_test-{blockchain_db_counter}.db")
if db_path.exists():
db_path.unlink()
blockchain_db_counter += 1
connection = await aiosqlite.connect(db_path)
wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(wrapper)
store = await BlockStore.create(wrapper)
hint_store = await HintStore.create(wrapper)
bc1 = await Blockchain.create(coin_store, store, constants, hint_store)
assert bc1.get_peak() is None
return bc1, connection, db_path
@pytest.fixture(scope="function")
|
Apache License 2.0
|
geopython/geohealthcheck
|
GeoHealthCheck/plugins/probe/wfs3.py
|
WFS3Drilldown.perform_request
|
python
|
def perform_request(self):
oa_feat = None
collections = None
result = Result(True, 'Test Landing Page')
result.start()
try:
oa_feat = Features(self._resource.url,
headers=self.get_request_headers())
except Exception as err:
result.set(False, '%s:%s' % (result.message, str(err)))
result.stop()
self.result.add_result(result)
result = Result(True, 'conformance endpoint exists')
result.start()
try:
set_accept_header(oa_feat, type_for_link(
oa_feat.links, 'conformance'))
oa_feat.conformance()
except Exception as err:
result.set(False, str(err))
result.stop()
self.result.add_result(result)
result = Result(True, 'Get collections')
result.start()
try:
set_accept_header(oa_feat, type_for_link(
oa_feat.links, 'data'))
collections = oa_feat.collections()['collections']
except Exception as err:
result.set(False, '%s:%s' % (result.message, str(err)))
result.stop()
self.result.add_result(result)
result = Result(True, 'Test OpenAPI Doc')
result.start()
try:
set_accept_header(oa_feat, type_for_link(
oa_feat.links, 'service-desc'))
api_doc = oa_feat.api()
for attr in ['components', 'paths', 'openapi']:
val = api_doc.get(attr, None)
if val is None:
msg = 'missing attr: %s' % attr
result = push_result(
self, result, False, msg, 'Test OpenAPI doc')
continue
except Exception as err:
result.set(False, '%s:%s' % (result.message, str(err)))
result.stop()
self.result.add_result(result)
if self._parameters['drilldown_level'] == 'basic':
return
result = Result(True, 'Test Collections')
result.start()
coll_id = ''
try:
for collection in collections:
coll_id = collection['id']
coll_id = coll_id
try:
set_accept_header(oa_feat, type_for_link(
collection['links'], 'self'))
coll = oa_feat.collection(coll_id)
for attr in ['id', 'links']:
val = coll.get(attr, None)
if val is None:
msg = '%s: missing attr: %s' % (coll_id, attr)
result = push_result(
self, result, False, msg, 'Test Collection')
continue
except Exception as e:
msg = 'GetCollection %s: OWSLib err: %s ' % (str(e), coll_id)
result = push_result(
self, result, False, msg, 'Test GetCollection')
continue
try:
set_accept_header(oa_feat, 'application/geo+json')
items = oa_feat.collection_items(coll_id, limit=1)
except Exception as e:
msg = 'GetItems %s: OWSLib err: %s ' % (str(e), coll_id)
result = push_result(
self, result, False, msg, 'Test GetItems')
continue
features = items.get('features', None)
if features is None:
msg = 'GetItems %s: No features attr' % coll_id
result = push_result(
self, result, False, msg, 'Test GetItems')
continue
type = items.get('type', '')
if type != 'FeatureCollection':
msg = '%s:%s type not FeatureCollection: %s' % (coll_id, type, val)
result = push_result(
self, result, False, msg, 'Test GetItems')
continue
if len(items['features']) > 0:
fid = items['features'][0]['id']
try:
item = oa_feat.collection_item(coll_id, fid)
except Exception as e:
msg = 'GetItem %s: OWSLib err: %s' % (str(e), coll_id)
result = push_result(
self, result, False, msg, 'Test GetItem')
continue
for attr in ['id', 'links', 'properties', 'geometry', 'type']:
val = item.get(attr, None)
if val is None:
msg = '%s:%s missing attr: %s' % (coll_id, str(fid), attr)
result = push_result(
self, result, False, msg, 'Test GetItem')
continue
if attr == 'type' and val != 'Feature':
msg = '%s:%s type not Feature: %s' % (coll_id, str(fid), val)
result = push_result(
self, result, False, msg, 'Test GetItem')
continue
except Exception as err:
result.set(False, 'Collection err: %s : e=%s'
% (coll_id, str(err)))
result.stop()
self.result.add_result(result)
|
Perform the drilldown.
See https://github.com/geopython/OWSLib/blob/
master/tests/doctests/wfs3_GeoServerCapabilities.txt
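For illustration, the two module-level helpers this probe relies on behave roughly as follows (the links list is made up):
links = [{'rel': 'conformance', 'type': 'application/json', 'href': 'https://example.org/conformance'}]
type_for_link(links, 'conformance')  # -> 'application/json'
# set_accept_header(oa_feat, 'application/json') then stamps that content type onto the
# owslib Features client's 'Accept' header before the next request is made.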
|
https://github.com/geopython/geohealthcheck/blob/4579011a6b03ca71956596dd4b0de321b08707ae/GeoHealthCheck/plugins/probe/wfs3.py#L95-L261
|
from owslib.ogcapi.features import Features
from openapi_spec_validator import openapi_v3_spec_validator
from GeoHealthCheck.probe import Probe
from GeoHealthCheck.result import Result, push_result
class WFS3Caps(Probe):
NAME = 'OGC API Features (OAFeat) Capabilities'
DESCRIPTION = 'Validate OGC API Features (OAFeat) ' 'endpoint landing page'
RESOURCE_TYPE = 'OGC:WFS3'
REQUEST_METHOD = 'GET'
REQUEST_HEADERS = {'Accept': 'application/json'}
REQUEST_TEMPLATE = ''
def __init__(self):
Probe.__init__(self)
CHECKS_AVAIL = {
'GeoHealthCheck.plugins.check.checks.HttpStatusNoError': {
'default': True
},
'GeoHealthCheck.plugins.check.checks.JsonParse': {
'default': True
},
'GeoHealthCheck.plugins.check.checks.ContainsStrings': {
'set_params': {
'strings': {
'name': 'Contains required strings',
'value': ['/conformance', '/collections',
'service', 'links']
}
},
'default': True
},
}
def type_for_link(links, rel):
content_type = 'application/json'
for link in links:
if link['rel'] == rel:
content_type = link.get('type', content_type)
if 'json' in content_type:
break
return content_type
def set_accept_header(oa_feat, content_type):
oa_feat.headers['Accept'] = content_type
class WFS3Drilldown(Probe):
NAME = 'OGC API Features (OAFeat) Drilldown'
DESCRIPTION = 'Traverses an OGC API Features (OAFeat) API ' 'endpoint by drilling down'
RESOURCE_TYPE = 'OGC:WFS3'
REQUEST_METHOD = 'GET'
REQUEST_HEADERS = {'Accept': 'application/json'}
PARAM_DEFS = {
'drilldown_level': {
'type': 'string',
'description': 'How thorough the drilldown should be.\
basic: test presence endpoints, \
full: go through collections, fetch Features',
'default': 'basic',
'required': True,
'range': ['basic', 'full']
}
}
def __init__(self):
Probe.__init__(self)
|
MIT License
|
natashamjaques/neural_chat
|
ParlAI/parlai/mturk/tasks/light/light_chats/graph.py
|
GraphFunction.__init__
|
python
|
def __init__(
self,
function_name,
possible_arg_nums,
arg_split_words,
arg_find_type,
arg_constraints,
func_constraints,
callback_triggers,
):
self.name = function_name
self.possible_arg_nums = possible_arg_nums
self.arg_split_words = arg_split_words
self.arg_find_type = arg_find_type
self.arg_constraints = arg_constraints
self.func_constraints = func_constraints
self.callback_triggers = callback_triggers
self.formats = {'failed': '{1} couldn\'t do that'}
|
Create a new graph function
args:
function name - name or array of aliases for the function
possible_arg_nums - number or array of valid arg numbers for
determining if given args are valid or for breaking text into
a valid number of arguments before they're parsed into descs
arg_split_words - array of words to be used to split input args
arg_find_type - array of args for desc_to_nodes to use to find the
argument at the given index, following the form
{'type': <search type>, 'from': <arg idx of reference>}. If a
list is given for each argument use the same element for each
arg_constraints - constraints on whether or not found arguments are
valid to be matched during the parsing step. Form is array of
arrays of constraint types
func_constraints - constraints on whether a function will pass with
the given args, format is array of constraint types
callback_triggers - callback hook names and argument idxs for making
calls to callback functions upon completion of this function call
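An illustrative construction; every value below is invented to show the expected argument shapes and is not taken from the source:
give_func = GraphFunction(
    function_name=['give', 'hand'],
    possible_arg_nums=[3],
    arg_split_words=['to'],
    arg_find_type=[{'type': 'carrying', 'from': 0}],  # hypothetical search types
    arg_constraints=[[], is_held_item(1), []],
    func_constraints=[],
    callback_triggers=[],
)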
|
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/ParlAI/parlai/mturk/tasks/light/light_chats/graph.py#L91-L127
|
from collections import Counter
from copy import deepcopy
import random
INIT_HEALTH = 2
CONSTRAINTS = {}
GRAPH_FUNCTIONS = {}
def rm(d, val):
if val in d:
del d[val]
def format_observation(
self, graph, viewing_agent, action, telling_agent=None, is_constraint=False
):
use_actors = action['actors']
if is_constraint:
use_actors = use_actors[1:]
descs = [
graph.node_to_desc(a, from_id=action['room_id'], use_the=True)
for a in use_actors
]
try:
i = use_actors.index(viewing_agent)
descs[i] = 'you'
except BaseException:
pass
if telling_agent is not None:
try:
i = use_actors.index(telling_agent)
if i == 0:
descs[0] = 'I'
else:
descs[i] = 'me'
except BaseException:
pass
descs[0] = descs[0].capitalize()
if 'add_descs' in action:
descs += action['add_descs']
if is_constraint:
descs = [action['actors'][0]] + descs
return self.get_action_observation_format(action, descs).format(*descs)
def is_held_item(item_idx):
return [
{'type': 'is_type', 'in_args': [item_idx], 'args': [['object']]},
{'type': 'no_prop', 'in_args': [item_idx], 'args': ['equipped']},
]
def is_equipped_item(item_idx):
return [
{'type': 'is_type', 'in_args': [item_idx], 'args': [['object']]},
{'type': 'has_prop', 'in_args': [item_idx], 'args': ['equipped']},
]
class GraphFunction(object):
|
MIT License
|
yxgeee/bake
|
imagenet/pycls/core/checkpoint.py
|
get_last_checkpoint
|
python
|
def get_last_checkpoint():
checkpoint_dir = get_checkpoint_dir()
checkpoints = [f for f in os.listdir(checkpoint_dir) if _NAME_PREFIX in f]
last_checkpoint_name = sorted(checkpoints)[-1]
return os.path.join(checkpoint_dir, last_checkpoint_name)
|
Retrieves the most recent checkpoint (highest epoch number).
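A small worked example (the paths are hypothetical and cfg.OUT_DIR is assumed to be set):
# With cfg.OUT_DIR = '/tmp/run' and checkpoints model_epoch_0001.pyth ... model_epoch_0012.pyth,
# the zero-padded names sort lexicographically, so:
get_last_checkpoint()  # -> '/tmp/run/checkpoints/model_epoch_0012.pyth'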
|
https://github.com/yxgeee/bake/blob/07c4f668ea19311d5b50121026e73d2f035d5765/imagenet/pycls/core/checkpoint.py#L34-L40
|
import os
import pycls.core.distributed as dist
import torch
from pycls.core.config import cfg
_NAME_PREFIX = "model_epoch_"
_DIR_NAME = "checkpoints"
def get_checkpoint_dir():
return os.path.join(cfg.OUT_DIR, _DIR_NAME)
def get_checkpoint(epoch):
name = "{}{:04d}.pyth".format(_NAME_PREFIX, epoch)
return os.path.join(get_checkpoint_dir(), name)
|
MIT License
|
aspose-words-cloud/aspose-words-cloud-python
|
asposewordscloud/models/paragraph_format_update.py
|
ParagraphFormatUpdate.line_spacing
|
python
|
def line_spacing(self):
return self._line_spacing
|
Gets the line_spacing of this ParagraphFormatUpdate. # noqa: E501
Gets or sets the line spacing (in points) for the paragraph. # noqa: E501
:return: The line_spacing of this ParagraphFormatUpdate. # noqa: E501
:rtype: float
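A hypothetical round trip, assuming the matching line_spacing setter that the generated model defines alongside this getter:
fmt = ParagraphFormatUpdate(line_spacing=14.0)
fmt.line_spacing  # -> 14.0 (points)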
|
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/paragraph_format_update.py#L429-L437
|
import pprint
import re
import datetime
import six
import json
class ParagraphFormatUpdate(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'link': 'WordsApiLink',
'add_space_between_far_east_and_alpha': 'bool',
'add_space_between_far_east_and_digit': 'bool',
'alignment': 'str',
'bidi': 'bool',
'drop_cap_position': 'str',
'first_line_indent': 'float',
'keep_together': 'bool',
'keep_with_next': 'bool',
'left_indent': 'float',
'line_spacing': 'float',
'line_spacing_rule': 'str',
'lines_to_drop': 'int',
'no_space_between_paragraphs_of_same_style': 'bool',
'outline_level': 'str',
'page_break_before': 'bool',
'right_indent': 'float',
'shading': 'Shading',
'space_after': 'float',
'space_after_auto': 'bool',
'space_before': 'float',
'space_before_auto': 'bool',
'style_identifier': 'str',
'style_name': 'str',
'suppress_auto_hyphens': 'bool',
'suppress_line_numbers': 'bool',
'widow_control': 'bool'
}
attribute_map = {
'link': 'Link',
'add_space_between_far_east_and_alpha': 'AddSpaceBetweenFarEastAndAlpha',
'add_space_between_far_east_and_digit': 'AddSpaceBetweenFarEastAndDigit',
'alignment': 'Alignment',
'bidi': 'Bidi',
'drop_cap_position': 'DropCapPosition',
'first_line_indent': 'FirstLineIndent',
'keep_together': 'KeepTogether',
'keep_with_next': 'KeepWithNext',
'left_indent': 'LeftIndent',
'line_spacing': 'LineSpacing',
'line_spacing_rule': 'LineSpacingRule',
'lines_to_drop': 'LinesToDrop',
'no_space_between_paragraphs_of_same_style': 'NoSpaceBetweenParagraphsOfSameStyle',
'outline_level': 'OutlineLevel',
'page_break_before': 'PageBreakBefore',
'right_indent': 'RightIndent',
'shading': 'Shading',
'space_after': 'SpaceAfter',
'space_after_auto': 'SpaceAfterAuto',
'space_before': 'SpaceBefore',
'space_before_auto': 'SpaceBeforeAuto',
'style_identifier': 'StyleIdentifier',
'style_name': 'StyleName',
'suppress_auto_hyphens': 'SuppressAutoHyphens',
'suppress_line_numbers': 'SuppressLineNumbers',
'widow_control': 'WidowControl'
}
def __init__(self, link=None, add_space_between_far_east_and_alpha=None, add_space_between_far_east_and_digit=None, alignment=None, bidi=None, drop_cap_position=None, first_line_indent=None, keep_together=None, keep_with_next=None, left_indent=None, line_spacing=None, line_spacing_rule=None, lines_to_drop=None, no_space_between_paragraphs_of_same_style=None, outline_level=None, page_break_before=None, right_indent=None, shading=None, space_after=None, space_after_auto=None, space_before=None, space_before_auto=None, style_identifier=None, style_name=None, suppress_auto_hyphens=None, suppress_line_numbers=None, widow_control=None):
self._link = None
self._add_space_between_far_east_and_alpha = None
self._add_space_between_far_east_and_digit = None
self._alignment = None
self._bidi = None
self._drop_cap_position = None
self._first_line_indent = None
self._keep_together = None
self._keep_with_next = None
self._left_indent = None
self._line_spacing = None
self._line_spacing_rule = None
self._lines_to_drop = None
self._no_space_between_paragraphs_of_same_style = None
self._outline_level = None
self._page_break_before = None
self._right_indent = None
self._shading = None
self._space_after = None
self._space_after_auto = None
self._space_before = None
self._space_before_auto = None
self._style_identifier = None
self._style_name = None
self._suppress_auto_hyphens = None
self._suppress_line_numbers = None
self._widow_control = None
self.discriminator = None
if link is not None:
self.link = link
if add_space_between_far_east_and_alpha is not None:
self.add_space_between_far_east_and_alpha = add_space_between_far_east_and_alpha
if add_space_between_far_east_and_digit is not None:
self.add_space_between_far_east_and_digit = add_space_between_far_east_and_digit
if alignment is not None:
self.alignment = alignment
if bidi is not None:
self.bidi = bidi
if drop_cap_position is not None:
self.drop_cap_position = drop_cap_position
if first_line_indent is not None:
self.first_line_indent = first_line_indent
if keep_together is not None:
self.keep_together = keep_together
if keep_with_next is not None:
self.keep_with_next = keep_with_next
if left_indent is not None:
self.left_indent = left_indent
if line_spacing is not None:
self.line_spacing = line_spacing
if line_spacing_rule is not None:
self.line_spacing_rule = line_spacing_rule
if lines_to_drop is not None:
self.lines_to_drop = lines_to_drop
if no_space_between_paragraphs_of_same_style is not None:
self.no_space_between_paragraphs_of_same_style = no_space_between_paragraphs_of_same_style
if outline_level is not None:
self.outline_level = outline_level
if page_break_before is not None:
self.page_break_before = page_break_before
if right_indent is not None:
self.right_indent = right_indent
if shading is not None:
self.shading = shading
if space_after is not None:
self.space_after = space_after
if space_after_auto is not None:
self.space_after_auto = space_after_auto
if space_before is not None:
self.space_before = space_before
if space_before_auto is not None:
self.space_before_auto = space_before_auto
if style_identifier is not None:
self.style_identifier = style_identifier
if style_name is not None:
self.style_name = style_name
if suppress_auto_hyphens is not None:
self.suppress_auto_hyphens = suppress_auto_hyphens
if suppress_line_numbers is not None:
self.suppress_line_numbers = suppress_line_numbers
if widow_control is not None:
self.widow_control = widow_control
@property
def link(self):
return self._link
@link.setter
def link(self, link):
self._link = link
@property
def add_space_between_far_east_and_alpha(self):
return self._add_space_between_far_east_and_alpha
@add_space_between_far_east_and_alpha.setter
def add_space_between_far_east_and_alpha(self, add_space_between_far_east_and_alpha):
self._add_space_between_far_east_and_alpha = add_space_between_far_east_and_alpha
@property
def add_space_between_far_east_and_digit(self):
return self._add_space_between_far_east_and_digit
@add_space_between_far_east_and_digit.setter
def add_space_between_far_east_and_digit(self, add_space_between_far_east_and_digit):
self._add_space_between_far_east_and_digit = add_space_between_far_east_and_digit
@property
def alignment(self):
return self._alignment
@alignment.setter
def alignment(self, alignment):
allowed_values = ["Left", "Center", "Right", "Justify", "Distributed", "ArabicMediumKashida", "ArabicHighKashida", "ArabicLowKashida", "ThaiDistributed"]
if not alignment.isdigit():
if alignment not in allowed_values:
raise ValueError(
"Invalid value for `alignment` ({0}), must be one of {1}"
.format(alignment, allowed_values))
self._alignment = alignment
else:
self._alignment = allowed_values[int(alignment) if six.PY3 else long(alignment)]
@property
def bidi(self):
return self._bidi
@bidi.setter
def bidi(self, bidi):
self._bidi = bidi
@property
def drop_cap_position(self):
return self._drop_cap_position
@drop_cap_position.setter
def drop_cap_position(self, drop_cap_position):
allowed_values = ["None", "Normal", "Margin"]
if not drop_cap_position.isdigit():
if drop_cap_position not in allowed_values:
raise ValueError(
"Invalid value for `drop_cap_position` ({0}), must be one of {1}"
.format(drop_cap_position, allowed_values))
self._drop_cap_position = drop_cap_position
else:
self._drop_cap_position = allowed_values[int(drop_cap_position) if six.PY3 else long(drop_cap_position)]
@property
def first_line_indent(self):
return self._first_line_indent
@first_line_indent.setter
def first_line_indent(self, first_line_indent):
self._first_line_indent = first_line_indent
@property
def keep_together(self):
return self._keep_together
@keep_together.setter
def keep_together(self, keep_together):
self._keep_together = keep_together
@property
def keep_with_next(self):
return self._keep_with_next
@keep_with_next.setter
def keep_with_next(self, keep_with_next):
self._keep_with_next = keep_with_next
@property
def left_indent(self):
return self._left_indent
@left_indent.setter
def left_indent(self, left_indent):
self._left_indent = left_indent
@property
|
MIT License
|
zepmanbc/creopyson
|
creopyson/layer.py
|
show
|
python
|
def show(client, name=None, file_=None, show_=None):
data = {}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
if name is not None:
data["name"] = name
if show_ is not None:
data["show"] = show_
return client._creoson_post("layer", "show", data)
|
Show/Hide one or more layers.
Args:
client (obj):
creopyson Client.
name (str, optional):
Layer name (wildcards allowed: True).
Defaults: All layers are listed.
`file_` (str, optional):
File name (wildcards allowed: True).
Default is the current active model.
`show_` (boolean, optional):
Whether to show or hide the layers.
Default is True (show).
Returns:
None
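A hedged usage sketch, assuming a Creoson server is running locally and a model is open in Creo; the layer name pattern below is hypothetical:
import creopyson
from creopyson import layer
client = creopyson.Client()   # assumed local Creoson defaults
client.connect()
layer.show(client, name="A_DATUM*", show_=False)  # hide matching layers in the active model
layer.show(client, name="A_DATUM*", show_=True)   # show them again
client.disconnect()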
|
https://github.com/zepmanbc/creopyson/blob/ab99d6c28780f5967d5daaaa59f5dbfd4fd96600/creopyson/layer.py#L97-L128
|
def delete(client, name=None, file_=None):
data = {"name": name}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
if name is not None:
data["name"] = name
return client._creoson_post("layer", "delete", data)
def exists(client, name=None, file_=None):
data = {}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
if name is not None:
data["name"] = name
return client._creoson_post("layer", "exists", data, "exists")
def list_(client, name=None, file_=None):
data = {}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
if name is not None:
data["name"] = name
return client._creoson_post("layer", "list", data, "layers")
|
MIT License
|
yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
status
|
python
|
def status():
return raw_status().text
|
Get the Mesos maintenance status. This contains hostname/ip mappings for hosts that are either marked as being
down for maintenance or draining.
:returns: Text representation of the status
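status() just returns the raw response body; the shape of that data is easier to see in a self-contained sketch (the payload below is invented for illustration) that extracts hostnames the same way get_hosts_with_state does in the surrounding module:
sample = {
    "get_maintenance_status": {
        "status": {
            "draining_machines": [{"id": {"hostname": "host1", "ip": "10.0.0.1"}}],
            "down_machines": [{"hostname": "host2", "ip": "10.0.0.2"}],
        }
    }
}
def hosts_with_state(payload, state):
    status = payload["get_maintenance_status"]["status"]
    if not status or state not in status:
        return []
    machines = status[state]
    # Draining entries nest the hostname under "id"; down entries do not.
    if "id" in machines[0]:
        return [m["id"]["hostname"] for m in machines]
    return [m["hostname"] for m in machines]
print(hosts_with_state(sample, "draining_machines"))  # ['host1']
print(hosts_with_state(sample, "down_machines"))      # ['host2']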
|
https://github.com/yelp/paasta/blob/bc1716253bbe003cec01bd02016010910c2b039c/paasta_tools/mesos_maintenance.py#L758-L763
|
import argparse
import datetime
import json
import logging
from socket import gaierror
from socket import getfqdn
from socket import gethostbyname
from typing import List
from typing import NamedTuple
from typing import Optional
import a_sync
from dateutil import parser
from pytimeparse import timeparse
from requests import Request
from requests import Session
from requests.exceptions import HTTPError
from paasta_tools.mesos_tools import get_count_running_tasks_on_slave
from paasta_tools.mesos_tools import get_mesos_config_path
from paasta_tools.mesos_tools import get_mesos_leader
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.mesos_tools import MESOS_MASTER_PORT
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import time_cache
from paasta_tools.utils import to_bytes
log = logging.getLogger(__name__)
class Hostname(NamedTuple):
host: str
ip: str
class Credentials(NamedTuple):
file: str
principal: str
secret: str
class Resource(NamedTuple):
name: str
amount: int
MAINTENANCE_ROLE = "maintenance"
def base_api(mesos_config_path: Optional[str] = None):
leader = get_mesos_leader(mesos_config_path)
def execute_request(method, endpoint, timeout=(3, 2), **kwargs):
url = "http://%s:%d%s" % (leader, MESOS_MASTER_PORT, endpoint)
s = Session()
s.auth = (get_principal(), get_secret())
req = Request(method, url, **kwargs)
prepared = s.prepare_request(req)
try:
resp = s.send(prepared, timeout=timeout)
resp.raise_for_status()
return resp
except HTTPError:
raise HTTPError("Error executing API request calling %s." % url)
return execute_request
def master_api(mesos_config_path: Optional[str] = None):
def execute_master_api_request(method, endpoint, **kwargs):
base_api_client = base_api(mesos_config_path=mesos_config_path)
return base_api_client(method, "/master%s" % endpoint, **kwargs)
return execute_master_api_request
def operator_api(mesos_config_path: Optional[str] = None):
def execute_operator_api_request(**kwargs):
base_api_client = base_api(mesos_config_path=mesos_config_path)
if "headers" in kwargs:
kwargs["headers"]["Content-Type"] = "application/json"
else:
kwargs["headers"] = {"Content-Type": "application/json"}
data = kwargs.pop("data")
return base_api_client("POST", "/api/v1", data=json.dumps(data), **kwargs)
return execute_operator_api_request
def reserve_api():
def execute_reserve_api_request(method, endpoint, **kwargs):
master_api_client = master_api()
return master_api_client(method, "/reserve%s" % endpoint, **kwargs)
return execute_reserve_api_request
def unreserve_api():
def execute_unreserve_api_request(method, endpoint, **kwargs):
master_api_client = master_api()
return master_api_client(method, "/unreserve%s" % endpoint, **kwargs)
return execute_unreserve_api_request
def maintenance_api():
def execute_schedule_api_request(method, endpoint, **kwargs):
master_api_client = master_api()
return master_api_client(
method, "/maintenance%s" % endpoint, timeout=(3, 10), **kwargs
)
return execute_schedule_api_request
def get_schedule_client():
def execute_schedule_api_request(method, endpoint, **kwargs):
maintenance_api_client = maintenance_api()
return maintenance_api_client(method, "/schedule%s" % endpoint, **kwargs)
return execute_schedule_api_request
def get_maintenance_schedule():
client_fn = operator_api()
return client_fn(data={"type": "GET_MAINTENANCE_SCHEDULE"})
@time_cache(ttl=10)
def get_maintenance_status(mesos_config_path: Optional[str] = None):
client_fn = operator_api(mesos_config_path=mesos_config_path)
return client_fn(data={"type": "GET_MAINTENANCE_STATUS"})
def schedule():
try:
schedule = get_maintenance_schedule()
except HTTPError:
raise HTTPError("Error getting maintenance schedule.")
return schedule.text
def get_hosts_with_state(
state, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> List[str]:
mesos_config_path = get_mesos_config_path(system_paasta_config)
try:
status = get_maintenance_status(mesos_config_path).json()
status = status["get_maintenance_status"]["status"]
except HTTPError:
raise HTTPError("Error getting maintenance status.")
if not status or state not in status:
return []
if "id" in status[state][0]:
return [machine["id"]["hostname"] for machine in status[state]]
else:
return [machine["hostname"] for machine in status[state]]
def get_draining_hosts(system_paasta_config: Optional[SystemPaastaConfig] = None):
return get_hosts_with_state(
state="draining_machines", system_paasta_config=system_paasta_config
)
def get_down_hosts():
return get_hosts_with_state(state="down_machines")
def is_host_draining(hostname=getfqdn()):
return hostname in get_draining_hosts()
def is_host_down(hostname=getfqdn()):
return hostname in get_down_hosts()
def get_hosts_forgotten_draining(grace=0):
draining_hosts = get_draining_hosts()
log.debug("draining_hosts: %s" % draining_hosts)
hosts_past_maintenance_start = get_hosts_past_maintenance_start(grace=grace)
log.debug("hosts_past_maintenance_start: %s" % hosts_past_maintenance_start)
forgotten_draining = list(
set(draining_hosts).intersection(hosts_past_maintenance_start)
)
log.debug("forgotten_draining: %s" % forgotten_draining)
return forgotten_draining
def are_hosts_forgotten_draining():
return bool(get_hosts_forgotten_draining())
def get_hosts_forgotten_down(grace=0):
down_hosts = get_down_hosts()
log.debug("down_hosts: %s" % down_hosts)
hosts_past_maintenance_end = get_hosts_past_maintenance_end(grace=grace)
log.debug("hosts_past_maintenance_end: %s" % hosts_past_maintenance_end)
forgotten_down = list(set(down_hosts).intersection(hosts_past_maintenance_end))
log.debug("forgotten_down: %s" % forgotten_down)
return forgotten_down
def are_hosts_forgotten_down():
return bool(get_hosts_forgotten_down())
def parse_timedelta(value):
error_msg = "'%s' is not a valid time expression" % value
try:
seconds = timeparse.timeparse(value)
except TypeError:
raise argparse.ArgumentTypeError(error_msg)
if not seconds:
raise argparse.ArgumentTypeError(error_msg)
return seconds_to_nanoseconds(seconds)
def parse_datetime(value):
error_msg = "'%s' is not a valid datetime expression" % value
try:
dt = parser.parse(value)
except Exception:
raise argparse.ArgumentTypeError(error_msg)
if not dt:
raise argparse.ArgumentTypeError(error_msg)
return datetime_to_nanoseconds(dt)
def datetime_seconds_from_now(seconds):
return now() + datetime.timedelta(seconds=seconds)
def now():
return datetime.datetime.now()
def seconds_to_nanoseconds(seconds):
return seconds * 1000000000
def datetime_to_nanoseconds(dt):
return seconds_to_nanoseconds(int(dt.strftime("%s")))
def build_maintenance_payload(hostnames, maint_type):
return {
"type": maint_type.upper(),
maint_type.lower(): {"machines": get_machine_ids(hostnames)},
}
def hostnames_to_components(hostnames, resolve=False):
components = []
for hostname in hostnames:
if "|" in hostname:
(host, ip) = hostname.split("|")
components.append(Hostname(host=host, ip=ip))
else:
try:
ip = gethostbyname(hostname) if resolve else None
except gaierror:
log.error(f"Failed to resolve IP for {hostname}, continuing regardless")
continue
components.append(Hostname(host=hostname, ip=ip))
return components
def get_machine_ids(hostnames):
machine_ids = []
components = hostnames_to_components(hostnames, resolve=True)
for component in components:
machine_id = {"hostname": component.host, "ip": component.ip}
machine_ids.append(machine_id)
return machine_ids
def build_reservation_payload(resources):
payload = []
for resource in resources:
payload.append(
{
"name": resource.name,
"type": "SCALAR",
"scalar": {"value": resource.amount},
"role": MAINTENANCE_ROLE,
"reservation": {"principal": get_principal()},
}
)
return payload
def build_maintenance_schedule_payload(
hostnames, start=None, duration=None, drain=True
):
schedule = get_maintenance_schedule().json()["get_maintenance_schedule"]["schedule"]
machine_ids = get_machine_ids(hostnames)
if drain:
unavailability = dict()
unavailability["start"] = dict()
unavailability["start"]["nanoseconds"] = int(start)
unavailability["duration"] = dict()
unavailability["duration"]["nanoseconds"] = int(duration)
window = dict()
window["machine_ids"] = machine_ids
window["unavailability"] = unavailability
if schedule:
for existing_window in schedule["windows"]:
for existing_machine_id in existing_window["machine_ids"]:
if existing_machine_id in machine_ids:
existing_window["machine_ids"].remove(existing_machine_id)
if not existing_window["machine_ids"]:
schedule["windows"].remove(existing_window)
if drain:
windows = schedule["windows"] + [window]
else:
windows = schedule["windows"]
elif drain:
windows = [window]
else:
windows = []
payload = dict()
payload["windows"] = windows
return {
"type": "UPDATE_MAINTENANCE_SCHEDULE",
"update_maintenance_schedule": {"schedule": payload},
}
def load_credentials(mesos_secrets="/nail/etc/mesos-slave-secret"):
try:
with open(mesos_secrets) as data_file:
data = json.load(data_file)
except EnvironmentError:
log.error(
"maintenance calls must be run on a Mesos slave containing valid credentials (%s)"
% mesos_secrets
)
raise
try:
username = data["principal"]
password = data["secret"]
except KeyError:
log.error(
"%s does not contain Mesos slave credentials in the expected format. "
"See http://mesos.apache.org/documentation/latest/authentication/ for details"
% mesos_secrets
)
raise
return Credentials(file=mesos_secrets, principal=username, secret=password)
def get_principal(mesos_secrets="/nail/etc/mesos-slave-secret"):
return load_credentials(mesos_secrets).principal
def get_secret(mesos_secrets="/nail/etc/mesos-slave-secret"):
return load_credentials(mesos_secrets).secret
def _make_request_payload(slave_id, reservation_payload):
return {
"slaveId": slave_id.encode("UTF-8"),
"resources": to_bytes(json.dumps(reservation_payload)).replace(b"+", b"%20"),
}
def _make_operator_reservation_request_payload(slave_id, payload, request_type):
return {
"type": request_type.upper(),
request_type.lower(): {"agent_id": {"value": slave_id}},
"resources": payload,
}
def reserve(slave_id, resources):
log.info(f"Dynamically reserving resources on {slave_id}: {resources}")
payload = _make_operator_reservation_request_payload(
slave_id=slave_id,
payload=build_reservation_payload(resources),
request_type="reserve_resources",
)
client_fn = operator_api()
try:
print(payload)
reserve_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error adding dynamic reservation.")
return reserve_output
def unreserve(slave_id, resources):
log.info(f"Dynamically unreserving resources on {slave_id}: {resources}")
payload = _make_operator_reservation_request_payload(
slave_id=slave_id,
payload=build_reservation_payload(resources),
request_type="unreserve_resources",
)
client_fn = operator_api()
try:
unreserve_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error adding dynamic unreservation.")
return unreserve_output
def components_to_hosts(components):
hosts = []
for component in components:
hosts.append(component.host)
return hosts
def reserve_all_resources(hostnames):
mesos_state = a_sync.block(get_mesos_master().state_summary)
components = hostnames_to_components(hostnames)
hosts = components_to_hosts(components)
known_slaves = [
slave for slave in mesos_state["slaves"] if slave["hostname"] in hosts
]
for slave in known_slaves:
hostname = slave["hostname"]
log.info("Reserving all resources on %s" % hostname)
slave_id = slave["id"]
resources = []
for resource in ["disk", "mem", "cpus", "gpus"]:
free_resource = (
slave["resources"][resource] - slave["used_resources"][resource]
)
for role in slave["reserved_resources"]:
free_resource -= slave["reserved_resources"][role][resource]
resources.append(Resource(name=resource, amount=free_resource))
try:
reserve(slave_id=slave_id, resources=resources)
except HTTPError:
raise HTTPError(
f"Failed reserving all of the resources on {hostname} ({slave_id}). Aborting."
)
def unreserve_all_resources(hostnames):
mesos_state = a_sync.block(get_mesos_master().state_summary)
components = hostnames_to_components(hostnames)
hosts = components_to_hosts(components)
known_slaves = [
slave for slave in mesos_state["slaves"] if slave["hostname"] in hosts
]
for slave in known_slaves:
hostname = slave["hostname"]
log.info("Unreserving all resources on %s" % hostname)
slave_id = slave["id"]
resources = []
if MAINTENANCE_ROLE in slave["reserved_resources"]:
for resource in ["disk", "mem", "cpus", "gpus"]:
reserved_resource = slave["reserved_resources"][MAINTENANCE_ROLE][
resource
]
resources.append(Resource(name=resource, amount=reserved_resource))
try:
unreserve(slave_id=slave_id, resources=resources)
except HTTPError:
raise HTTPError(
f"Failed unreserving all of the resources on {hostname} ({slave_id}). Aborting."
)
def drain(hostnames, start, duration, reserve_resources=True):
log.info("Draining: %s" % hostnames)
if reserve_resources:
try:
reserve_all_resources(hostnames)
except HTTPError as e:
log.warning("Failed to reserve resources, will continue to drain: %s" % e)
payload = build_maintenance_schedule_payload(hostnames, start, duration, drain=True)
client_fn = operator_api()
try:
drain_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance drain.")
return drain_output
def undrain(hostnames, unreserve_resources=True):
log.info("Undraining: %s" % hostnames)
if unreserve_resources:
try:
unreserve_all_resources(hostnames)
except HTTPError as e:
log.warning(
"Failed to unreserve resources, will continue to undrain: %s" % e
)
payload = build_maintenance_schedule_payload(hostnames, drain=False)
client_fn = get_schedule_client()
client_fn = operator_api()
try:
undrain_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance undrain.")
return undrain_output
def down(hostnames):
log.info("Bringing down: %s" % hostnames)
payload = build_maintenance_payload(hostnames, "start_maintenance")
client_fn = operator_api()
try:
down_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance down.")
return down_output
def up(hostnames):
log.info("Bringing up: %s" % hostnames)
payload = build_maintenance_payload(hostnames, "stop_maintenance")
client_fn = operator_api()
try:
up_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance up.")
return up_output
def raw_status():
try:
status = get_maintenance_status()
except HTTPError:
raise HTTPError("Error performing maintenance status.")
return status
|
Apache License 2.0
|
kubernetes-client/python
|
kubernetes/client/api/scheduling_v1beta1_api.py
|
SchedulingV1beta1Api.list_priority_class_with_http_info
|
python
|
def list_priority_class_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_priority_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
if 'watch' in local_var_params and local_var_params['watch'] is not None:
query_params.append(('watch', local_var_params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/scheduling.k8s.io/v1beta1/priorityclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1PriorityClassList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
list_priority_class # noqa: E501
list or watch objects of kind PriorityClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_priority_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1PriorityClassList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
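A usage sketch, assuming a reachable cluster and a local kubeconfig; the *_with_http_info variant returns the deserialized list together with the HTTP status code and headers:
from kubernetes import client, config
config.load_kube_config()                      # or config.load_incluster_config() inside a pod
api = client.SchedulingV1beta1Api()
data, status_code, headers = api.list_priority_class_with_http_info(limit=5)
print(status_code)                             # 200 on success
for pc in data.items:
    print(pc.metadata.name, pc.value)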
|
https://github.com/kubernetes-client/python/blob/96dade6021dc2e9ee1430172e1b65d9e9e232b10/kubernetes/client/api/scheduling_v1beta1_api.py#L621-L740
|
from __future__ import absolute_import
import re
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (
ApiTypeError,
ApiValueError
)
class SchedulingV1beta1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_priority_class(self, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_priority_class_with_http_info(body, **kwargs)
def create_priority_class_with_http_info(self, body, **kwargs):
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_priority_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `create_priority_class`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/scheduling.k8s.io/v1beta1/priorityclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1PriorityClass',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_priority_class(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_collection_priority_class_with_http_info(**kwargs)
def delete_collection_priority_class_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_priority_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/scheduling.k8s.io/v1beta1/priorityclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_priority_class(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_priority_class_with_http_info(name, **kwargs)
def delete_priority_class_with_http_info(self, name, **kwargs):
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_priority_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `delete_priority_class`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/scheduling.k8s.io/v1beta1/priorityclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/scheduling.k8s.io/v1beta1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_priority_class(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.list_priority_class_with_http_info(**kwargs)
|
Apache License 2.0
|
ploxiln/fab-classic
|
fabric/contrib/files.py
|
contains
|
python
|
def contains(filename, text, exact=False, use_sudo=False, escape=True,
shell=False, case_sensitive=True):
func = use_sudo and sudo or run
if escape:
text = _escape_for_regex(text)
if exact:
text = "^%s$" % text
with settings(hide('everything'), warn_only=True):
egrep_cmd = 'egrep "%s" %s' % (text, filename.replace(' ', r'\ '))
if not case_sensitive:
egrep_cmd = egrep_cmd.replace('egrep', 'egrep -i', 1)
return func(egrep_cmd, shell=shell).succeeded
|
Return True if ``filename`` contains ``text`` (which may be a regex.)
By default, this function will consider a partial line match (i.e. where
``text`` only makes up part of the line it's on). Specify ``exact=True`` to
change this behavior so that only a line containing exactly ``text``
results in a True return value.
This function leverages ``egrep`` on the remote end (so it may not follow
Python regular expression syntax perfectly), and skips ``env.shell``
wrapper by default.
If ``use_sudo`` is True, will use `sudo` instead of `run`.
If ``escape`` is False, no extra regular expression related escaping is
performed (this includes overriding ``exact`` so that no ``^``/``$`` is
added.)
The ``shell`` argument will be eventually passed to ``run/sudo``. See
description of the same argument in ``~fabric.contrib.sed`` for details.
If ``case_sensitive`` is False, the `-i` flag will be passed to ``egrep``.
.. versionchanged:: 1.4
Updated the regular expression related escaping to try and solve
various corner cases.
.. versionchanged:: 1.4
Added ``escape`` keyword argument.
.. versionadded:: 1.6
Added the ``shell`` keyword argument.
.. versionadded:: 1.11
Added the ``case_sensitive`` keyword argument.
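A usage sketch, assuming fab-classic is installed and env.host_string points at a reachable host (the host and file below are hypothetical):
from fabric.api import env
from fabric.contrib.files import contains
env.host_string = "deploy@example.com"   # hypothetical target host
# exact=True anchors the pattern with ^...$, so only a whole-line match counts.
if contains('/etc/ssh/sshd_config', 'PasswordAuthentication no',
            exact=True, use_sudo=True):
    print("password authentication already disabled")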
|
https://github.com/ploxiln/fab-classic/blob/6adf6765e87f694a70f32fedc197a754f7bf94f3/fabric/contrib/files.py#L333-L380
|
import hashlib
import os
import six
from functools import partial
from fabric.api import run, sudo, hide, settings, env, put, abort
from fabric.utils import apply_lcwd
def exists(path, use_sudo=False, verbose=False):
func = use_sudo and sudo or run
cmd = 'stat %s' % path.replace(' ', r'\ ')
if verbose:
with settings(warn_only=True):
return not func(cmd).failed
with settings(hide('everything'), warn_only=True):
return not func(cmd).failed
def is_link(path, use_sudo=False, verbose=False):
func = sudo if use_sudo else run
cmd = 'test -L %s' % path.replace(' ', r'\ ')
args, kwargs = [], {'warn_only': True}
if not verbose:
args = [hide('everything')]
with settings(*args, **kwargs):
return func(cmd).succeeded
def first(*args, **kwargs):
for directory in args:
if exists(directory, **kwargs):
return directory
def upload_template(filename, destination, context=None, use_jinja=False,
template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False,
mode=None, pty=None, keep_trailing_newline=False, temp_dir=''):
func = use_sudo and sudo or run
if pty is not None:
func = partial(func, pty=pty)
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination.replace(' ', r'\ ')).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
if mirror_local_mode and mode is None:
mode = os.stat(apply_lcwd(filename, env)).st_mode
mirror_local_mode = False
text = None
if use_jinja:
try:
template_dir = template_dir or os.getcwd()
template_dir = apply_lcwd(template_dir, env)
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir),
keep_trailing_newline=keep_trailing_newline)
text = jenv.get_template(filename).render(**context or {})
text = text.encode('utf-8')
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + "\nUnable to import Jinja2 -- see above.")
else:
if template_dir:
filename = os.path.join(template_dir, filename)
filename = apply_lcwd(filename, env)
with open(os.path.expanduser(filename)) as inputfile:
text = inputfile.read()
if context:
text = text % context
if backup and exists(destination):
target = destination.replace(' ', r'\ ')
func("cp %s %s.bak" % (target, target))
if six.PY3 is True and isinstance(text, bytes):
text = text.decode('utf-8')
return put(
local_path=six.StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode,
temp_dir=temp_dir
)
def sed(filename, before, after, limit='', use_sudo=False, backup='.bak',
flags='', shell=False):
func = use_sudo and sudo or run
for char in "/'":
before = before.replace(char, r'\%s' % char)
after = after.replace(char, r'\%s' % char)
for char in "()":
after = after.replace(char, r'\%s' % char)
if limit:
limit = r'/%s/ ' % limit
context = {
'script': r"'%ss/%s/%s/%sg'" % (limit, before, after, flags),
'filename': filename.replace(' ', r'\ '),
'backup': backup
}
with hide('running', 'stdout'):
platform = run("uname", shell=False, pty=False)
if platform in ('NetBSD', 'OpenBSD', 'QNX'):
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(filename)
context['tmp'] = "/tmp/%s" % hasher.hexdigest()
expr = r"""cp -p %(filename)s %(tmp)s \
&& sed -r -e %(script)s %(filename)s > %(tmp)s \
&& cp -p %(filename)s %(filename)s%(backup)s \
&& mv %(tmp)s %(filename)s"""
else:
context['extended_regex'] = '-E' if platform == 'Darwin' else '-r'
expr = r"sed -i%(backup)s %(extended_regex)s -e %(script)s %(filename)s"
command = expr % context
return func(command, shell=shell)
def uncomment(filename, regex, use_sudo=False, char='#', backup='.bak',
shell=False):
return sed(
filename,
before=r'^([[:space:]]*)%s[[:space:]]?' % char,
after=r'\1',
limit=regex,
use_sudo=use_sudo,
backup=backup,
shell=shell
)
def comment(filename, regex, use_sudo=False, char='#', backup='.bak',
shell=False):
carot, dollar = '', ''
if regex.startswith('^'):
carot = '^'
regex = regex[1:]
if regex.endswith('$'):
dollar = '$'
regex = regex[:-1]
regex = "%s(%s)%s" % (carot, regex, dollar)
return sed(
filename,
before=regex,
after=r'%s\1' % char,
use_sudo=use_sudo,
backup=backup,
shell=shell
)
|
BSD 2-Clause Simplified License
|
nicta/dora
|
dora/active_sampling/gp_sampler.py
|
GaussianProcess.update_regressors
|
python
|
def update_regressors(self):
if self.hyperparams is None:
return
self.regressors = []
for i_task in range(self.n_tasks):
self.regressors.append(
gp.condition(self.X(), self.y()[:, i_task] -
self.y_mean[i_task],
self.kerneldef, self.hyperparams[i_task]))
|
Update the regressors of the Gaussian process model.
Only makes sense to do this after hyperparameters are learned
.. note :: [Properties Modified]
regressors
.. note :: [Further Work] Use Cholesky Update here correctly to cache
regressors and improve efficiency
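A rough sketch of the intended call order, assuming dora and revrand are installed; the kernel definition is a hypothetical 1-D example in revrand.legacygp's compositional style, not taken from the source:
import numpy as np
from dora.active_sampling.gp_sampler import GaussianProcess
# Hypothetical kernel definition: amplitude * gaussian(length-scale).
kerneldef = lambda h, k: h(1e-3, 1e2, 1.0) * k('gaussian', h(1e-2, 1e1, 0.5))
sampler = GaussianProcess(lower=[0.0], upper=[1.0], kerneldef=kerneldef, n_train=20)
X = np.random.uniform(0.0, 1.0, size=(20, 1))
y = np.sin(4.0 * X)
sampler.add_data(X, y)
sampler.hyperparams = sampler.learn_hyperparams()  # learn hyperparameters first...
sampler.update_regressors()                        # ...then rebuild the cached regressors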
|
https://github.com/nicta/dora/blob/1929daa9f957a4ff42e688b116faa7699b3b1168/dora/active_sampling/gp_sampler.py#L175-L198
|
import logging
from dora.active_sampling.base_sampler import Sampler, random_sample
import revrand.legacygp as gp
import numpy as np
import scipy.stats as stats
log = logging.getLogger(__name__)
class GaussianProcess(Sampler):
name = 'GaussianProcess'
def __init__(self, lower, upper, kerneldef=None, n_train=50,
acq_name='var_sum', explore_priority=1., seed=None):
Sampler.__init__(self, lower, upper)
self.kerneldef = kerneldef
self.n_min = n_train
self.acq_name = acq_name
self.explore_priority = explore_priority
self.hyperparams = None
self.regressors = None
self.y_mean = None
self.n_tasks = None
if seed:
np.random.seed(seed)
def add_data(self, X, y, train=False):
[self.X.append(xi) for xi in X]
[self.y.append(np.atleast_1d(yi)) for yi in y]
[self.virtual_flag.append(False) for yi in y]
if train:
self.train()
def update_y_mean(self):
if not self.y:
return
self.y_mean = self.y().mean(axis=0) if len(self.y) else None
if self.n_tasks is None:
self.n_tasks = self.y_mean.shape[0]
else:
assert self.n_tasks == self.y_mean.shape[0]
def learn_hyperparams(self, verbose=False, ftol=1e-15, maxiter=2000):
self.update_y_mean()
logging.info('Training hyperparameters...')
snlml = gp.criterions.stacked_negative_log_marginal_likelihood
hyperparams = gp.learn(self.X(), self.y(), self.kerneldef,
opt_criterion=snlml,
verbose=verbose, ftol=ftol, maxiter=maxiter)
logging.info('Done.')
return [hyperparams for i_task in range(self.n_tasks)]
|
Apache License 2.0
|
tektoncd/experimental
|
sdk/python/tekton_pipeline/models/v1beta1_workspace_usage.py
|
V1beta1WorkspaceUsage.__eq__
|
python
|
def __eq__(self, other):
if not isinstance(other, V1beta1WorkspaceUsage):
return False
return self.to_dict() == other.to_dict()
|
Returns true if both objects are equal
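A short sketch, assuming the tekton_pipeline SDK is installed; equality is structural (via to_dict()), not identity-based, and comparing against any other type returns False:
from tekton_pipeline.models.v1beta1_workspace_usage import V1beta1WorkspaceUsage
a = V1beta1WorkspaceUsage(mount_path='/workspace/source', name='source')
b = V1beta1WorkspaceUsage(mount_path='/workspace/source', name='source')
print(a == b)                    # True: same field values
print(a == {'name': 'source'})   # False: not a V1beta1WorkspaceUsage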
|
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_workspace_usage.py#L154-L159
|
import pprint
import re
import six
from tekton_pipeline.configuration import Configuration
class V1beta1WorkspaceUsage(object):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'mount_path': 'str',
'name': 'str'
}
attribute_map = {
'mount_path': 'mountPath',
'name': 'name'
}
def __init__(self, mount_path='', name='', local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._mount_path = None
self._name = None
self.discriminator = None
self.mount_path = mount_path
self.name = name
@property
def mount_path(self):
return self._mount_path
@mount_path.setter
def mount_path(self, mount_path):
if self.local_vars_configuration.client_side_validation and mount_path is None:
raise ValueError("Invalid value for `mount_path`, must not be `None`")
self._mount_path = mount_path
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if self.local_vars_configuration.client_side_validation and name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
|
Apache License 2.0
|
yeonv/ledfxrm
|
custom_components/ledfxrm/switch.py
|
LedfxrmBinarySwitch.name
|
python
|
def name(self):
return START_KILL_SERVER
|
Return the name of the switch.
|
https://github.com/yeonv/ledfxrm/blob/ab2c6cc5ef64976104d16ae87199809d47cc4abb/custom_components/ledfxrm/switch.py#L46-L48
|
from homeassistant.components.switch import SwitchEntity
from custom_components.ledfxrm.const import (
DEFAULT_NAME,
DOMAIN,
ICON_POWER,
SWITCH,
START_KILL_SERVER,
)
from custom_components.ledfxrm.entity import LedfxrmEntity
import logging
async def async_setup_entry(hass, entry, async_add_displays):
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_displays([LedfxrmBinarySwitch(coordinator, entry)])
class LedfxrmBinarySwitch(LedfxrmEntity, SwitchEntity):
async def async_turn_on(self, **kwargs):
await self.coordinator.api.async_change_something(True)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
await self.coordinator.api.async_change_something(False)
await self.coordinator.async_request_refresh()
@property
def unique_id(self):
return self.config_entry.entry_id + "_switch"
@property
def assumed_state(self):
return True
@property
|
MIT License
|
uarm-developer/pyuarm
|
pyuarm/tools/miniterm.py
|
UArmCmd.do_set_position
|
python
|
def do_set_position(self, arg):
if self.__is_connected():
values = arg.split(' ')
if len(values) == 3:
result = self.arm.set_position(int(values[0]), int(values[1]), int(values[2]), wait=True)
elif len(values) == 4:
result = self.arm.set_position(int(values[0]), int(values[1]), int(values[2]), speed=int(values[3]), wait=True)
|
set_position, move to destination coordinate.
format: set_position X Y Z or move_to X Y Z S
X,Y,Z unit millimeter, S means Speed, unit mm/s
eg. set_position 100 200 150
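A usage sketch, assuming a uArm is attached over USB and pyuarm is installed; the commands mirror the format described above and the coordinates are illustrative:
from pyuarm.tools.miniterm import UArmCmd
shell = UArmCmd()                              # connects to a detected uArm on construction
shell.onecmd('set_position 100 200 150')       # X Y Z in millimetres
shell.onecmd('set_position 100 200 150 50')    # same move, with speed 50 mm/s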
|
https://github.com/uarm-developer/pyuarm/blob/17dc08c6c5a6533064614aecdcdcc41052554a55/pyuarm/tools/miniterm.py#L107-L123
|
from cmd import Cmd
from .list_uarms import get_uarm_port_cli, uarm_ports
from ..uarm import UArm, UArmConnectException
from ..log import DEBUG, printf
version = "0.1.6"
class UArmCmd(Cmd):
help_msg = "Shortcut:" + "\n"
help_msg += "Quit: " + "Ctrl + D"
help_msg += ", or input: " + "quit" + "\n"
help_msg += "Clear Screen: " + "Ctrl + L"
ON_OFF = ['on', 'off']
FIRMWARE = ['version', 'force', 'upgrade']
SERVO_STATUS = ['attach', 'detach']
prompt = ">>> "
intro = "Welcome to use uArm Command Line - v{}\n" .format(version)
intro += help_msg
intro += "\n\n"
intro += "Input help for more usage"
ruler = '-'
arm = None
def __init__(self, port=None, debug=False, *args, **kwargs):
Cmd.__init__(self, *args, **kwargs)
self.__connect(port=port, debug=debug)
def __is_connected(self):
if self.arm is None:
print("No uArm is connected, please use connect")
return False
else:
if self.arm.connection_state:
return True
else:
print("No uArm is connected, please use connect command")
return False
def do_connect(self, arg):
if len(arg) != 0:
self.__connect(arg)
elif len(arg) == 0:
self.__connect()
def __connect(self, port=None, debug=False, timeout=1):
if self.arm is None:
if port is not None:
self.arm = UArm(port_name=port, debug=debug, timeout=timeout)
else:
ports = uarm_ports()
if len(ports) > 1:
uarm_port = get_uarm_port_cli()
try:
self.arm = UArm(port_name=uarm_port, debug=debug, timeout=timeout)
except UArmConnectException as e:
print("uArm Connect failed, {}".format(str(e)))
elif len(ports) == 1:
self.arm = UArm(debug=debug, timeout=timeout)
self.arm.connect()
elif len(ports) == 0:
print("No uArm ports is found.")
else:
if self.arm.connection_state:
print("uArm is already connected, port: {}".format(self.arm.port_name))
else:
if self.arm.connect():
print("uArm port: {} is reconnected")
def do_disconnect(self, arg):
if self.arm is not None:
if self.arm.connection_state:
self.arm.disconnect()
|
MIT License
|
oshlack/mintie
|
annotate/annotate_contigs.py
|
get_next_letter
|
python
|
def get_next_letter(last_letter):
try:
next_letter_pos = np.where(ASCII_LETTERS == last_letter[-1])[0][0]+1
next_letter = list(string.ascii_letters)[next_letter_pos]
next_letter = last_letter[:-1] + next_letter if len(last_letter) > 1 else next_letter
return next_letter
except IndexError:
return last_letter + 'a'
|
Convenience function to get next letter in alphabet
|
https://github.com/oshlack/mintie/blob/88b28687bd31a35df6cf686ea524c5b14d05f3a6/annotate/annotate_contigs.py#L118-L129
|
import pandas as pd
import numpy as np
import pysam
import os
import pickle
import re
import logging
import sys
import string
import block_helper as bh
import constants
from argparse import ArgumentParser
from intervaltree import Interval, IntervalTree
from cv_vcf import CrypticVariant, VCF
from utils import cached, init_logging, exit_with_error
PROGRAM_NAME = "annotate_contigs"
ASCII_LETTERS = np.array(list(string.ascii_letters))
record = {}
def parse_args():
description = 'Annotate contigs'
parser = ArgumentParser(description=description)
parser.add_argument('--log',
metavar='LOG_FILE',
type=str,
help='record program progress in LOG_FILE')
parser.add_argument(dest='sample',
metavar='SAMPLE',
type=str,
help='''Sample name''')
parser.add_argument(dest='bam_file',
metavar='BAM_FILE',
type=str,
help='''SAM or BAM format file containing contig alignments''')
parser.add_argument(dest='junc_file',
metavar='JUNC_FILE',
type=str,
help='''Reference file containing transcripts and their
respective splice junctions.''')
parser.add_argument(dest='tx_ref_file',
type=str,
metavar='TX_REF_FILE',
help='''Transcriptome GTF reference file.''')
parser.add_argument(dest='output_bam',
metavar='OUTPUT_BAM',
type=str,
help='''BAM file to write contigs which pass filtering''')
parser.add_argument(dest='contig_info_output',
metavar='CONTIG_INFO_OUT',
type=str,
help='''Contig info output file''')
parser.add_argument('--minClip',
metavar='MIN_CLIP',
type=int,
help='''Minimum novel block or softclip size.''')
parser.add_argument('--minGap',
metavar='MIN_GAP',
type=int,
help='''Minimum gap (deletion or insertion) size.''')
parser.add_argument('--minMatch',
metavar='MIN_MATCH',
type=str,
help='''Comma separated: <minimum bp matching reference>,
<minimum percent of contig aligned to reference>.''')
return parser.parse_args()
def set_globals(args):
global MIN_CLIP
global MIN_GAP
global MIN_MATCH_BP
global MIN_MATCH_PERC
if args.minClip:
MIN_CLIP = args.minClip
else:
MIN_CLIP = constants.DEFAULT_MIN_CLIP
if args.minGap:
MIN_GAP = args.minGap
else:
MIN_GAP = constants.DEFAULT_MIN_GAP
if args.minMatch:
min_bp, min_perc = args.minMatch.split(',')
MIN_MATCH_BP = int(min_bp)
MIN_MATCH_PERC = float(min_perc)
assert MIN_MATCH_BP > 0
assert MIN_MATCH_PERC <= 1 and MIN_MATCH_PERC > 0
else:
MIN_MATCH_BP = constants.DEFAULT_MIN_MATCH_BP
MIN_MATCH_PERC = constants.DEFAULT_MIN_MATCH_PERC
|
MIT License
|
thingsboard/python_tb_rest_client
|
tb_rest_client/models/models_pe/role.py
|
Role.owner_id
|
python
|
def owner_id(self):
return self._owner_id
|
Gets the owner_id of this Role. # noqa: E501
:return: The owner_id of this Role. # noqa: E501
:rtype: EntityId
|
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/role.py#L191-L198
|
import pprint
import re
import six
class Role(object):
swagger_types = {
'additional_info': 'str',
'created_time': 'int',
'customer_id': 'CustomerId',
'id': 'RoleId',
'name': 'str',
'owner_id': 'EntityId',
'permissions': 'str',
'tenant_id': 'TenantId',
'type': 'str'
}
attribute_map = {
'additional_info': 'additionalInfo',
'created_time': 'createdTime',
'customer_id': 'customerId',
'id': 'id',
'name': 'name',
'owner_id': 'ownerId',
'permissions': 'permissions',
'tenant_id': 'tenantId',
'type': 'type'
}
def __init__(self, additional_info=None, created_time=None, customer_id=None, id=None, name=None, owner_id=None, permissions=None, tenant_id=None, type=None):
self._additional_info = None
self._created_time = None
self._customer_id = None
self._id = None
self._name = None
self._owner_id = None
self._permissions = None
self._tenant_id = None
self._type = None
self.discriminator = None
if additional_info is not None:
self.additional_info = additional_info
if created_time is not None:
self.created_time = created_time
if customer_id is not None:
self.customer_id = customer_id
if id is not None:
self.id = id
if name is not None:
self.name = name
if owner_id is not None:
self.owner_id = owner_id
if permissions is not None:
self.permissions = permissions
if tenant_id is not None:
self.tenant_id = tenant_id
if type is not None:
self.type = type
@property
def additional_info(self):
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
self._additional_info = additional_info
@property
def created_time(self):
return self._created_time
@created_time.setter
def created_time(self, created_time):
self._created_time = created_time
@property
def customer_id(self):
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
self._customer_id = customer_id
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
|
Apache License 2.0
|
riotkit-org/infracheck
|
infracheck/infracheck/runner.py
|
Runner.run_checks
|
python
|
def run_checks(self, enabled_configs: list) -> None:
for config_name in enabled_configs:
result = None
check = self.config_loader.load(config_name)
if not result:
try:
result = self.run_single_check(check)
except CheckNotReadyShouldBeSkippedSignal:
continue
self.repository.push_to_cache(config_name, result)
if self.wait_time > 0:
time.sleep(self.wait_time)
|
Runs checks one-by-one and saves to cache
Notice: Executed on application COLLECTION stage
:param enabled_configs: List of enabled configuration files (json files)
:return:
|
https://github.com/riotkit-org/infracheck/blob/e328e9ad722e6e2906aca378270099017198f602/infracheck/infracheck/runner.py#L101-L125
|
import subprocess
import os
import json
import re
import time
from datetime import datetime, timedelta
from rkd.api.inputoutput import IO
from .exceptions import RunnerException, CheckNotReadyShouldBeSkippedSignal
from .model import ExecutedCheckResult, ExecutedChecksResultList, ConfiguredCheck
from .repository import Repository
from .config import ConfigLoader
from .rkd_support import is_rkd_check, prepare_rkd_check_bin_path, add_rkd_environment_variables
class Runner(object):
paths: list
timeout: int
wait_time: int
config_loader: ConfigLoader
repository: Repository
io: IO
def __init__(self, dirs: list, config_loader: ConfigLoader, repository: Repository, io: IO,
timeout: int = 1800, wait_time: int = 0):
self.timeout = timeout
self.wait_time = wait_time
self.paths = []
self.config_loader = config_loader
self.repository = repository
self.io = io
for path in dirs:
self.paths.append(path + '/checks')
def run_single_check(self, check: ConfiguredCheck) -> ExecutedCheckResult:
if not check.should_check_run(self.repository.find_cache_time(check.name)):
self.io.debug('Check "{}" not ready to run, maybe the cache life time prevents from execution'
.format(check.name))
raise CheckNotReadyShouldBeSkippedSignal(check.name)
self.io.debug('Executing check {}'.format(check.name))
bin_path = self._get_check_path(check.check_type)
bin_path = self._append_commandline_switches(check.input_variables, bin_path)
try:
self.io.debug('bin_path=' + str(bin_path))
env = {**dict(os.environ), **self._prepare_data(check.check_type, check.input_variables)}
env = self._add_environment_variables(env, check.check_type)
timeout = env['INFRACHECK_TIMEOUT'] if 'INFRACHECK_TIMEOUT' in env else self.timeout
output = subprocess.check_output(bin_path, env=env, stderr=subprocess.PIPE, timeout=timeout)
exit_status = True
except subprocess.CalledProcessError as e:
output = e.output + e.stderr
self.io.warn('{} returned error: {}'.format(check.name, output.decode('utf-8')))
exit_status = False
except subprocess.TimeoutExpired as e:
output = b'Timed out: '
if e.output:
output += e.output
if e.stderr:
output += e.stderr
self.io.error('{} timed out and returned: {}'.format(check.name, output.decode('utf-8')))
exit_status = False
self.io.debug('Execution finished, running hooks...')
hooks_out = self._notify_hooks(check.hooks, exit_status, self.io)
return ExecutedCheckResult(
output=output.decode('utf-8'),
exit_status=exit_status,
hooks_output=hooks_out,
configured_name=check.name,
description=check.description
)
|
Apache License 2.0
|
christiantremblay/pyhaystack
|
pyhaystack/client/widesky.py
|
WideskyHaystackSession.__init__
|
python
|
def __init__(
self,
uri,
username,
password,
client_id,
client_secret,
api_dir="api",
auth_dir="oauth2/token",
impersonate=None,
**kwargs
):
super(WideskyHaystackSession, self).__init__(uri, api_dir, **kwargs)
self._auth_dir = auth_dir
self._username = username
self._password = password
self._client_id = client_id
self._client_secret = client_secret
self._auth_result = None
self._impersonate = impersonate
|
Initialise a VRT Widesky Project Haystack session handler.
:param uri: Base URI for the Haystack installation.
:param username: Authentication user name.
:param password: Authentication password.
:param client_id: Authentication client ID.
:param client_secret: Authentication client secret.
:param impersonate: A widesky user ID to impersonate (or None)
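A construction sketch with placeholder endpoint and credentials; depending on the installed pyhaystack version, extra keyword arguments (for example a concrete HTTP client) may need to be passed through **kwargs:
from pyhaystack.client.widesky import WideskyHaystackSession
session = WideskyHaystackSession(
    uri="https://demo.widesky.example.com",   # placeholder base URI
    username="operator@example.com",          # placeholder credentials
    password="not-a-real-password",
    client_id="example-client-id",
    client_secret="example-client-secret",
    impersonate=None,                         # or a WideSky user ID to act on behalf of
)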
|
https://github.com/christiantremblay/pyhaystack/blob/3c3508212c0f5a4e5f254fed4d169da91f03242d/pyhaystack/client/widesky.py#L48-L77
|
from time import time
from .session import HaystackSession
from .ops.vendor.widesky import (
WideskyAuthenticateOperation,
CreateEntityOperation,
WideSkyHasFeaturesOperation,
WideSkyPasswordChangeOperation,
)
from .mixins.vendor.widesky import crud, multihis, password
from ..util.asyncexc import AsynchronousException
from .http.exceptions import HTTPStatusError
def _decode_str(s, enc="utf-8"):
try:
return s.decode(enc)
except AttributeError:
return s
class WideskyHaystackSession(
crud.CRUDOpsMixin,
multihis.MultiHisOpsMixin,
password.PasswordOpsMixin,
HaystackSession,
):
_AUTH_OPERATION = WideskyAuthenticateOperation
_CREATE_ENTITY_OPERATION = CreateEntityOperation
_HAS_FEATURES_OPERATION = WideSkyHasFeaturesOperation
_PASSWORD_CHANGE_OPERATION = WideSkyPasswordChangeOperation
|
Apache License 2.0
|
ganymede42/h5pyviewer
|
h5pyViewer/userSample.py
|
waveforms
|
python
|
def waveforms(hid):
ds=h5py.Dataset(hid)
plt.plot( ds[-10,:,:])
plt.show()
|
plots the 10th last waveforms
|
https://github.com/ganymede42/h5pyviewer/blob/065bd2361df31fd38fa2eec087c91d1c2c06b1ab/h5pyViewer/userSample.py#L29-L33
|
import h5py
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot1d(hid):
print(hid)
ds=h5py.Dataset(hid)
ds
plt.plot(ds[0,:])
plt.show()
def test1(hid):
print(hid)
ds=h5py.Dataset(hid)
ds
plt.plot(ds[:,1])
plt.show()
def test2(hid):
ds=h5py.Dataset(hid)
ds
plt.plot(ds[:,0].T)
plt.show()
|
BSD 2-Clause Simplified License
|
romana/vpc-router
|
vpcrouter/watcher/plugins/configfile.py
|
Configfile.add_arguments
|
python
|
def add_arguments(cls, parser, sys_arg_list=None):
parser.add_argument('-f', '--file', dest='file', required=True,
help="config file for routing groups "
"(only in configfile mode)")
return ["file"]
|
Arguments for the configfile mode.
|
https://github.com/romana/vpc-router/blob/d696c2e023f1111ceb61f9c6fbabfafed8e14040/vpcrouter/watcher/plugins/configfile.py#L187-L195
|
import datetime
import json
import logging
import os
import watchdog.events
import watchdog.observers
from vpcrouter.errors import ArgsError
from vpcrouter.watcher import common
class RouteSpecChangeEventHandler(watchdog.events.FileSystemEventHandler):
def __init__(self, *args, **kwargs):
self._route_spec_fname = kwargs['route_spec_fname']
self._route_spec_abspath = kwargs['route_spec_abspath']
self._q_route_spec = kwargs['q_route_spec']
self._plugin = kwargs['plugin']
del kwargs['route_spec_fname']
del kwargs['route_spec_abspath']
del kwargs['q_route_spec']
del kwargs['plugin']
super(RouteSpecChangeEventHandler, self).__init__(*args, **kwargs)
def on_modified(self, event):
if type(event) is watchdog.events.FileModifiedEvent and event.src_path == self._route_spec_abspath:
logging.info("Detected file change event for %s" %
self._route_spec_abspath)
try:
route_spec = read_route_spec_config(self._route_spec_fname)
self._q_route_spec.put(route_spec)
if self._plugin:
self._plugin.last_route_spec_update = datetime.datetime.now()
except ValueError as e:
logging.warning("Cannot parse route spec: %s" % str(e))
def read_route_spec_config(fname):
try:
try:
f = open(fname, "r")
except IOError as e:
raise ValueError("Cannot open file: " + str(e))
data = json.loads(f.read())
f.close()
data = common.parse_route_spec_config(data)
except ValueError as e:
logging.error("Config ignored: %s" % str(e))
data = None
return data
class Configfile(common.WatcherPlugin):
def __init__(self, *args, **kwargs):
super(Configfile, self).__init__(*args, **kwargs)
self.last_route_spec_update = None
def start(self):
fname = self.conf['file']
logging.info("Configfile watcher plugin: Starting to watch route spec "
"file '%s' for changes..." % fname)
route_spec = {}
try:
route_spec = read_route_spec_config(fname)
if route_spec:
self.last_route_spec_update = datetime.datetime.now()
self.q_route_spec.put(route_spec)
except ValueError as e:
logging.warning("Cannot parse route spec: %s" % str(e))
abspath = os.path.abspath(fname)
parent_dir = os.path.dirname(abspath)
handler = RouteSpecChangeEventHandler(
route_spec_fname = fname,
route_spec_abspath = abspath,
q_route_spec = self.q_route_spec,
plugin = self)
self.observer_thread = watchdog.observers.Observer()
self.observer_thread.name = "ConfMon"
self.observer_thread.schedule(handler, parent_dir)
self.observer_thread.start()
def stop(self):
self.observer_thread.stop()
self.observer_thread.join()
logging.info("Configfile watcher plugin: Stopped")
def get_info(self):
return {
self.get_plugin_name() : {
"version" : self.get_version(),
"params" : {
"file" : self.conf['file']
},
"stats" : {
"last_route_spec_update" :
self.last_route_spec_update.isoformat()
if self.last_route_spec_update else "(no update, yet)"
}
}
}
@classmethod
|
Apache License 2.0
|
scikit-nano/scikit-nano
|
sknano/structures/_mwnt.py
|
MWNTMixin.Natoms_per_wall
|
python
|
def Natoms_per_wall(self):
return self.Natoms_list
|
Alias for :attr:`MWNT.Natoms_list`
|
https://github.com/scikit-nano/scikit-nano/blob/ef9b24165ba37918b3f520657f7311ba139b3e7d/sknano/structures/_mwnt.py#L149-L151
|
from __future__ import absolute_import, division, print_function, unicode_literals
__docformat__ = 'restructuredtext en'
import numpy as np
from sknano.core.refdata import aCC, element_data
from ._base import NanoStructureBase, r_CC_vdw
from ._swnt import SWNT, compute_dt
from ._extras import generate_Ch_list
__all__ = ['MWNTMixin', 'MWNT']
class MWNTMixin:
@property
def Ch_list(self):
return self._Ch_list
@Ch_list.setter
def Ch_list(self, value):
if not isinstance(value, list):
raise TypeError('Expected a list')
self._Ch_list = value[:]
@property
def chiral_types(self):
return [swnt.chiral_type for swnt in self.walls]
@chiral_types.setter
def chiral_types(self, value):
if not isinstance(value, list):
raise TypeError('Expected a list')
self.update_Ch_list(chiral_types=value)
@property
def chiral_set(self):
return set(self.chiral_types)
@property
def dt(self):
return self.walls[-1].dt
@property
def rt(self):
return self.walls[-1].rt
@property
def Natoms(self):
return np.asarray(self.Natoms_per_wall).sum()
@property
def Natoms_per_tube(self):
return self.Natoms
@property
def Ntubes(self):
return 1
@property
def Nwalls(self):
return len(self.Ch_list)
@Nwalls.setter
def Nwalls(self, value):
self.update_Ch_list(Nwalls=value)
@property
def min_wall_diameter(self):
return self._min_wall_diameter
@min_wall_diameter.setter
def min_wall_diameter(self, value):
self._min_wall_diameter = value
self.update_Ch_list()
@property
def max_wall_diameter(self):
return self._max_wall_diameter
@max_wall_diameter.setter
def max_wall_diameter(self, value):
self._max_wall_diameter = value
self.update_Ch_list()
@property
def max_walls(self):
return self._max_walls
@max_walls.setter
def max_walls(self, value):
self._max_walls = value
@property
def wall_spacing(self):
return self._wall_spacing
@wall_spacing.setter
def wall_spacing(self, value):
self._wall_spacing = value
self.update_Ch_list()
@property
def tube_mass(self):
return np.asarray([swnt.tube_mass for swnt in self.walls]).sum()
@property
|
BSD 2-Clause Simplified License
|
bitmovin/bitmovin-api-sdk-python
|
bitmovin_api_sdk/models/cmaf_muxing.py
|
CmafMuxing.init_segment_name
|
python
|
def init_segment_name(self, init_segment_name):
if init_segment_name is not None:
if not isinstance(init_segment_name, string_types):
raise TypeError("Invalid type for `init_segment_name`, type has to be `string_types`")
self._init_segment_name = init_segment_name
|
Sets the init_segment_name of this CmafMuxing.
Init segment name
:param init_segment_name: The init_segment_name of this CmafMuxing.
:type: string_types
|
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/cmaf_muxing.py#L199-L213
|
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.muxing import Muxing
from bitmovin_api_sdk.models.stream_conditions_mode import StreamConditionsMode
import pprint
import six
class CmafMuxing(Muxing):
@poscheck_model
def __init__(self,
id_=None,
name=None,
description=None,
created_at=None,
modified_at=None,
custom_data=None,
streams=None,
outputs=None,
avg_bitrate=None,
min_bitrate=None,
max_bitrate=None,
ignored_by=None,
stream_conditions_mode=None,
segment_length=None,
segment_naming=None,
segment_naming_template=None,
init_segment_name=None,
init_segment_name_template=None,
segments_muxed=None,
frames_per_cmaf_chunk=None):
super(CmafMuxing, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data, streams=streams, outputs=outputs, avg_bitrate=avg_bitrate, min_bitrate=min_bitrate, max_bitrate=max_bitrate, ignored_by=ignored_by, stream_conditions_mode=stream_conditions_mode)
self._segment_length = None
self._segment_naming = None
self._segment_naming_template = None
self._init_segment_name = None
self._init_segment_name_template = None
self._segments_muxed = None
self._frames_per_cmaf_chunk = None
self.discriminator = None
if segment_length is not None:
self.segment_length = segment_length
if segment_naming is not None:
self.segment_naming = segment_naming
if segment_naming_template is not None:
self.segment_naming_template = segment_naming_template
if init_segment_name is not None:
self.init_segment_name = init_segment_name
if init_segment_name_template is not None:
self.init_segment_name_template = init_segment_name_template
if segments_muxed is not None:
self.segments_muxed = segments_muxed
if frames_per_cmaf_chunk is not None:
self.frames_per_cmaf_chunk = frames_per_cmaf_chunk
@property
def openapi_types(self):
types = {}
if hasattr(super(CmafMuxing, self), 'openapi_types'):
types = getattr(super(CmafMuxing, self), 'openapi_types')
types.update({
'segment_length': 'float',
'segment_naming': 'string_types',
'segment_naming_template': 'string_types',
'init_segment_name': 'string_types',
'init_segment_name_template': 'string_types',
'segments_muxed': 'int',
'frames_per_cmaf_chunk': 'int'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(CmafMuxing, self), 'attribute_map'):
attributes = getattr(super(CmafMuxing, self), 'attribute_map')
attributes.update({
'segment_length': 'segmentLength',
'segment_naming': 'segmentNaming',
'segment_naming_template': 'segmentNamingTemplate',
'init_segment_name': 'initSegmentName',
'init_segment_name_template': 'initSegmentNameTemplate',
'segments_muxed': 'segmentsMuxed',
'frames_per_cmaf_chunk': 'framesPerCmafChunk'
})
return attributes
@property
def segment_length(self):
return self._segment_length
@segment_length.setter
def segment_length(self, segment_length):
if segment_length is not None:
if not isinstance(segment_length, (float, int)):
raise TypeError("Invalid type for `segment_length`, type has to be `float`")
self._segment_length = segment_length
@property
def segment_naming(self):
return self._segment_naming
@segment_naming.setter
def segment_naming(self, segment_naming):
if segment_naming is not None:
if not isinstance(segment_naming, string_types):
raise TypeError("Invalid type for `segment_naming`, type has to be `string_types`")
self._segment_naming = segment_naming
@property
def segment_naming_template(self):
return self._segment_naming_template
@segment_naming_template.setter
def segment_naming_template(self, segment_naming_template):
if segment_naming_template is not None:
if not isinstance(segment_naming_template, string_types):
raise TypeError("Invalid type for `segment_naming_template`, type has to be `string_types`")
self._segment_naming_template = segment_naming_template
@property
def init_segment_name(self):
return self._init_segment_name
@init_segment_name.setter
|
MIT License
|
spotify/luigi
|
luigi/contrib/s3.py
|
S3Client.move
|
python
|
def move(self, source_path, destination_path, **kwargs):
self.copy(source_path, destination_path, **kwargs)
self.remove(source_path)
|
Rename/move an object from one S3 location to another.
:param source_path: The `s3://` path of the directory or key to copy from
:param destination_path: The `s3://` path of the directory or key to copy to
:param kwargs: Keyword arguments are passed to the boto3 function `copy`
|
https://github.com/spotify/luigi/blob/ad5ddc9875e54cca8209863a8ec7bcc5d13ece8a/luigi/contrib/s3.py#L213-L221
|
import datetime
import itertools
import logging
import os
import os.path
import warnings
from multiprocessing.pool import ThreadPool
from urllib.parse import urlsplit
from configparser import NoSectionError
from luigi import configuration
from luigi.format import get_default_format
from luigi.parameter import OptionalParameter, Parameter
from luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory
from luigi.task import ExternalTask
logger = logging.getLogger('luigi-interface')
try:
from boto3.s3.transfer import TransferConfig
import botocore
except ImportError:
logger.warning("Loading S3 module without the python package boto3. "
"Will crash at runtime if S3 functionality is used.")
S3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'
S3_DIRECTORY_MARKER_SUFFIX_1 = '/'
class InvalidDeleteException(FileSystemException):
pass
class FileNotFoundException(FileSystemException):
pass
class DeprecatedBotoClientException(Exception):
pass
class S3Client(FileSystem):
_s3 = None
DEFAULT_PART_SIZE = 8388608
DEFAULT_THREADS = 100
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None,
**kwargs):
options = self._get_s3_config()
options.update(kwargs)
if aws_access_key_id:
options['aws_access_key_id'] = aws_access_key_id
if aws_secret_access_key:
options['aws_secret_access_key'] = aws_secret_access_key
if aws_session_token:
options['aws_session_token'] = aws_session_token
self._options = options
@property
def s3(self):
import boto3
options = dict(self._options)
if self._s3:
return self._s3
aws_access_key_id = options.get('aws_access_key_id')
aws_secret_access_key = options.get('aws_secret_access_key')
role_arn = options.get('aws_role_arn')
role_session_name = options.get('aws_role_session_name')
aws_session_token = options.get('aws_session_token')
if role_arn and role_session_name:
sts_client = boto3.client('sts')
assumed_role = sts_client.assume_role(RoleArn=role_arn,
RoleSessionName=role_session_name)
aws_secret_access_key = assumed_role['Credentials'].get(
'SecretAccessKey')
aws_access_key_id = assumed_role['Credentials'].get('AccessKeyId')
aws_session_token = assumed_role['Credentials'].get('SessionToken')
logger.debug('using aws credentials via assumed role {} as defined in luigi config'
.format(role_session_name))
for key in ['aws_access_key_id', 'aws_secret_access_key',
'aws_role_session_name', 'aws_role_arn', 'aws_session_token']:
if key in options:
options.pop(key)
if not (aws_access_key_id and aws_secret_access_key):
logger.debug('no credentials provided, delegating credentials resolution to boto3')
try:
self._s3 = boto3.resource('s3',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
**options)
except TypeError as e:
logger.error(e.args[0])
if 'got an unexpected keyword argument' in e.args[0]:
raise DeprecatedBotoClientException(
"Now using boto3. Check that you're passing the correct arguments")
raise
return self._s3
@s3.setter
def s3(self, value):
self._s3 = value
def exists(self, path):
(bucket, key) = self._path_to_bucket_and_key(path)
if self._is_root(key):
return True
if self._exists(bucket, key):
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False
def remove(self, path, recursive=True):
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)
if self._is_root(key):
raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)
if self._exists(bucket, key):
self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True
if self.isdir(path) and not recursive:
raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)
delete_key_list = [{'Key': obj.key} for obj in s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]
if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})
if len(delete_key_list) > 0:
n = 1000
for i in range(0, len(delete_key_list), n):
self.s3.meta.client.delete_objects(Bucket=bucket, Delete={'Objects': delete_key_list[i: i + n]})
return True
return False
|
Apache License 2.0
|
mars-project/mars
|
mars/tensor/base/flatten.py
|
flatten
|
python
|
def flatten(a, order="C"):
from ..reshape.reshape import TensorReshape, calc_shape
if np.isnan(sum(a.shape)):
raise ValueError(f"tensor shape is unknown, {a.shape}")
new_shape = calc_shape(a.size, -1)
tensor_order = get_order(order, a.order)
op = TensorReshape(new_shape, dtype=a.dtype, create_view=False)
return op(a, order=tensor_order, out_shape=new_shape)
|
Return a copy of the tensor collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : Tensor
A copy of the input tensor, flattened to one dimension.
See Also
--------
ravel : Return a flattened tensor.
flat : A 1-D flat iterator over the tensor.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1,2], [3,4]])
>>> a.flatten().execute()
array([1, 2, 3, 4])
|
https://github.com/mars-project/mars/blob/d50d9f8d8e966756e8b9dc80aca53a3e4607e7e0/mars/tensor/base/flatten.py#L20-L63
|
import numpy as np
from ..utils import get_order
|
Apache License 2.0
|
nonegg/aredis
|
aredis/commands/pubsub.py
|
parse_cluster_pubsub_numsub
|
python
|
def parse_cluster_pubsub_numsub(res, **options):
aggregate = options.get('aggregate', True)
if not aggregate:
return res
numsub_d = dict()
for _, numsub_tups in res.items():
for channel, numsubbed in numsub_tups:
try:
numsub_d[channel] += numsubbed
except KeyError:
numsub_d[channel] = numsubbed
ret_numsub = []
for channel, numsub in numsub_d.items():
ret_numsub.append((channel, numsub))
return ret_numsub
|
Result callback, handles different return types
switchable by the `aggregate` flag.
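A small worked example of the callback; the node addresses and channel names are made up, but the shape of `res` matches the per-node replies the cluster client collects:
res = {
    "node-1:7000": [(b"news", 2), (b"sport", 1)],
    "node-2:7001": [(b"news", 3)],
}
parse_cluster_pubsub_numsub(res)                   # [(b"news", 5), (b"sport", 1)]
parse_cluster_pubsub_numsub(res, aggregate=False)  # returns the per-node dict unchanged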
|
https://github.com/nonegg/aredis/blob/b46e67163692cd0796763e5c9e17394821d9280c/aredis/commands/pubsub.py#L80-L100
|
from aredis.pubsub import (PubSub,
ClusterPubSub)
from aredis.utils import (dict_merge,
merge_result,
list_keys_to_dict,
NodeFlag)
def parse_pubsub_numsub(response, **options):
return list(zip(response[0::2], response[1::2]))
class PubSubCommandMixin:
RESPONSE_CALLBACKS = {
'PUBSUB NUMSUB': parse_pubsub_numsub,
}
def pubsub(self, **kwargs):
return PubSub(self.connection_pool, **kwargs)
async def publish(self, channel, message):
return await self.execute_command('PUBLISH', channel, message)
async def pubsub_channels(self, pattern='*'):
return await self.execute_command('PUBSUB CHANNELS', pattern)
async def pubsub_numpat(self):
return await self.execute_command('PUBSUB NUMPAT')
async def pubsub_numsub(self, *args):
return await self.execute_command('PUBSUB NUMSUB', *args)
def parse_cluster_pubsub_channels(res, **options):
aggregate = options.get('aggregate', True)
if not aggregate:
return res
return merge_result(res)
def parse_cluster_pubsub_numpat(res, **options):
aggregate = options.get('aggregate', True)
if not aggregate:
return res
numpat = 0
for node, node_numpat in res.items():
numpat += node_numpat
return numpat
|
MIT License
|
tmux-python/tmuxp
|
tmuxp/plugin.py
|
TmuxpPlugin._pass_version_check
|
python
|
def _pass_version_check(self, version, vmin, vmax, incompatible):
if vmin and version < LooseVersion(vmin):
return False
if vmax and version > LooseVersion(vmax):
return False
if version in incompatible:
return False
return True
|
Provide affirmative if version compatibility is correct.
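A quick sketch of the check in isolation, assuming tmux and libtmux are installed so the default constructor succeeds:
from distutils.version import LooseVersion
from tmuxp.plugin import TmuxpPlugin
plugin = TmuxpPlugin()  # default version constraints
# True: 2.4 satisfies the 1.8 minimum, there is no maximum, and it is not blacklisted
plugin._pass_version_check(LooseVersion("2.4"), "1.8", None, [])
# False: the version is explicitly listed as incompatible
plugin._pass_version_check(LooseVersion("2.4"), "1.8", None, ["2.4"])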
|
https://github.com/tmux-python/tmuxp/blob/dc263d3719aafa756f9fbb742f8c3b85edaece54/tmuxp/plugin.py#L134-L145
|
from distutils.version import LooseVersion
import libtmux
from libtmux.common import get_version
from .__about__ import __version__
from .exc import TmuxpPluginException
TMUX_MIN_VERSION = '1.8'
TMUX_MAX_VERSION = None
LIBTMUX_MIN_VERSION = '0.8.3'
LIBTMUX_MAX_VERSION = None
TMUXP_MIN_VERSION = '1.6.0'
TMUXP_MAX_VERSION = None
class TmuxpPlugin:
def __init__(
self,
plugin_name='tmuxp-plugin',
tmux_min_version=TMUX_MIN_VERSION,
tmux_max_version=TMUX_MAX_VERSION,
tmux_version_incompatible=None,
libtmux_min_version=LIBTMUX_MIN_VERSION,
libtmux_max_version=LIBTMUX_MAX_VERSION,
libtmux_version_incompatible=None,
tmuxp_min_version=TMUXP_MIN_VERSION,
tmuxp_max_version=TMUXP_MAX_VERSION,
tmuxp_version_incompatible=None,
):
self.plugin_name = plugin_name
self.tmux_version = get_version()
self.libtmux_version = libtmux.__version__
self.tmuxp_version = LooseVersion(__version__)
self.version_constraints = {
'tmux': {
'version': self.tmux_version,
'vmin': tmux_min_version,
'vmax': tmux_max_version,
'incompatible': tmux_version_incompatible
if tmux_version_incompatible
else [],
},
'libtmux': {
'version': self.libtmux_version,
'vmin': libtmux_min_version,
'vmax': libtmux_max_version,
'incompatible': libtmux_version_incompatible
if libtmux_version_incompatible
else [],
},
'tmuxp': {
'version': self.tmuxp_version,
'vmin': tmuxp_min_version,
'vmax': tmuxp_max_version,
'incompatible': tmuxp_version_incompatible
if tmuxp_version_incompatible
else [],
},
}
self._version_check()
def _version_check(self):
for dep, constraints in self.version_constraints.items():
try:
assert self._pass_version_check(**constraints)
except AssertionError:
raise TmuxpPluginException(
'Incompatible {dep} version: {version}\n{plugin_name} '
'requirements:\nmin: {vmin} | max: {vmax} | '
'incompatible: {incompatible}\n'.format(
dep=dep, plugin_name=self.plugin_name, **constraints
)
)
|
MIT License
|
nmichlo/disent
|
disent/dataset/data/_groundtruth__norb.py
|
read_norb_dataset
|
python
|
def read_norb_dataset(dat_path: str, cat_path: str, info_path: str, gzipped=True, sort=True) -> Tuple[np.ndarray, np.ndarray]:
dat = read_binary_matrix_file(dat_path, gzipped=gzipped)
cat = read_binary_matrix_file(cat_path, gzipped=gzipped)
info = read_binary_matrix_file(info_path, gzipped=gzipped)
factors = np.column_stack([cat, info])
factors[:, 3] = factors[:, 3] / 2
images = dat[:, 0]
if sort:
indices = np.lexsort(factors[:, [4, 3, 2, 1, 0]].T)
images = images[indices]
factors = factors[indices]
return images, factors
|
Load The Normalised Dataset
* dat:
- images (5 categories, 5 instances, 6 lightings, 9 elevations, and 18 azimuths)
* cat:
- initial ground truth factor:
0. category of images (0 for animal, 1 for human, 2 for plane, 3 for truck, 4 for car).
* info:
- additional ground truth factors:
1. the instance in the category (0 to 9)
2. the elevation (0 to 8, meaning the cameras are 30, 35, 40, 45, 50, 55, 60, 65, 70 degrees from the horizontal, respectively)
3. the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in degrees)
4. the lighting condition (0 to 5)
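A usage sketch; the file names below are the typical gzipped smallNORB training archive names and should be adjusted to wherever the files actually live:
from disent.dataset.data._groundtruth__norb import read_norb_dataset
images, factors = read_norb_dataset(
    dat_path="smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat.gz",
    cat_path="smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat.gz",
    info_path="smallnorb-5x46789x9x18x6x2x96x96-training-info.mat.gz",
)
# images holds the first image of each stereo pair; factors has one row per sample with
# columns (category, instance, elevation, azimuth/2, lighting), lexsorted when sort=True.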
|
https://github.com/nmichlo/disent/blob/9bdd81d2ee8b0fbf1cc0d09993a5efd2fa9e5bd1/disent/dataset/data/_groundtruth__norb.py#L94-L123
|
import gzip
import logging
import os
from typing import Optional
from typing import Sequence
from typing import Tuple
import numpy as np
from disent.dataset.util.datafile import DataFileHashedDl
from disent.dataset.data._groundtruth import DiskGroundTruthData
_BINARY_MATRIX_TYPES = {
0x1E3D4C55: 'uint8',
0x1E3D4C54: 'int32',
0x1E3D4C56: 'int16',
0x1E3D4C51: 'float32',
0x1E3D4C53: 'float64',
}
def read_binary_matrix_bytes(bytes):
dtype = int(np.frombuffer(bytes, dtype='int32', count=1, offset=0))
ndim = int(np.frombuffer(bytes, dtype='int32', count=1, offset=4))
stored_ndim = max(3, ndim)
dims = np.frombuffer(bytes, dtype='int32', count=stored_ndim, offset=8)[0:ndim]
data = np.frombuffer(bytes, dtype=_BINARY_MATRIX_TYPES[dtype], count=-1, offset=8 + stored_ndim * 4)
data = data.reshape(tuple(dims))
return data
def read_binary_matrix_file(file, gzipped: bool = True):
with (gzip.open if gzipped else open)(file, "rb") as f:
return read_binary_matrix_bytes(bytes=f.read())
|
MIT License
|
kartverket/midgard
|
midgard/data/collection.py
|
Collection.default_field_suffix
|
python
|
def default_field_suffix(self, suffix):
if suffix:
suffix = str(suffix)
if not suffix.startswith("_"):
suffix = "_" + suffix
else:
suffix = None
for collection in self.for_each_fieldtype("collection"):
collection.default_field_suffix = suffix
self._default_field_suffix = suffix
|
Set the default field suffix
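A small example of the normalisation done by the setter and getter:
from midgard.data.collection import Collection
coll = Collection()
coll.default_field_suffix = "smoothed"
coll.default_field_suffix        # "_smoothed"  (a leading underscore is added)
coll.default_field_suffix = None
coll.default_field_suffix        # ""           (the getter maps None to an empty string)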
|
https://github.com/kartverket/midgard/blob/faf8963c9e0e49255c90a60ba5671277912777fd/midgard/data/collection.py#L131-L141
|
import copy
from typing import List, Dict, Any
import numpy as np
from midgard.data import fieldtypes
from midgard.dev import console
from midgard.dev import exceptions
from midgard.math.unit import Unit
class Collection:
type = "collection"
def __init__(self):
self._fields = dict()
self._default_field_suffix = None
@property
def fields(self):
all_fields = list()
for fieldname, field in self._fields.items():
all_fields.append(fieldname)
try:
all_fields.extend([f"{fieldname}.{f}" for f in field.fields])
except AttributeError:
pass
return sorted(all_fields)
def fieldnames(self):
all_fields = list()
for field in self._fields.values():
all_fields.extend(field.subfields)
return sorted(all_fields)
def field(self, fieldname: str) -> "FieldType":
mainfield, _, subfield = fieldname.partition(".")
try:
field = self._fields[mainfield]
except KeyError:
raise exceptions.FieldDoesNotExistError(f"Field {mainfield!r} does not exist") from None
if subfield:
field_data = field.data
try:
field = field_data.field(subfield)
except AttributeError:
pass
return field
@property
def plot_fields(self):
all_fields = list()
for fieldname, field in self._fields.items():
if fieldname.endswith("_"):
continue
all_fields.extend(field.plot_fields)
return sorted(all_fields)
def unit(self, field):
mainfield, _, subfield = field.partition(".")
try:
return self._fields[mainfield].unit(subfield)
except KeyError:
raise exceptions.FieldDoesNotExistError(f"Field {mainfield!r} does not exist") from None
def unit_short(self, field):
units = self.unit(field)
if units is None:
return tuple()
return tuple([Unit.symbol(u) for u in units])
def set_unit(self, field, new_unit):
mainfield, _, subfield = field.partition(".")
try:
return self._fields[mainfield].set_unit(subfield, new_unit)
except KeyError:
raise exceptions.FieldDoesNotExistError(f"Field {mainfield!r} does not exist") from None
def for_each_fieldtype(self, fieldtype):
for field in self._fields.values():
module = field.__module__.split(".")[-1]
if fieldtype == module:
yield field.data
def for_each_suffix(self, key):
*collections, key = key.split(".")
container = self
for c in collections:
container = container._fields[c].data
previous_field_suffix = self.default_field_suffix
sm = [(f[len(key) :], container._fields[f].multiplier) for f in container._fields if f.startswith(key)]
for suffix, multiplier in sm:
if suffix and not suffix[1:].isdigit():
continue
self.default_field_suffix = suffix
yield multiplier
self.default_field_suffix = previous_field_suffix
@property
def default_field_suffix(self):
if self._default_field_suffix is None:
return ""
return self._default_field_suffix
@default_field_suffix.setter
|
MIT License
|
shenyunhang/ws-jds
|
detectron/datasets/json_dataset.py
|
JsonDataset.get_roidb
|
python
|
def get_roidb(
self,
gt=False,
proposal_file=None,
min_proposal_size=20,
proposal_limit=-1,
crowd_filter_thresh=0
):
assert gt is True or crowd_filter_thresh == 0, 'Crowd filter threshold must be 0 if ground-truth annotations ' 'are not included.'
image_ids = self.COCO.getImgIds()
image_ids.sort()
roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))
for entry in roidb:
self._prep_roidb_entry(entry)
if gt:
self.debug_timer.tic()
for entry in roidb:
self._add_gt_annotations(entry)
logger.debug(
'_add_gt_annotations took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
if cfg.USE_PSEUDO and 'test' not in self.name:
pgt_roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))
for entry in pgt_roidb:
self._prep_roidb_entry(entry)
self._add_pseudo_gt_annotations(pgt_roidb, roidb)
roidb = pgt_roidb
if proposal_file is not None:
self.debug_timer.tic()
self._add_proposals_from_file(
roidb, proposal_file, min_proposal_size, proposal_limit,
crowd_filter_thresh
)
logger.debug(
'_add_proposals_from_file took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
_add_class_assignments(roidb)
return roidb
|
Return an roidb corresponding to the json dataset. Optionally:
- include ground truth boxes in the roidb
- add proposals specified in a proposals file
- filter proposals based on a minimum side length
- filter proposals that intersect with crowd regions
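A usage sketch; the dataset name must be registered in dataset_catalog and the proposal file below is a placeholder path:
from detectron.datasets.json_dataset import JsonDataset
dataset = JsonDataset("coco_2014_train")
roidb = dataset.get_roidb(
    gt=True,                         # attach ground-truth boxes to each entry
    proposal_file="proposals.pkl",   # hypothetical precomputed proposals
    proposal_limit=2000,
    crowd_filter_thresh=0.7,
)
print(len(roidb), "images loaded into the roidb")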
|
https://github.com/shenyunhang/ws-jds/blob/4827791640c5affef1af4b548333694973f5c0d5/detectron/datasets/json_dataset.py#L87-L139
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import logging
import numpy as np
import os
import scipy.sparse
import detectron.utils.env as envu
envu.set_up_matplotlib()
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
from detectron.core.config import cfg
from detectron.utils.timer import Timer
import detectron.datasets.dataset_catalog as dataset_catalog
import detectron.utils.boxes as box_utils
from detectron.utils.io import load_object
import detectron.utils.segms as segm_utils
logger = logging.getLogger(__name__)
class JsonDataset(object):
def __init__(self, name):
assert dataset_catalog.contains(name), 'Unknown dataset name: {}'.format(name)
assert os.path.exists(dataset_catalog.get_im_dir(name)), 'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
assert os.path.exists(dataset_catalog.get_ann_fn(name)), 'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
logger.debug('Creating: {}'.format(name))
self.name = name
self.image_directory = dataset_catalog.get_im_dir(name)
self.image_prefix = dataset_catalog.get_im_prefix(name)
self.COCO = COCO(dataset_catalog.get_ann_fn(name))
self.debug_timer = Timer()
category_ids = self.COCO.getCatIds()
categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
self.category_to_id_map = dict(zip(categories, category_ids))
self.classes = ['__background__'] + categories
self.num_classes = len(self.classes)
self.json_category_id_to_contiguous_id = {
v: i + 1
for i, v in enumerate(self.COCO.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k
for k, v in self.json_category_id_to_contiguous_id.items()
}
self._init_keypoints()
logger.info(self.classes)
logger.info(self.json_category_id_to_contiguous_id)
logger.info(self.contiguous_category_id_to_json_id)
|
Apache License 2.0
|
thalesgroup/pycryptoki
|
pycryptoki/key_generator.py
|
c_destroy_object
|
python
|
def c_destroy_object(h_session, h_object_value):
ret = C_DestroyObject(h_session, CK_OBJECT_HANDLE(h_object_value))
return ret
|
Deletes the object corresponding to the passed-in object handle
:param int h_session: Session handle
:param int h_object_value: The handle of the object to delete
:returns: Return code
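A minimal sketch; the handles below are illustrative and would normally come from an earlier session-open and key-generation call:
from pycryptoki.key_generator import c_destroy_object
h_session = 1   # hypothetical session handle
h_key = 42      # hypothetical object handle
ret = c_destroy_object(h_session, h_key)
# CKR_OK is 0 in PKCS#11; anything else means the delete failed
assert ret == 0, "C_DestroyObject returned 0x%08x" % ret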
|
https://github.com/thalesgroup/pycryptoki/blob/b1c97389b9db11c8bd96722db5347cc54a051602/pycryptoki/key_generator.py#L22-L30
|
from ctypes import byref
from .attributes import Attributes
from .cryptoki import C_DeriveKey
from .cryptoki import (
C_DestroyObject,
CK_OBJECT_HANDLE,
CK_ULONG,
C_GenerateKey,
C_GenerateKeyPair,
C_CopyObject,
)
from .default_templates import CKM_DES_KEY_GEN_TEMP, get_default_key_pair_template
from .defines import CKM_DES_KEY_GEN, CKM_RSA_PKCS_KEY_PAIR_GEN
from .mechanism import parse_mechanism
from .exceptions import make_error_handle_function
|
Apache License 2.0
|
system73/tamarco
|
tamarco/core/microservice.py
|
MicroserviceBase.start_settings
|
python
|
async def start_settings(self):
self.logger.info("Starting microservice settings")
await self.settings.start()
self.deploy_name = await self.settings.get(f"{ROOT_SETTINGS}.deploy_name")
await self._configure_logging_settings()
await self._configure_resource_settings()
|
Initializes the settings of the microservice.
|
https://github.com/system73/tamarco/blob/c85bec267d39057a4cd5f1c9854d5e2840cebb1e/tamarco/core/microservice.py#L149-L155
|
import asyncio
import logging
import sys
import time
import uuid
from collections import OrderedDict
from collections.abc import Callable
from functools import partial
from threading import Thread
from typing import Coroutine, Union
from tamarco.core.dependency_resolver import CantSolveDependencies, resolve_dependency_order
from tamarco.core.logging.logging import Logging
from tamarco.core.patterns import Singleton
from tamarco.core.settings.settings import Settings, SettingsView
from tamarco.core.signals import SignalsManager
from tamarco.core.tasks import TasksManager, get_task_wrapper, get_thread_wrapper
from tamarco.core.utils import Informer, ROOT_SETTINGS, get_fn_full_signature
from tamarco.resources.bases import BaseResource
from tamarco.resources.basic.metrics.resource import MetricsResource
from tamarco.resources.basic.registry.resource import Registry
from tamarco.resources.basic.status.resource import StatusResource
from tamarco.resources.debug.profiler import ProfilerResource
from tamarco.resources.io.http.resource import HTTPServerResource
logger = logging.getLogger("tamarco")
class MicroserviceBase(metaclass=Singleton):
name = None
instance_id = uuid.uuid4()
deploy_name = None
extra_loggers_names = []
loop = asyncio.get_event_loop()
tasks_manager = TasksManager()
settings = Settings()
logging = Logging()
@property
def loggers_names(self):
loggers = {"tamarco", "tamarco.tasks", "tamarco.settings", "asyncio"}
for resource in self.resources.values():
loggers.update(resource.loggers_names)
loggers.update(self.extra_loggers_names)
loggers.update({self.name})
return loggers
def __new__(cls, *args, **kwargs):
cls.resources = OrderedDict()
dependency_graph = {
attr_name: getattr(cls, attr_name).depends_on
for attr_name in dir(cls)
if isinstance(getattr(cls, attr_name), BaseResource)
}
try:
resources_dep_ordered = resolve_dependency_order(dependency_graph)
except CantSolveDependencies as e:
print(e, file=sys.stderr)
exit(12)
else:
for name in resources_dep_ordered:
cls.resources[name] = getattr(cls, name)
return super().__new__(cls, *args, **kwargs)
def __init__(self):
assert self.name is not None, "Error, name should be defined in your microservice class"
self.logger = None
self._configure_provisional_logger()
def _configure_provisional_logger(self):
self.logger = logging.getLogger(self.name)
stdout_handler = logging.StreamHandler(sys.stdout)
print(f"Configuring logger provisional logger of {self.name} to INFO and stdout")
self.logger.setLevel(logging.INFO)
self.logger.addHandler(stdout_handler)
self.logger.info(f"Configured {self.name} logger")
async def bind(self):
self.logger.info(f"Binding to microservice the resources: {list(self.resources.keys())}")
await self.settings.bind(self.loop)
for name, resource in self.resources.items():
try:
await resource.bind(self, name)
except Exception:
self.logger.exception(f"Unexpected exception binding the resource {resource}")
exit(11)
async def run_in_all_resources(self, method):
for resource in self.resources.values():
self.logger.debug(f"Calling {method} of resource {resource.name}")
try:
await getattr(resource, method)()
except Exception:
self.logger.exception(f"Error in {method} of resource {resource}")
else:
if method == "start":
self.logger.info(f"Started {resource.name} from {self.name}")
async def start_logging(self):
self.logger.info(f"Starting logging in microservice {self.name} with loggers: {self.loggers_names}")
await self.logging.start(
loggers=self.loggers_names, microservice_name=self.name, deploy_name=self.deploy_name, loop=self.loop
)
Informer.log_all_info(self.logger)
async def stop_settings(self):
self.logger.info("Stopping microservice settings")
await self.settings.stop()
|
MIT License
|
sertit/eoreader
|
eoreader/products/sar/csk_product.py
|
CskProduct.wgs84_extent
|
python
|
def wgs84_extent(self) -> gpd.GeoDataFrame:
root, _ = self.read_mtd()
def from_str_to_arr(geo_coord: str):
return np.array(strings.str_to_list(geo_coord), dtype=float)[:2][::-1]
bl_corner = from_str_to_arr(root.findtext(".//GeoCoordBottomLeft"))
br_corner = from_str_to_arr(root.findtext(".//GeoCoordBottomRight"))
tl_corner = from_str_to_arr(root.findtext(".//GeoCoordTopLeft"))
tr_corner = from_str_to_arr(root.findtext(".//GeoCoordTopRight"))
if bl_corner is None:
raise InvalidProductError("Invalid XML: missing extent.")
extent_wgs84 = gpd.GeoDataFrame(
geometry=[Polygon([tl_corner, tr_corner, br_corner, bl_corner])],
crs=vectors.WGS84,
)
return extent_wgs84
|
Get the WGS84 extent of the file before any reprojection.
This is useful when the SAR pre-process has not been done yet.
.. code-block:: python
>>> from eoreader.reader import Reader
>>> path = r"1011117-766193"
>>> prod = Reader().open(path)
>>> prod.wgs84_extent()
geometry
0 POLYGON ((108.09797 15.61011, 108.48224 15.678...
Returns:
gpd.GeoDataFrame: WGS84 extent as a gpd.GeoDataFrame
|
https://github.com/sertit/eoreader/blob/040628b042f396ea330a64b47e68175ab4bcbb46/eoreader/products/sar/csk_product.py#L197-L234
|
import logging
import warnings
from datetime import datetime
from enum import unique
from pathlib import Path
from typing import Union
import geopandas as gpd
import numpy as np
import rasterio
from cloudpathlib import AnyPath, CloudPath
from lxml import etree
from sertit import files, strings, vectors
from sertit.misc import ListEnum
from shapely.geometry import Polygon
from eoreader import utils
from eoreader.exceptions import InvalidProductError
from eoreader.products.sar.sar_product import SarProduct, SarProductType
from eoreader.utils import DATETIME_FMT, EOREADER_NAME
LOGGER = logging.getLogger(EOREADER_NAME)
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
@unique
class CskProductType(ListEnum):
RAW = "RAW"
SCS = "SCS"
DGM = "DGM"
GEC = "GEC"
GTC = "GTC"
@unique
class CskSensorMode(ListEnum):
HI = "HIMAGE"
PP = "PINGPONG"
WR = "WIDEREGION"
HR = "HUGEREGION"
S2 = "ENHANCED SPOTLIGHT"
@unique
class CskPolarization(ListEnum):
HH = "HH"
VV = "VV"
HV = "HV"
VH = "VH"
CO = "CO"
CH = "CH"
CV = "CV"
class CskProduct(SarProduct):
def __init__(
self,
product_path: Union[str, CloudPath, Path],
archive_path: Union[str, CloudPath, Path] = None,
output_path: Union[str, CloudPath, Path] = None,
remove_tmp: bool = False,
) -> None:
try:
product_path = AnyPath(product_path)
self._img_path = next(product_path.glob("*.h5"))
except IndexError as ex:
raise InvalidProductError(
f"Image file (*.h5) not found in {product_path}"
) from ex
self._real_name = files.get_filename(self._img_path)
super().__init__(product_path, archive_path, output_path, remove_tmp)
def _set_resolution(self) -> float:
def_res = None
try:
root, _ = self.read_mtd()
def_res = float(root.findtext(".//GroundRangeGeometricResolution"))
except (InvalidProductError, TypeError):
raise InvalidProductError(
"GroundRangeGeometricResolution or rowSpacing not found in metadata !"
)
return def_res
def _pre_init(self) -> None:
self._raw_band_regex = "*_{}_*.h5"
self._band_folder = self.path
self._snap_path = self._img_path.name
self.needs_extraction = True
super()._pre_init()
def _post_init(self) -> None:
super()._post_init()
|
Apache License 2.0
|
veeresht/commpy
|
commpy/channels.py
|
bec
|
python
|
def bec(input_bits, p_e):
output_bits = input_bits.copy()
output_bits[random(len(output_bits)) <= p_e] = -1
return output_bits
|
Binary Erasure Channel.
Parameters
----------
input_bits : 1D ndarray containing {0, 1}
Input array of bits to the channel.
p_e : float in [0, 1]
Erasure probability of the channel.
Returns
-------
output_bits : 1D ndarray containing {0, 1, -1}
Output bits from the channel; erased positions are set to -1.
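A short example; the exact erased positions depend on the random draw, so only the range of output values is guaranteed:
import numpy as np
from commpy.channels import bec
tx = np.array([0, 1, 1, 0, 1, 0, 0, 1])
rx = bec(tx, 0.25)
# Each position is erased independently with probability 0.25; erased bits are
# overwritten with -1, so rx contains values from {0, 1, -1}.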
|
https://github.com/veeresht/commpy/blob/aab0f22b57bfbccee95f44b78108824feee3af91/commpy/channels.py#L630-L649
|
from __future__ import division, print_function
from numpy import complex, abs, sqrt, sum, zeros, identity, hstack, einsum, trace, kron, absolute, fromiter, array, exp, pi, cos
from numpy.random import randn, random, standard_normal
from scipy.linalg import sqrtm
__all__ = ['SISOFlatChannel', 'MIMOFlatChannel', 'bec', 'bsc', 'awgn']
class _FlatChannel(object):
def __init__(self):
self.noises = None
self.channel_gains = None
self.unnoisy_output = None
def generate_noises(self, dims):
assert self.noise_std is not None, "Noise standard deviation must be set before propagation."
if self.isComplex:
self.noises = (standard_normal(dims) + 1j * standard_normal(dims)) * self.noise_std * 0.5
else:
self.noises = standard_normal(dims) * self.noise_std
def set_SNR_dB(self, SNR_dB, code_rate: float = 1., Es=1):
self.noise_std = sqrt((self.isComplex + 1) * self.nb_tx * Es / (code_rate * 10 ** (SNR_dB / 10)))
def set_SNR_lin(self, SNR_lin, code_rate=1, Es=1):
self.noise_std = sqrt((self.isComplex + 1) * self.nb_tx * Es / (code_rate * SNR_lin))
@property
def isComplex(self):
return self._isComplex
class SISOFlatChannel(_FlatChannel):
@property
def nb_tx(self):
return 1
@property
def nb_rx(self):
return 1
def __init__(self, noise_std=None, fading_param=(1, 0)):
super(SISOFlatChannel, self).__init__()
self.noise_std = noise_std
self.fading_param = fading_param
def propagate(self, msg):
if isinstance(msg[0], complex) and not self.isComplex:
raise TypeError('Trying to propagate a complex message in a real channel.')
nb_symb = len(msg)
self.generate_noises(nb_symb)
self.channel_gains = self.fading_param[0]
if self.isComplex:
self.channel_gains += (standard_normal(nb_symb) + 1j * standard_normal(nb_symb)) * sqrt(0.5 * self.fading_param[1])
else:
self.channel_gains += standard_normal(nb_symb) * sqrt(self.fading_param[1])
self.unnoisy_output = self.channel_gains * msg
return self.unnoisy_output + self.noises
@property
def fading_param(self):
return self._fading_param
@fading_param.setter
def fading_param(self, fading_param):
if fading_param[1] + absolute(fading_param[0]) ** 2 != 1:
raise ValueError("With this parameters, the channel would add or remove energy.")
self._fading_param = fading_param
self._isComplex = isinstance(fading_param[0], complex)
@property
def k_factor(self):
return absolute(self.fading_param[0]) ** 2 / absolute(self.fading_param[1])
class MIMOFlatChannel(_FlatChannel):
def __init__(self, nb_tx, nb_rx, noise_std=None, fading_param=None):
super(MIMOFlatChannel, self).__init__()
self.nb_tx = nb_tx
self.nb_rx = nb_rx
self.noise_std = noise_std
if fading_param is None:
self.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))
else:
self.fading_param = fading_param
def propagate(self, msg):
if isinstance(msg[0], complex) and not self.isComplex:
raise TypeError('Trying to propagate a complex message in a real channel.')
(nb_vect, mod) = divmod(len(msg), self.nb_tx)
if mod:
msg = hstack((msg, zeros(self.nb_tx - mod)))
nb_vect += 1
msg = msg.reshape(nb_vect, -1)
self.generate_noises((nb_vect, self.nb_rx))
dims = (nb_vect, self.nb_rx, self.nb_tx)
if self.isComplex:
self.channel_gains = (standard_normal(dims) + 1j * standard_normal(dims)) * sqrt(0.5)
else:
self.channel_gains = standard_normal(dims)
einsum('ij,ajk,lk->ail', sqrtm(self.fading_param[2]), self.channel_gains, sqrtm(self.fading_param[1]),
out=self.channel_gains, optimize='greedy')
self.channel_gains += self.fading_param[0]
self.unnoisy_output = einsum('ijk,ik->ij', self.channel_gains, msg)
return self.unnoisy_output + self.noises
def _update_corr_KBSM(self, betat, betar):
if betar < 0 or betat < 0:
raise ValueError("beta must be positif")
Er = array([[exp(-betar * abs(m - n)) for m in range(self.nb_rx)] for n in range(self.nb_rx)])
Et = array([[exp(-betat * abs(m - n)) for m in range(self.nb_tx)] for n in range(self.nb_tx)])
self.fading_param = self.fading_param[0], self.fading_param[1] * Et, self.fading_param[2] * Er
def specular_compo(self, thetat, dt, thetar, dr):
if dr < 0 or dt < 0:
raise ValueError("the distance must be positive ")
H = zeros((self.nb_rx, self.nb_tx), dtype=complex)
for n in range(self.nb_rx):
for m in range(self.nb_tx):
H[n, m] = exp(1j * 2 * pi * (n * dr * cos(thetar) - m * dt * cos(thetat)))
return H
@property
def fading_param(self):
return self._fading_param
@fading_param.setter
def fading_param(self, fading_param):
NLOS_gain = trace(kron(fading_param[1].T, fading_param[2]))
LOS_gain = einsum('ij,ij->', absolute(fading_param[0]), absolute(fading_param[0]))
if absolute(NLOS_gain + LOS_gain - self.nb_tx * self.nb_rx) > 1e-3:
raise ValueError("With this parameters, the channel would add or remove energy.")
self._fading_param = fading_param
self._isComplex = isinstance(fading_param[0][0, 0], complex)
@property
def k_factor(self):
NLOS_gain = trace(kron(self.fading_param[1].T, self.fading_param[2]))
LOS_gain = einsum('ij,ij->', absolute(self.fading_param[0]), absolute(self.fading_param[0]))
return LOS_gain / NLOS_gain
def uncorr_rayleigh_fading(self, dtype):
self.fading_param = zeros((self.nb_rx, self.nb_tx), dtype), identity(self.nb_tx), identity(self.nb_rx)
def expo_corr_rayleigh_fading(self, t, r, betat=0, betar=0):
if abs(t) - 1 > 1e-4:
raise ValueError('abs(t) must be one.')
if abs(r) - 1 > 1e-4:
raise ValueError('abs(r) must be one.')
expo_tx = fromiter((j - i for i in range(self.nb_tx) for j in range(self.nb_tx)), int, self.nb_tx ** 2)
expo_rx = fromiter((j - i for i in range(self.nb_rx) for j in range(self.nb_rx)), int, self.nb_rx ** 2)
expo_tx = expo_tx.reshape(self.nb_tx, self.nb_tx)
expo_rx = expo_rx.reshape(self.nb_rx, self.nb_rx)
self.fading_param = zeros((self.nb_rx, self.nb_tx), complex), t ** expo_tx, r ** expo_rx
self._update_corr_KBSM(betat, betar)
def uncorr_rician_fading(self, mean, k_factor):
nb_antennas = mean.size
NLOS_gain = nb_antennas / (k_factor + 1)
mean = mean * sqrt(k_factor * NLOS_gain / einsum('ij,ij->', absolute(mean), absolute(mean)))
self.fading_param = mean, identity(self.nb_tx) * NLOS_gain / nb_antennas, identity(self.nb_rx)
def expo_corr_rician_fading(self, mean, k_factor, t, r, betat=0, betar=0):
if abs(t) - 1 > 1e-4:
raise ValueError('abs(t) must be one.')
if abs(r) - 1 > 1e-4:
raise ValueError('abs(r) must be one.')
nb_antennas = mean.size
NLOS_gain = nb_antennas / (k_factor + 1)
mean = mean * sqrt(k_factor * NLOS_gain / einsum('ij,ij->', absolute(mean), absolute(mean)))
expo_tx = fromiter((j - i for i in range(self.nb_tx) for j in range(self.nb_tx)), int, self.nb_tx ** 2)
expo_rx = fromiter((j - i for i in range(self.nb_rx) for j in range(self.nb_rx)), int, self.nb_rx ** 2)
expo_tx = expo_tx.reshape(self.nb_tx, self.nb_tx)
expo_rx = expo_rx.reshape(self.nb_rx, self.nb_rx)
self.fading_param = mean, t ** expo_tx * NLOS_gain / nb_antennas, r ** expo_rx
self._update_corr_KBSM(betat, betar)
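# A brief, hedged usage sketch for MIMOFlatChannel (illustrative values; assumes the
# NumPy helpers imported by this module, e.g. standard_normal, are in scope):
channel = MIMOFlatChannel(nb_tx=2, nb_rx=2, noise_std=0.1)
channel.uncorr_rayleigh_fading(complex)               # uncorrelated Rayleigh fading parameters
msg = standard_normal(8) + 1j * standard_normal(8)    # length is a multiple of nb_tx (otherwise it is zero-padded)
received = channel.propagate(msg)                     # shape (4, nb_rx): 4 symbol vectors across the receive antennas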
|
BSD 3-Clause New or Revised License
|
dit/dit
|
dit/math/sigmaalgebra.py
|
sets2matrix
|
python
|
def sets2matrix(C, X=None):
C = {frozenset(c) for c in C}
if X is None:
Xset = frozenset().union(*C)
else:
Xset = frozenset(X)
for cet in C:
if not Xset.issuperset(cet):
msg = "Set {0} is not a subset of {1}".format(cet, Xset)
raise Exception(msg)
Cmatrix = [[1 if x in cet else 0 for x in Xset] for cet in C]
Cmatrix = np.array(Cmatrix, dtype=int)
return Cmatrix, Xset
|
Returns the sets in C as binary strings representing elements in X.
Parameters
----------
C : set of frozensets
The set of subsets of X.
X : frozenset, None
The underlying set. If None, then X is taken to be the union of the
sets in C.
Returns
-------
Cmatrix : NumPy array, shape ( len(C), len(X) )
The 0-1 matrix whose rows represent the sets in C. Each column indicates
whether the corresponding element of X is present in that set.
Xset : frozenset
The underlying set that was used to construct Cmatrix.
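Examples
--------
A small, hypothetical illustration (row/column order follows set iteration):
>>> C = [{'a', 'b'}, {'b', 'c'}]
>>> Cmatrix, Xset = sets2matrix(C)
>>> Cmatrix.shape
(2, 3)
>>> Xset == frozenset({'a', 'b', 'c'})
True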
|
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/math/sigmaalgebra.py#L23-L59
|
from collections import defaultdict
import numpy as np
from dit.utils import powerset
__all__ = (
'is_sigma_algebra',
'sigma_algebra',
'atom_set',
)
|
BSD 3-Clause New or Revised License
|
nidhaloff/igel
|
igel/extras/kmedians.py
|
KMedians._is_nonnegative
|
python
|
def _is_nonnegative(self, value, variable, strict = True):
if not isinstance(value,(int,np.integer)):
raise ValueError("%s should be an integer" % (variable))
if strict:
isnegative = value > 0
else:
isnegative = value >= 0
if not isnegative:
raise ValueError("%s should be non-negative" % (variable))
return isnegative
|
Checks that the value is an integer and is strictly positive when strict is True, or non-negative (zero allowed) when strict is False.
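A hedged illustration of how this private validator behaves (values are made up):
km = KMedians(n_clusters=4)
km._is_nonnegative(km.n_clusters, 'n_clusters')       # True: 4 > 0 satisfies the strict check
km._is_nonnegative(0, 'max_iter', strict=False)       # True: zero is accepted when strict is False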
|
https://github.com/nidhaloff/igel/blob/a78f098c7ed56e1cbc4328782687497c913948e5/igel/extras/kmedians.py#L99-L112
|
import random
import numpy as np
import warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.metrics.pairwise import pairwise_distances, pairwise_distances_argmin
class KMedians(BaseEstimator, ClusterMixin):
def __init__(self, n_clusters = 4, metric = 'manhattan', method = 'per-axis', init = 'random', max_iter = 300, tol = 0.0001, random_state = None):
self.n_clusters = n_clusters
self.metric = metric
self.method = method
self.init = init
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
def _get_random_state(self, seed = None):
if seed is None or seed is np.random:
return np.random.mtrand._rand
elif isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
return seed
|
MIT License
|
unofficial-memsource/memsource-cli-client
|
memsource_cli/models/file_import_settings_create_dto.py
|
FileImportSettingsCreateDto.resx
|
python
|
def resx(self, resx):
self._resx = resx
|
Sets the resx of this FileImportSettingsCreateDto.
:param resx: The resx of this FileImportSettingsCreateDto. # noqa: E501
:type: ResxSettingsDto
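A hedged illustration (assumes ResxSettingsDto can be constructed with defaults, as is typical for these generated DTOs):
dto = FileImportSettingsCreateDto()
dto.resx = ResxSettingsDto()
print(dto.resx)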
|
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/file_import_settings_create_dto.py#L933-L941
|
import pprint
import re
import six
from memsource_cli.models.android_settings_dto import AndroidSettingsDto
from memsource_cli.models.csv_settings_dto import CsvSettingsDto
from memsource_cli.models.dita_settings_dto import DitaSettingsDto
from memsource_cli.models.doc_book_settings_dto import DocBookSettingsDto
from memsource_cli.models.doc_settings_dto import DocSettingsDto
from memsource_cli.models.html_settings_dto import HtmlSettingsDto
from memsource_cli.models.idml_settings_dto import IdmlSettingsDto
from memsource_cli.models.json_settings_dto import JsonSettingsDto
from memsource_cli.models.mac_settings_dto import MacSettingsDto
from memsource_cli.models.md_settings_dto import MdSettingsDto
from memsource_cli.models.mif_settings_dto import MifSettingsDto
from memsource_cli.models.multilingual_xls_settings_dto import MultilingualXlsSettingsDto
from memsource_cli.models.multilingual_xml_settings_dto import MultilingualXmlSettingsDto
from memsource_cli.models.pdf_settings_dto import PdfSettingsDto
from memsource_cli.models.php_settings_dto import PhpSettingsDto
from memsource_cli.models.po_settings_dto import PoSettingsDto
from memsource_cli.models.ppt_settings_dto import PptSettingsDto
from memsource_cli.models.properties_settings_dto import PropertiesSettingsDto
from memsource_cli.models.psd_settings_dto import PsdSettingsDto
from memsource_cli.models.quark_tag_settings_dto import QuarkTagSettingsDto
from memsource_cli.models.resx_settings_dto import ResxSettingsDto
from memsource_cli.models.sdl_xlf_settings_dto import SdlXlfSettingsDto
from memsource_cli.models.tm_match_settings_dto import TMMatchSettingsDto
from memsource_cli.models.ttx_settings_dto import TtxSettingsDto
from memsource_cli.models.txt_settings_dto import TxtSettingsDto
from memsource_cli.models.xlf2_settings_dto import Xlf2SettingsDto
from memsource_cli.models.xlf_settings_dto import XlfSettingsDto
from memsource_cli.models.xls_settings_dto import XlsSettingsDto
from memsource_cli.models.xml_settings_dto import XmlSettingsDto
from memsource_cli.models.yaml_settings_dto import YamlSettingsDto
class FileImportSettingsCreateDto(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'input_charset': 'str',
'output_charset': 'str',
'zip_charset': 'str',
'file_format': 'str',
'target_length': 'bool',
'target_length_max': 'int',
'target_length_percent': 'bool',
'target_length_percent_value': 'float',
'segmentation_rule_id': 'int',
'target_segmentation_rule_id': 'int',
'android': 'AndroidSettingsDto',
'csv': 'CsvSettingsDto',
'dita': 'DitaSettingsDto',
'doc_book': 'DocBookSettingsDto',
'doc': 'DocSettingsDto',
'html': 'HtmlSettingsDto',
'idml': 'IdmlSettingsDto',
'json': 'JsonSettingsDto',
'mac': 'MacSettingsDto',
'md': 'MdSettingsDto',
'mif': 'MifSettingsDto',
'multilingual_xls': 'MultilingualXlsSettingsDto',
'multilingual_xml': 'MultilingualXmlSettingsDto',
'pdf': 'PdfSettingsDto',
'php': 'PhpSettingsDto',
'po': 'PoSettingsDto',
'ppt': 'PptSettingsDto',
'properties': 'PropertiesSettingsDto',
'psd': 'PsdSettingsDto',
'quark_tag': 'QuarkTagSettingsDto',
'resx': 'ResxSettingsDto',
'sdl_xlf': 'SdlXlfSettingsDto',
'tm_match': 'TMMatchSettingsDto',
'ttx': 'TtxSettingsDto',
'txt': 'TxtSettingsDto',
'xlf2': 'Xlf2SettingsDto',
'xlf': 'XlfSettingsDto',
'xls': 'XlsSettingsDto',
'xml': 'XmlSettingsDto',
'yaml': 'YamlSettingsDto'
}
attribute_map = {
'input_charset': 'inputCharset',
'output_charset': 'outputCharset',
'zip_charset': 'zipCharset',
'file_format': 'fileFormat',
'target_length': 'targetLength',
'target_length_max': 'targetLengthMax',
'target_length_percent': 'targetLengthPercent',
'target_length_percent_value': 'targetLengthPercentValue',
'segmentation_rule_id': 'segmentationRuleId',
'target_segmentation_rule_id': 'targetSegmentationRuleId',
'android': 'android',
'csv': 'csv',
'dita': 'dita',
'doc_book': 'docBook',
'doc': 'doc',
'html': 'html',
'idml': 'idml',
'json': 'json',
'mac': 'mac',
'md': 'md',
'mif': 'mif',
'multilingual_xls': 'multilingualXls',
'multilingual_xml': 'multilingualXml',
'pdf': 'pdf',
'php': 'php',
'po': 'po',
'ppt': 'ppt',
'properties': 'properties',
'psd': 'psd',
'quark_tag': 'quarkTag',
'resx': 'resx',
'sdl_xlf': 'sdlXlf',
'tm_match': 'tmMatch',
'ttx': 'ttx',
'txt': 'txt',
'xlf2': 'xlf2',
'xlf': 'xlf',
'xls': 'xls',
'xml': 'xml',
'yaml': 'yaml'
}
def __init__(self, input_charset=None, output_charset=None, zip_charset=None, file_format=None, target_length=None, target_length_max=None, target_length_percent=None, target_length_percent_value=None, segmentation_rule_id=None, target_segmentation_rule_id=None, android=None, csv=None, dita=None, doc_book=None, doc=None, html=None, idml=None, json=None, mac=None, md=None, mif=None, multilingual_xls=None, multilingual_xml=None, pdf=None, php=None, po=None, ppt=None, properties=None, psd=None, quark_tag=None, resx=None, sdl_xlf=None, tm_match=None, ttx=None, txt=None, xlf2=None, xlf=None, xls=None, xml=None, yaml=None):
self._input_charset = None
self._output_charset = None
self._zip_charset = None
self._file_format = None
self._target_length = None
self._target_length_max = None
self._target_length_percent = None
self._target_length_percent_value = None
self._segmentation_rule_id = None
self._target_segmentation_rule_id = None
self._android = None
self._csv = None
self._dita = None
self._doc_book = None
self._doc = None
self._html = None
self._idml = None
self._json = None
self._mac = None
self._md = None
self._mif = None
self._multilingual_xls = None
self._multilingual_xml = None
self._pdf = None
self._php = None
self._po = None
self._ppt = None
self._properties = None
self._psd = None
self._quark_tag = None
self._resx = None
self._sdl_xlf = None
self._tm_match = None
self._ttx = None
self._txt = None
self._xlf2 = None
self._xlf = None
self._xls = None
self._xml = None
self._yaml = None
self.discriminator = None
if input_charset is not None:
self.input_charset = input_charset
if output_charset is not None:
self.output_charset = output_charset
if zip_charset is not None:
self.zip_charset = zip_charset
if file_format is not None:
self.file_format = file_format
if target_length is not None:
self.target_length = target_length
if target_length_max is not None:
self.target_length_max = target_length_max
if target_length_percent is not None:
self.target_length_percent = target_length_percent
if target_length_percent_value is not None:
self.target_length_percent_value = target_length_percent_value
if segmentation_rule_id is not None:
self.segmentation_rule_id = segmentation_rule_id
if target_segmentation_rule_id is not None:
self.target_segmentation_rule_id = target_segmentation_rule_id
if android is not None:
self.android = android
if csv is not None:
self.csv = csv
if dita is not None:
self.dita = dita
if doc_book is not None:
self.doc_book = doc_book
if doc is not None:
self.doc = doc
if html is not None:
self.html = html
if idml is not None:
self.idml = idml
if json is not None:
self.json = json
if mac is not None:
self.mac = mac
if md is not None:
self.md = md
if mif is not None:
self.mif = mif
if multilingual_xls is not None:
self.multilingual_xls = multilingual_xls
if multilingual_xml is not None:
self.multilingual_xml = multilingual_xml
if pdf is not None:
self.pdf = pdf
if php is not None:
self.php = php
if po is not None:
self.po = po
if ppt is not None:
self.ppt = ppt
if properties is not None:
self.properties = properties
if psd is not None:
self.psd = psd
if quark_tag is not None:
self.quark_tag = quark_tag
if resx is not None:
self.resx = resx
if sdl_xlf is not None:
self.sdl_xlf = sdl_xlf
if tm_match is not None:
self.tm_match = tm_match
if ttx is not None:
self.ttx = ttx
if txt is not None:
self.txt = txt
if xlf2 is not None:
self.xlf2 = xlf2
if xlf is not None:
self.xlf = xlf
if xls is not None:
self.xls = xls
if xml is not None:
self.xml = xml
if yaml is not None:
self.yaml = yaml
@property
def input_charset(self):
return self._input_charset
@input_charset.setter
def input_charset(self, input_charset):
self._input_charset = input_charset
@property
def output_charset(self):
return self._output_charset
@output_charset.setter
def output_charset(self, output_charset):
self._output_charset = output_charset
@property
def zip_charset(self):
return self._zip_charset
@zip_charset.setter
def zip_charset(self, zip_charset):
self._zip_charset = zip_charset
@property
def file_format(self):
return self._file_format
@file_format.setter
def file_format(self, file_format):
allowed_values = ["doc", "ppt", "xls", "xlf", "xlf2", "sdlxliff", "ttx", "html", "xml", "mif", "tmx", "idml", "dita", "json", "po", "ts", "icml", "yaml", "properties", "csv", "android_string", "desktop_entry", "mac_strings", "pdf", "windows_rc", "xml_properties", "joomla_ini", "magento_csv", "dtd", "mozilla_properties", "plist", "plain_text", "srt", "sub", "sbv", "wiki", "resx", "resjson", "chrome_json", "epub", "svg", "docbook", "wpxliff", "multiling_xml", "multiling_xls", "mqxliff", "php", "psd", "tag", "md", "vtt"]
if file_format not in allowed_values:
raise ValueError(
"Invalid value for `file_format` ({0}), must be one of {1}"
.format(file_format, allowed_values)
)
self._file_format = file_format
@property
def target_length(self):
return self._target_length
@target_length.setter
def target_length(self, target_length):
self._target_length = target_length
@property
def target_length_max(self):
return self._target_length_max
@target_length_max.setter
def target_length_max(self, target_length_max):
self._target_length_max = target_length_max
@property
def target_length_percent(self):
return self._target_length_percent
@target_length_percent.setter
def target_length_percent(self, target_length_percent):
self._target_length_percent = target_length_percent
@property
def target_length_percent_value(self):
return self._target_length_percent_value
@target_length_percent_value.setter
def target_length_percent_value(self, target_length_percent_value):
self._target_length_percent_value = target_length_percent_value
@property
def segmentation_rule_id(self):
return self._segmentation_rule_id
@segmentation_rule_id.setter
def segmentation_rule_id(self, segmentation_rule_id):
self._segmentation_rule_id = segmentation_rule_id
@property
def target_segmentation_rule_id(self):
return self._target_segmentation_rule_id
@target_segmentation_rule_id.setter
def target_segmentation_rule_id(self, target_segmentation_rule_id):
self._target_segmentation_rule_id = target_segmentation_rule_id
@property
def android(self):
return self._android
@android.setter
def android(self, android):
self._android = android
@property
def csv(self):
return self._csv
@csv.setter
def csv(self, csv):
self._csv = csv
@property
def dita(self):
return self._dita
@dita.setter
def dita(self, dita):
self._dita = dita
@property
def doc_book(self):
return self._doc_book
@doc_book.setter
def doc_book(self, doc_book):
self._doc_book = doc_book
@property
def doc(self):
return self._doc
@doc.setter
def doc(self, doc):
self._doc = doc
@property
def html(self):
return self._html
@html.setter
def html(self, html):
self._html = html
@property
def idml(self):
return self._idml
@idml.setter
def idml(self, idml):
self._idml = idml
@property
def json(self):
return self._json
@json.setter
def json(self, json):
self._json = json
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
self._mac = mac
@property
def md(self):
return self._md
@md.setter
def md(self, md):
self._md = md
@property
def mif(self):
return self._mif
@mif.setter
def mif(self, mif):
self._mif = mif
@property
def multilingual_xls(self):
return self._multilingual_xls
@multilingual_xls.setter
def multilingual_xls(self, multilingual_xls):
self._multilingual_xls = multilingual_xls
@property
def multilingual_xml(self):
return self._multilingual_xml
@multilingual_xml.setter
def multilingual_xml(self, multilingual_xml):
self._multilingual_xml = multilingual_xml
@property
def pdf(self):
return self._pdf
@pdf.setter
def pdf(self, pdf):
self._pdf = pdf
@property
def php(self):
return self._php
@php.setter
def php(self, php):
self._php = php
@property
def po(self):
return self._po
@po.setter
def po(self, po):
self._po = po
@property
def ppt(self):
return self._ppt
@ppt.setter
def ppt(self, ppt):
self._ppt = ppt
@property
def properties(self):
return self._properties
@properties.setter
def properties(self, properties):
self._properties = properties
@property
def psd(self):
return self._psd
@psd.setter
def psd(self, psd):
self._psd = psd
@property
def quark_tag(self):
return self._quark_tag
@quark_tag.setter
def quark_tag(self, quark_tag):
self._quark_tag = quark_tag
@property
def resx(self):
return self._resx
@resx.setter
|
Apache License 2.0
|
cheind/pytorch-blender
|
pkg_blender/blendtorch/btb/camera.py
|
Camera.__init__
|
python
|
def __init__(self, bpy_camera=None, shape=None):
self.bpy_camera = bpy_camera or bpy.context.scene.camera
self.shape = shape or Camera.shape_from_bpy()
self.view_matrix = Camera.view_from_bpy(self.bpy_camera)
self.proj_matrix = Camera.proj_from_bpy(self.bpy_camera, self.shape)
|
Initialize camera object
Params
------
bpy_camera: bpy.types.Camera, None
Blender camera to attach to. When None, uses the scene's
default camera.
shape: tuple, None
(H,W) of image to create. When None, uses the default
render settings.
|
https://github.com/cheind/pytorch-blender/blob/eb5effb033094d037e7bdc2238c00806be7012ae/pkg_blender/blendtorch/btb/camera.py#L19-L34
|
import bpy, bpy_extras
from mathutils import Vector
import numpy as np
from . import utils
class Camera:
|
MIT License
|
tomplus/kubernetes_asyncio
|
kubernetes_asyncio/client/api/batch_v2alpha1_api.py
|
BatchV2alpha1Api.patch_namespaced_cron_job_with_http_info
|
python
|
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`")
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
if 'force' in local_var_params and local_var_params['force'] is not None:
query_params.append(('force', local_var_params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: if True, return the response data only, without
the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
|
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/api/batch_v2alpha1_api.py#L949-L1072
|
from __future__ import absolute_import
import re
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import (
ApiTypeError,
ApiValueError
)
class BatchV2alpha1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cron_job(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)
def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_cron_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`")
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_cron_job_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
if 'watch' in local_var_params and local_var_params['watch'] is not None:
query_params.append(('watch', local_var_params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
if 'watch' in local_var_params and local_var_params['watch'] is not None:
query_params.append(('watch', local_var_params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
|
Apache License 2.0
|
drexly/openhgsenti
|
lib/django/contrib/gis/gdal/layer.py
|
Layer.get_fields
|
python
|
def get_fields(self, field_name):
if field_name not in self.fields:
raise GDALException('invalid field name: %s' % field_name)
return [feat.get(field_name) for feat in self]
|
Returns a list containing the given field name for every Feature
in the Layer.
|
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/gdal/layer.py#L191-L198
|
from ctypes import byref, c_double
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.feature import Feature
from django.contrib.gis.gdal.field import OGRFieldTypes
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import (
ds as capi, geom as geom_api, srs as srs_api,
)
from django.contrib.gis.gdal.srs import SpatialReference
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
class Layer(GDALBase):
def __init__(self, layer_ptr, ds):
if not layer_ptr:
raise GDALException('Cannot create Layer, invalid pointer given')
self.ptr = layer_ptr
self._ds = ds
self._ldefn = capi.get_layer_defn(self._ptr)
self._random_read = self.test_capability(b'RandomRead')
def __getitem__(self, index):
if isinstance(index, six.integer_types):
if index < 0:
raise OGRIndexError('Negative indices are not allowed on OGR Layers.')
return self._make_feature(index)
elif isinstance(index, slice):
start, stop, stride = index.indices(self.num_feat)
return [self._make_feature(fid) for fid in range(start, stop, stride)]
else:
raise TypeError('Integers and slices may only be used when indexing OGR Layers.')
def __iter__(self):
capi.reset_reading(self._ptr)
for i in range(self.num_feat):
yield Feature(capi.get_next_feature(self._ptr), self)
def __len__(self):
return self.num_feat
def __str__(self):
return self.name
def _make_feature(self, feat_id):
if self._random_read:
try:
return Feature(capi.get_feature(self.ptr, feat_id), self)
except GDALException:
pass
else:
for feat in self:
if feat.fid == feat_id:
return feat
raise OGRIndexError('Invalid feature id: %s.' % feat_id)
@property
def extent(self):
env = OGREnvelope()
capi.get_extent(self.ptr, byref(env), 1)
return Envelope(env)
@property
def name(self):
name = capi.get_fd_name(self._ldefn)
return force_text(name, self._ds.encoding, strings_only=True)
@property
def num_feat(self, force=1):
return capi.get_feature_count(self.ptr, force)
@property
def num_fields(self):
return capi.get_field_count(self._ldefn)
@property
def geom_type(self):
return OGRGeomType(capi.get_fd_geom_type(self._ldefn))
@property
def srs(self):
try:
ptr = capi.get_layer_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(ptr))
except SRSException:
return None
@property
def fields(self):
return [force_text(capi.get_field_name(capi.get_field_defn(self._ldefn, i)),
self._ds.encoding, strings_only=True)
for i in range(self.num_fields)]
@property
def field_types(self):
return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))]
for i in range(self.num_fields)]
@property
def field_widths(self):
return [capi.get_field_width(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)]
@property
def field_precisions(self):
return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)]
def _get_spatial_filter(self):
try:
return OGRGeometry(geom_api.clone_geom(capi.get_spatial_filter(self.ptr)))
except GDALException:
return None
def _set_spatial_filter(self, filter):
if isinstance(filter, OGRGeometry):
capi.set_spatial_filter(self.ptr, filter.ptr)
elif isinstance(filter, (tuple, list)):
if not len(filter) == 4:
raise ValueError('Spatial filter list/tuple must have 4 elements.')
xmin, ymin, xmax, ymax = map(c_double, filter)
capi.set_spatial_filter_rect(self.ptr, xmin, ymin, xmax, ymax)
elif filter is None:
capi.set_spatial_filter(self.ptr, None)
else:
raise TypeError('Spatial filter must be either an OGRGeometry instance, a 4-tuple, or None.')
spatial_filter = property(_get_spatial_filter, _set_spatial_filter)
|
Apache License 2.0
|
tylerl/filterpipes
|
filterpipes.py
|
FilterPipesCommandBase.filter
|
python
|
def filter(self, data):
return None
|
Perform transformation on document text.
Args:
data: string containing selected text.
Returns:
string containing the desired replacement, or None to
indicate no operation.
|
https://github.com/tylerl/filterpipes/blob/067b0eb2a0a5bbdfbe56c3e73e00b7f08f5f8954/filterpipes.py#L67-L78
|
__author__ = 'Tyler Larson [github.com/tylerl/]'
__version__ = '1.1.0'
__license__ = 'Apache 2'
__copyright__ = 'Copyright 2015, Google Inc.'
import subprocess
import sublime
import sublime_plugin
import sys
import re
if sys.version_info[0] == 2:
PYTHON2=True
def is_str(obj):
return isinstance(obj, basestring)
else:
PYTHON2=False
def is_str(obj):
return isinstance(obj, str)
class FilterPipesCommandBase(sublime_plugin.TextCommand):
use_selections = True
errors_on_statusbar = True
report_success = True
report_failure = True
report_nochange = True
|
Apache License 2.0
|
sneakersinc/sniffmypacketsv2
|
src/sniffmypacketsv2/transforms/common/layers/http.py
|
_canonicalize_header
|
python
|
def _canonicalize_header(name):
return name.strip().lower()
|
Takes a header key (e.g., "Host" in "Host: www.google.com") and returns
its canonical representation (stripped of whitespace and lower-cased)
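A one-line illustration (hypothetical header value):
assert _canonicalize_header('  Host ') == 'host'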
|
https://github.com/sneakersinc/sniffmypacketsv2/blob/55d8ff70eedb4dd948351425c25a1e904ea6d50e/src/sniffmypacketsv2/transforms/common/layers/http.py#L10-L13
|
import re
from scapy.all import TCP, bind_layers, Packet, StrField
|
Apache License 2.0
|
gabrielstanovsky/props
|
props/graph_representation/graph_utils.py
|
reverse_graph_edges
|
python
|
def reverse_graph_edges(graph):
ret_graph = digraph()
for node in graph.nodes():
ret_graph.add_node(node)
for (u, v) in graph.edges():
ret_graph.add_edge((v, u),
label = graph.edge_label((u, v)))
return ret_graph
|
Returns a reversed version of the input graph.
I.e., for each edge (u, v) in graph, there will be an edge (v, u) in the
returned graph.
The labels aren't changed.
|
https://github.com/gabrielstanovsky/props/blob/c6392016214ee582de4eaf364e518078f9bd182b/props/graph_representation/graph_utils.py#L444-L462
|
from pygraph.algorithms.sorting import topological_sorting
from pygraph.classes.digraph import digraph
import subprocess, math, re, nltk
from pygraph.algorithms.accessibility import accessibility
from props.graph_representation.word import NO_INDEX, Word, strip_punctuations
from pygraph.algorithms.traversal import traversal
from pygraph.algorithms.minmax import minimal_spanning_tree, shortest_path
import cgi
import time
from props.graph_representation import newNode
from operator import itemgetter
import logging
def accessibility_wo_self(graph):
ret = accessibility(graph)
for k in ret:
ret[k].remove(k)
return ret
def duplicate_node(graph, node, connectToNeighbours):
dupNode = node.copy()
dupNode.isDuplicated = True
graph.add_node(dupNode)
if connectToNeighbours:
for curNeighbour in graph.neighbors(node):
graph.add_edge((dupNode, curNeighbour), graph.edge_label((node, curNeighbour)))
return dupNode
def get_node_dic(graph, node):
d = {}
for neighbor in graph.neighbors(node):
curLabel = graph.edge_label((node, neighbor))
if curLabel not in d:
d[curLabel] = []
d[curLabel].append(neighbor)
return d
def duplicateEdge(graph, orig, new, newLabel=""):
label = graph.edge_label(orig)
if not label:
label = newLabel
graph.add_edge(edge=new,
label=label)
def findChain(graph, func_ls):
def inner(nodes, func_ls):
if (len(func_ls) == 0):
return []
remaining_nodes = filter(func_ls[0], nodes)
for node in remaining_nodes:
curAns = inner(graph.neighbors(node), func_ls[1:])
if (len(curAns) == len(func_ls) - 1):
return [node] + curAns
return []
return inner(nodes=graph.nodes(),
func_ls=func_ls)
def delete_component(graph, node):
nodes = minimal_spanning_tree(graph=graph,
root=node)
for node in nodes:
graph.del_node(node)
def component_to_string(graph, node):
nodes = minimal_spanning_tree(graph=graph,
root=node)
texts = []
for node in nodes:
texts.extend([w for w in node.get_text(graph) if w.index != NO_INDEX])
chars = '\'\"-,.:;!? '
return " ".join([w.word for w in sorted(texts, key=lambda w:w.index)]).rstrip(chars).lstrip(chars)
def duplicate_component(graph, node):
nodesMap = {}
nodes = minimal_spanning_tree(graph=graph,
root=node)
for curNode in nodes:
dupNode = duplicate_node(graph=graph,
node=curNode,
connectToNeighbours=False)
nodesMap[curNode.uid] = dupNode
for curNode in nodes:
curDupNode = nodesMap[curNode.uid]
for curNeighbour in graph.neighbors(curNode):
curDupNeighbour = nodesMap[curNeighbour.uid]
graph.add_edge(edge=(curDupNode, curDupNeighbour),
label=graph.edge_label((curNode, curNeighbour)))
return nodesMap[node.uid]
def find_nodes(graph, filterFunc):
return filter(filterFunc, graph.nodes())
def find_edges(graph, filterFunc):
return filter(filterFunc, graph.edges())
def join(graph, nodeLs):
combinedNode = reduce(graph_representation.node.join, nodeLs)
graph.add_node(combinedNode)
for curNode in nodeLs:
if curNode.uid in graph.nodesMap:
for child in graph.neighbors(curNode):
graph.add_edge((combinedNode, child), label=graph.edge_label((curNode, child)))
return combinedNode
def generate_possessive_top_node(graph, nodeLs):
ls = []
for node in nodeLs:
if graph_representation.node.isApposition(node):
ls.extend(graph.neighbors(node))
else:
ls.append(node)
topNode = ls[0]
for curNode in ls[1:]:
topNode = graph_representation.node.join(topNode, curNode, graph)
return topNode
def sort_nodes_topologically(graph, nodeLs):
helperGraph = graph.__class__(originalSentence="")
helperGraph.add_nodes(nodeLs)
acc = accessibility(graph)
for node1 in nodeLs:
for node2 in acc[node1]:
if node2 in nodeLs:
if node1.uid != node2.uid:
helperGraph.add_edge((node1, node2))
sorted_nodes = topological_sorting(helperGraph)
return sorted_nodes
def get_min_max_span(graph, node):
minInd = NO_INDEX
maxInd = NO_INDEX
for curNode in traversal(graph, node, 'pre'):
curMin = curNode.minIndex()
curMax = curNode.maxIndex()
maxInd = max(maxInd, curMax)
if curMin != NO_INDEX:
if minInd == NO_INDEX:
minInd = curMin
else:
minInd = min(minInd, curMin)
return (minInd, maxInd)
def sister_nodes(graph, node):
ret = set()
for curIncident in graph.incidents(node):
ret = set.union(ret, set(graph.neighbors(curIncident)))
return ret
def is_following(graph, node1, node2):
node1_max = get_min_max_span(graph, node1)[1]
node2_min = get_min_max_span(graph, node2)[0]
return (node1_max + 1 == node2_min)
def immediate_sister(graph, node1, node2):
return (node2 in sister_nodes(graph, node1) and is_following(graph, node1, node2))
def reattch(graph, node, new_father, label=""):
for curFather in graph.incidents(node):
graph.del_edge(edge=(curFather, node))
graph.add_edge(edge=(new_father, node),
label=label)
def deref(graph, node, rel):
neighbors = graph.neighbors(node)
if isinstance(rel, list):
ret = []
for curRel in rel:
ret.extend([n for n in neighbors if graph.edge_label((node, n)) == curRel])
else:
ret = [n for n in neighbors if graph.edge_label((node, n)) == rel]
return ret
def find_node_by_index_range(graph, start, end):
for node in graph.nodes():
indices = [w.index for w in node.str if w.index != NO_INDEX]
if indices:
if (start >= min(indices)) and (end <= max(indices)):
return node
return False
def to_undirected(graph):
ret = graph.__class__("")
ret.add_nodes(graph.nodes())
for u, v in graph.edges():
if not(ret.has_edge((u,v))):
ret.add_edge(edge=(u, v),
label=graph.edge_label((u, v)))
if not(ret.has_edge((v,u))):
ret.add_edge(edge=(v, u),
label=graph.edge_label((u, v)))
return ret
def shortest_distance(graph, node1, node2):
undirected = to_undirected(graph)
_, d = shortest_path(graph=undirected, source=node1)
if node2 not in d:
return -1
return d[node2]
def find_top_of_component(graph, source_node):
_, d = shortest_path(reverse_graph_edges(graph),
source = source_node)
return max(d.iteritems(),
key = itemgetter(1))[0]
|
MIT License
|
bukun/torcms
|
torcms/handlers/post_handler.py
|
PostHandler.redirect_kind
|
python
|
def redirect_kind(self, postinfo):
logger.warning('info kind:{0} '.format(postinfo.kind))
if postinfo.kind == self.kind:
pass
else:
self.redirect('/{0}/{1}'.format(router_post[postinfo.kind],
postinfo.uid),
permanent=True)
|
Redirect according to the kind of the post.
:param postinfo: the postinfo
:return: None
|
https://github.com/bukun/torcms/blob/5d7480865fd46e706b84f5f65a5c24cd03bb2142/torcms/handlers/post_handler.py#L369-L383
|
import json
import random
from concurrent.futures import ThreadPoolExecutor
import tornado.escape
import tornado.gen
import tornado.ioloop
import tornado.web
from config import router_post
from torcms.core import privilege, tools
from torcms.core.base_handler import BaseHandler
from torcms.core.tool.sqlite_helper import MAcces
from torcms.core.tools import logger
from torcms.handlers.entity_handler import EntityHandler
from torcms.model.category_model import MCategory
from torcms.model.entity_model import MEntity
from torcms.model.evaluation_model import MEvaluation
from torcms.model.label_model import MPost2Label
from torcms.model.post2catalog_model import MPost2Catalog
from torcms.model.post_hist_model import MPostHist
from torcms.model.post_model import MPost
from torcms.model.relation_model import MRelation
from torcms.model.usage_model import MUsage
def update_category(uid, post_data):
if 'gcat0' in post_data:
pass
else:
return False
the_cats_arr = []
the_cats_dict = {}
def_cate_arr = ['gcat{0}'.format(x) for x in range(10)]
for key in def_cate_arr:
if key not in post_data:
continue
if post_data[key] == '' or post_data[key] == '0':
continue
if post_data[key] in the_cats_arr:
continue
the_cats_arr.append(post_data[key] + ' ' * (4 - len(post_data[key])))
the_cats_dict[key] = post_data[key] + ' ' * (4 - len(post_data[key]))
if the_cats_arr:
def_cat_id = the_cats_arr[0]
else:
def_cat_id = None
if def_cat_id:
the_cats_dict['gcat0'] = def_cat_id
the_cats_dict['def_cat_uid'] = def_cat_id
the_cats_dict['def_cat_pid'] = MCategory.get_by_uid(def_cat_id).pid
logger.info('Update category: {0}'.format(the_cats_arr))
logger.info('Update category: {0}'.format(the_cats_dict))
MPost.update_jsonb(uid, the_cats_dict)
for index, idx_catid in enumerate(the_cats_arr):
MPost2Catalog.add_record(uid, idx_catid, index)
current_infos = MPost2Catalog.query_by_entity_uid(uid, kind='').objects()
for cur_info in current_infos:
if cur_info.tag_id not in the_cats_arr:
MPost2Catalog.remove_relation(uid, cur_info.tag_id)
def update_label(signature, post_data):
current_tag_infos = MPost2Label.get_by_uid(signature).objects()
if 'tags' in post_data:
pass
else:
return False
tags_arr = [x.strip() for x in post_data['tags'].split(',')]
for tag_name in tags_arr:
if tag_name == '':
pass
else:
MPost2Label.add_record(signature, tag_name, 1)
for cur_info in current_tag_infos:
if cur_info.tag_name in tags_arr:
pass
else:
MPost2Label.remove_relation(signature, cur_info.tag_id)
class PostHandler(BaseHandler):
executor = ThreadPoolExecutor(2)
def initialize(self, **kwargs):
super().initialize()
self.kind = kwargs.get('kind', '1')
self.filter_view = kwargs.get('filter_view', False)
self.entity = EntityHandler
def get(self, *args, **kwargs):
url_str = args[0]
url_arr = self.parse_url(url_str)
if url_str == '' or url_str == 'index':
self.index()
elif url_arr[0] == '_cat_add':
self._to_add(catid=url_arr[1])
elif url_arr[0] == '_add':
if len(url_arr) == 2:
self._to_add(uid=url_arr[1])
else:
self._to_add()
elif len(url_arr) == 1 and len(url_str) in [4, 5]:
self._view_or_add(url_str)
elif len(url_arr) == 2:
dict_get = {
'_edit_kind': self._to_edit_kind,
'_edit': self._to_edit,
'_delete': self._delete,
}
dict_get.get(url_arr[0])(url_arr[1])
else:
self.show404()
def post(self, *args, **kwargs):
url_str = args[0]
logger.info('Post url: {0}'.format(url_str))
url_arr = self.parse_url(url_str)
if url_arr[0] in ['_edit']:
self.update(url_arr[1])
elif url_arr[0] in ['_add']:
if len(url_arr) == 2:
self.add(uid=url_arr[1])
else:
self.add()
elif url_arr[0] == '_edit_kind':
self._change_kind(url_arr[1])
elif url_arr[0] in ['_cat_add']:
self.add(catid=url_arr[1])
elif len(url_arr) == 1:
if len(url_str) in [4, 5]:
self.add(uid=url_str)
elif url_arr[0] == 'rel' and len(url_arr) == 3:
self._add_relation(url_arr[1], url_arr[2])
else:
self.show404()
def index(self):
self.render('post_{0}/post_index.html'.format(self.kind),
userinfo=self.userinfo,
kwd={
'uid': '',
})
def _gen_uid(self):
cur_uid = self.kind + tools.get_uu4d()
while MPost.get_by_uid(cur_uid):
cur_uid = self.kind + tools.get_uu4d()
return cur_uid
def _get_tmpl_view(self, rec):
if 'def_cat_uid' in rec.extinfo and rec.extinfo['def_cat_uid'] != '':
cat_id = rec.extinfo['def_cat_uid']
elif 'gcat0' in rec.extinfo and rec.extinfo['gcat0'] != '':
cat_id = rec.extinfo['gcat0']
else:
cat_id = None
logger.info('For templates: catid: {0}, filter_view: {1}'.format(
cat_id, self.filter_view))
if cat_id and self.filter_view:
tmpl = 'autogen/view/view_{0}.html'.format(cat_id)
else:
tmpl = 'post_{0}/post_view.html'.format(self.kind)
return tmpl
@tornado.web.authenticated
@privilege.auth_add
def _to_add_with_category(self, catid):
catinfo = MCategory.get_by_uid(catid)
kwd = {
'uid': self._gen_uid(),
'userid': self.userinfo.user_name if self.userinfo else '',
'gcat0': catid,
'parentname': MCategory.get_by_uid(catinfo.pid).name,
'catname': MCategory.get_by_uid(catid).name,
}
self.render('autogen/add/add_{0}.html'.format(catid),
userinfo=self.userinfo,
kwd=kwd)
def _view_or_add(self, uid):
postinfo = MPost.get_by_uid(uid)
if postinfo:
self.viewinfo(postinfo)
elif self.userinfo:
self._to_add(uid=uid)
else:
self.show404()
@tornado.web.authenticated
@privilege.auth_add
def _to_add(self, **kwargs):
if 'catid' in kwargs:
catid = kwargs['catid']
return self._to_add_with_category(catid)
else:
if 'uid' in kwargs and MPost.get_by_uid(kwargs['uid']):
uid = kwargs['uid']
else:
uid = ''
self.render('post_{0}/post_add.html'.format(self.kind),
tag_infos=MCategory.query_all(by_order=True,
kind=self.kind),
userinfo=self.userinfo,
kwd={
'uid': uid,
})
@tornado.web.authenticated
@privilege.auth_edit
def _to_edit(self, infoid):
postinfo = MPost.get_by_uid(infoid)
if postinfo:
pass
else:
return self.show404()
if 'def_cat_uid' in postinfo.extinfo:
catid = postinfo.extinfo['def_cat_uid']
elif 'gcat0' in postinfo.extinfo:
catid = postinfo.extinfo['gcat0']
else:
catid = ''
if len(catid) == 4:
pass
else:
catid = ''
catinfo = None
p_catinfo = None
post2catinfo = MPost2Catalog.get_first_category(postinfo.uid)
if post2catinfo:
catid = post2catinfo.tag_id
catinfo = MCategory.get_by_uid(catid)
if catinfo:
p_catinfo = MCategory.get_by_uid(catinfo.pid)
kwd = {
'gcat0': catid,
'parentname': '',
'catname': '',
'parentlist': MCategory.get_parent_list(),
'userip': self.request.remote_ip,
'extinfo': json.dumps(postinfo.extinfo,
indent=2,
ensure_ascii=False),
}
if self.filter_view:
tmpl = 'autogen/edit/edit_{0}.html'.format(catid)
else:
tmpl = 'post_{0}/post_edit.html'.format(self.kind)
logger.info('Meta template: {0}'.format(tmpl))
self.render(tmpl,
kwd=kwd,
postinfo=postinfo,
catinfo=catinfo,
pcatinfo=p_catinfo,
userinfo=self.userinfo,
cat_enum=MCategory.get_qian2(catid[:2]),
tag_infos=MCategory.query_all(by_order=True,
kind=self.kind),
tag_infos2=MCategory.query_all(by_order=True,
kind=self.kind),
app2tag_info=MPost2Catalog.query_by_entity_uid(
infoid, kind=self.kind).objects(),
app2label_info=MPost2Label.get_by_uid(infoid).objects())
def _gen_last_current_relation(self, post_id):
last_post_id = self.get_secure_cookie('last_post_uid')
if last_post_id:
last_post_id = last_post_id.decode('utf-8')
self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id)
|
MIT License
|
acq4/acq4
|
acq4/drivers/SutterMPC200/mpc200.py
|
SutterMPC200.setDrive
|
python
|
def setDrive(self, drive):
cmd = 'I' + chr(drive)
cmd = cmd.encode('utf8')
self.write(cmd)
ret = self.read(2, term=b'\r')
if ord(ret) == drive:
return
else:
raise Exception('MPC200: Drive %d is not connected' % drive)
|
Set the current drive (1-4)
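A minimal usage sketch (the serial port name below is hypothetical and
platform-dependent):
>>> dev = SutterMPC200.getDevice('/dev/ttyUSB0')
>>> dev.setDrive(2)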
|
https://github.com/acq4/acq4/blob/4c0d9cdaf4740359023fd323f671e9af3c115d2e/acq4/drivers/SutterMPC200/mpc200.py#L108-L117
|
from __future__ import print_function
import serial, struct, time, collections
import numpy as np
from six.moves import range
try:
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, DataError
except ValueError:
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from acq4.drivers.SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
def resetDrive(method):
def resetDrive(self, *args, **kwds):
active = self.getActiveDrive()
try:
return method(self, *args, **kwds)
finally:
self.setDrive(active)
return resetDrive
class SutterMPC200(SerialDevice):
DEVICES = {}
speedTable = {
0: 0.0003379,
1: 0.0003606,
2: 0.000383,
3: 0.000412,
4: 0.0004408,
5: 0.0004782,
6: 0.0005233,
7: 0.0005726,
8: 0.0006381,
9: 0.000718,
10: 0.0008146,
11: 0.0009575,
12: 0.001139,
13: 0.001404,
14: 0.00189,
15: 0.002767,
'fast': 0.00465
}
@classmethod
def getDevice(cls, port):
port = SerialDevice.normalizePortName(port)
if port in cls.DEVICES:
return cls.DEVICES[port]
else:
return SutterMPC200(port=port)
def __init__(self, port):
port = SerialDevice.normalizePortName(port)
if port in SutterMPC200.DEVICES:
raise Exception("The port %s is already accessed by another instance of this class. Use getDevice(port) instead.")
SutterMPC200.DEVICES[port] = self
self.lock = RLock()
self.port = port
SerialDevice.__init__(self, port=self.port, baudrate=128000)
self.scale = [0.0625e-6]*3
self._moving = False
@threadsafe
|
MIT License
|
timothyb0912/pylogit
|
src/pylogit/construct_estimator.py
|
create_estimation_obj
|
python
|
def create_estimation_obj(model_obj,
init_vals,
mappings=None,
ridge=None,
constrained_pos=None,
weights=None):
mapping_matrices = model_obj.get_mappings_for_fit() if mappings is None else mappings
zero_vector = np.zeros(init_vals.shape[0])
internal_model_name = display_name_to_model_type[model_obj.model_type]
estimator_class, current_split_func = (model_type_to_resources[internal_model_name]['estimator'],
model_type_to_resources[internal_model_name]['split_func'])
estimation_obj = estimator_class(model_obj,
mapping_matrices,
ridge,
zero_vector,
current_split_func,
constrained_pos,
weights=weights)
return estimation_obj
|
Should return a model estimation object corresponding to the model type of
the `model_obj`.
Parameters
----------
model_obj : an instance or subclass of the MNDC class.
init_vals : 1D ndarray.
The initial values to start the estimation process with. In the
following order, there should be one value for each nest coefficient,
shape parameter, outside intercept parameter, or index coefficient that
is being estimated.
mappings : OrderedDict or None, optional.
Keys will be `["rows_to_obs", "rows_to_alts", "chosen_row_to_obs",
"rows_to_nests"]`. The value for `rows_to_obs` will map the rows of
the `long_form` to the unique observations (on the columns) in
their order of appearance. The value for `rows_to_alts` will map
the rows of the `long_form` to the unique alternatives which are
possible in the dataset (on the columns), in sorted order--not
order of appearance. The value for `chosen_row_to_obs`, if not
None, will map the rows of the `long_form` that contain the chosen
alternatives to the specific observations those rows are associated
with (denoted by the columns). The value of `rows_to_nests`, if not
None, will map the rows of the `long_form` to the nest (denoted by
the column) that contains the row's alternative. Default == None.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If a
scalar is passed, then that scalar determines the ridge penalty for
the optimization. The scalar should be greater than or equal to
zero. Default `== None`.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
`init_vals.size`. Default == None.
weights : 1D ndarray.
Should contain the weights for each corresponding observation for each
row of the long format data.
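A minimal usage sketch; `mnl_model` and `num_estimated_params` are hypothetical
stand-ins for an already specified pylogit model and its parameter count:
>>> import numpy as np
>>> init_vals = np.zeros(num_estimated_params)
>>> estimation_obj = create_estimation_obj(mnl_model, init_vals)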
|
https://github.com/timothyb0912/pylogit/blob/cffc9c523b5368966ef2481c7dc30f0a5d296de8/src/pylogit/construct_estimator.py#L55-L120
|
from __future__ import absolute_import
import numpy as np
from .display_names import model_type_to_display_name as display_name_dict
from .mixed_logit import MixedEstimator
from .mixed_logit import split_param_vec as mixed_split_params
from .nested_logit import NestedEstimator
from .nested_logit import split_param_vec as nested_split_params
from .conditional_logit import MNLEstimator
from .conditional_logit import split_param_vec as mnl_split_params
from .clog_log import ClogEstimator
from .clog_log import split_param_vec as clog_split_params
from .asym_logit import AsymEstimator
from .asym_logit import split_param_vec as asym_split_params
from .scobit import ScobitEstimator
from .scobit import split_param_vec as scobit_split_params
from .uneven_logit import UnevenEstimator
from .uneven_logit import split_param_vec as uneven_split_params
display_name_to_model_type = {v: k for k, v in display_name_dict.items()}
model_type_to_resources = {"MNL": {'estimator': MNLEstimator, 'split_func': mnl_split_params},
"Asym": {'estimator': AsymEstimator, 'split_func': asym_split_params},
"Cloglog": {'estimator': ClogEstimator, 'split_func': clog_split_params},
"Scobit": {'estimator': ScobitEstimator,
'split_func': scobit_split_params},
"Uneven": {'estimator': UnevenEstimator,
'split_func': uneven_split_params},
"Nested Logit": {'estimator': NestedEstimator,
'split_func': nested_split_params},
"Mixed Logit": {'estimator': MixedEstimator,
'split_func': mixed_split_params}}
|
BSD 3-Clause New or Revised License
|
ww-tech/primrose
|
primrose/configuration/util.py
|
ConfigurationSectionType.values
|
python
|
def values():
return list(map(lambda t: t.value, ConfigurationSectionType))
|
Return a list of the enum's values.
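For example, given the members defined on this enum:
>>> ConfigurationSectionType.values()
['metadata', 'implementation_config']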
|
https://github.com/ww-tech/primrose/blob/ab3733dea316e3bea3659493587f97955cf6d983/primrose/configuration/util.py#L30-L32
|
from enum import Enum
class ConfigurationError(Exception):
pass
class ConfigurationSectionType(Enum):
METADATA = "metadata"
IMPLEMENTATION_CONFIG = "implementation_config"
@staticmethod
|
Apache License 2.0
|
doudz/homeassistant-myjdownloader
|
custom_components/myjdownloader/entities.py
|
MyJDownloaderDeviceEntity.stop_downloads
|
python
|
async def stop_downloads(self):
device = self.hub.get_device(self._device_id)
await self.hub.async_query(device.update.stop_downloads)
|
Service call to stop downloads.
|
https://github.com/doudz/homeassistant-myjdownloader/blob/632a356b969a1144b56f0edf7af673cbb997d5e5/custom_components/myjdownloader/entities.py#L131-L134
|
import logging
from string import Template
from myjdapi.exception import MYJDConnectionException
from homeassistant.helpers.entity import Entity
from . import MyJDownloaderHub
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class MyJDownloaderEntity(Entity):
def __init__(
self,
hub: MyJDownloaderHub,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
self._available = True
self._enabled_default = enabled_default
self._icon = icon
self._name = name
self.hub = hub
@property
def name(self) -> str:
return self._name
@property
def icon(self) -> str:
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
return self._enabled_default
@property
def available(self) -> bool:
return self._available
async def async_update(self) -> None:
if not self.enabled:
return
try:
await self._myjdownloader_update()
self._available = True
except MYJDConnectionException:
self._available = False
except Exception:
if self._available:
_LOGGER.debug(
"An error occurred while updating MyJDownloader sensor",
exc_info=True,
)
self._available = False
async def _myjdownloader_update(self) -> None:
raise NotImplementedError()
class MyJDownloaderDeviceEntity(MyJDownloaderEntity):
def __init__(
self,
hub: MyJDownloaderHub,
device_id: str,
name_template: str,
icon: str,
enabled_default: bool = True,
) -> None:
self._device_id = device_id
device = hub.get_device(self._device_id)
self._device_name = device.name
self._device_type = device.device_type
name = Template(name_template).substitute(device_name=self._device_name)
super().__init__(hub, name, icon, enabled_default)
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self._device_id)},
"name": f"JDownloader {self._device_name}",
"manufacturer": "AppWork GmbH",
"model": self._device_type,
"sw_version": None,
"entry_type": "service",
}
async def async_update(self) -> None:
if self._device_id in self.hub.devices:
self._available = True
await super().async_update()
else:
self._available = False
async def restart_and_update(self):
device = self.hub.get_device(self._device_id)
await self.hub.async_query(device.update.restart_and_update)
async def run_update_check(self):
device = self.hub.get_device(self._device_id)
await self.hub.async_query(device.update.run_update_check)
async def start_downloads(self):
device = self.hub.get_device(self._device_id)
await self.hub.async_query(device.update.start_downloads)
|
MIT License
|
chrklemm/sesmg
|
program_files/optimize_model.py
|
least_cost_model
|
python
|
def least_cost_model(energy_system: solph.EnergySystem, num_threads: int,
nodes_data: dict, busd: dict, solver: str) -> solph.Model:
import logging
import math
import pyomo.environ as po
logging.info(
' ' + "******************************************************"
+ "***")
logging.info(' ' + 'Create Energy System...')
om = solph.Model(energy_system)
if (str(next(nodes_data["energysystem"].iterrows())[1]
["constraint cost limit"]) != 'none'
and
str(next(nodes_data["energysystem"].iterrows())[1]
["constraint cost limit"]) != 'None'):
limit = float(next(nodes_data["energysystem"].iterrows())[1]
["constraint cost limit"])
om = constraint_optimization_against_two_values(om, limit)
if "competition constraints" in nodes_data:
om = competition_constraint(om, nodes_data, energy_system)
for j, z in nodes_data['links'].iterrows():
for i, b in om.flows.keys():
if isinstance(i, solph.custom.Link) and str(i) == z['label']:
if z['(un)directed'] == 'undirected':
p = energy_system.groups[z['label']]
solph.constraints.equate_variables(
om,
om.InvestmentFlow.invest[p, busd[z['bus1']]],
om.InvestmentFlow.invest[p, busd[z['bus2']]]
)
elif z['(un)directed'] == 'directed':
p = energy_system.groups[z['label']]
def input_rule(om, t):
inflow = (om.flow[busd[z['bus2']], p, t])
return inflow == 0
om.InvestmentFlow.invest[p, busd[z['bus1']]] = 0
setattr(om, z['label'] + "input_constraint",
po.Constraint(om.TIMESTEPS, expr=input_rule))
logging.info(
' ' + "******************************************************"
+ "***")
logging.info(' '+"Starting Optimization with "+solver+"-Solver")
om.solve(solver=solver, cmdline_options={"threads": num_threads})
return om
|
Solves a given energy system model for least costs and returns the
optimized energy system.
:param energy_system: energy system consisting of a number of \
components
:type energy_system: oemof.solph.Energysystem
:param num_threads: number of threads the solver is allowed to use
:type num_threads: int
:param nodes_data: dictionary containing all components' \
information from the Excel spreadsheet
:type nodes_data: dict
:param busd: dictionary containing the buses of the energysystem
:type busd: dict
:return: - **om** (oemof.solph.Model) - solved oemof model
Christian Klemm - christian.klemm@fh-muenster.de
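A minimal usage sketch; `esys`, `nodes_data` and `busd` are assumed to have
been built beforehand (e.g. from the spreadsheet import):
>>> om = least_cost_model(esys, 4, nodes_data, busd, 'cbc')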
|
https://github.com/chrklemm/sesmg/blob/382ffd600b98d3cc6df53abed0cb3526187cb1cf/program_files/optimize_model.py#L120-L204
|
from oemof import solph
def constraint_optimization_against_two_values(om: solph.Model,
limit: float) -> solph.Model:
import pyomo.environ as po
from oemof.solph.plumbing import sequence
invest_flows = {}
for (i, o) in om.flows:
if hasattr(om.flows[i, o].investment, "periodical_constraint_costs"):
invest_flows[(i, o)] = om.flows[i, o].investment
limit_name = "invest_limit_" + "space"
setattr(om, limit_name, po.Expression(
expr=sum(om.InvestmentFlow.invest[inflow, outflow] *
getattr(invest_flows[inflow, outflow],
"periodical_constraint_costs")
for (inflow, outflow) in invest_flows
)))
flows = {}
for (i, o) in om.flows:
if hasattr(om.flows[i, o], "emission_factor"):
flows[(i, o)] = om.flows[i, o]
limit_name1 = "integral_limit_" + "emission_factor"
setattr(om, limit_name1, po.Expression(
expr=sum(om.flow[inflow, outflow, t]
* om.timeincrement[t]
* sequence(getattr(flows[inflow, outflow],
"emission_factor"))[t]
for (inflow, outflow) in flows
for t in om.TIMESTEPS)))
setattr(om, limit_name + "_constraint", po.Constraint(
expr=((getattr(om, limit_name) + getattr(om, limit_name1)) <= limit)))
return om
def competition_constraint(om, nd, energy_system):
import pyomo.environ as po
for k, j in nd['competition constraints'].iterrows():
if j["active"]:
flows = {}
for i, o in om.flows:
if i == energy_system.groups[j['component 1']]:
if o == (list(energy_system.groups[
j['component 1']].outputs)[0]):
setattr(om.flows[i, o], "competition_factor",
j['factor 1'])
flows[(i, o)] = om.flows[i, o]
elif i == energy_system.groups[j['component 2']]:
setattr(om.flows[i, o], "competition_factor",
j['factor 2'])
flows[(i, o)] = om.flows[i, o]
def competition_rule(om):
competition_flow = sum(om.InvestmentFlow.invest[i, o]
* om.flows[i, o].competition_factor
for (i, o) in flows)
limit = j['limit']
limit = limit - (sum(om.flows[i, o].investment.existing
for (i, o) in flows))
return (limit >= competition_flow)
setattr(om, j['component 1'] + '_' + j['component 2']
+ "competition_constraint",
po.Constraint(om.TIMESTEPS, expr=competition_rule))
return om
|
MIT License
|
mkdocs/mkdocs
|
mkdocs/utils/__init__.py
|
yaml_load
|
python
|
def yaml_load(source, loader=None):
Loader = loader or get_yaml_loader()
result = yaml.load(source, Loader=Loader)
if result is not None and 'INHERIT' in result:
relpath = result.pop('INHERIT')
abspath = os.path.normpath(os.path.join(os.path.dirname(source.name), relpath))
if not os.path.exists(abspath):
raise exceptions.ConfigurationError(
f"Inherited config file '{relpath}' does not exist at '{abspath}'.")
log.debug(f"Loading inherited configuration file: {abspath}")
with open(abspath, 'rb') as fd:
parent = yaml_load(fd, Loader)
result = merge(parent, result)
return result
|
Return dict of source YAML file using loader, recursively deep merging inherited parent.
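A minimal usage sketch, assuming an mkdocs.yml that contains an
"INHERIT: base.yml" entry pointing at a parent configuration file:
>>> f = open('mkdocs.yml', 'rb')
>>> config = yaml_load(f)
>>> f.close()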
|
https://github.com/mkdocs/mkdocs/blob/3ebb884e7b05e7580fa5d9b192eaa6e5520af9af/mkdocs/utils/__init__.py#L53-L67
|
import logging
import os
import shutil
import re
import yaml
import fnmatch
import posixpath
import functools
import importlib_metadata
from collections import defaultdict
from datetime import datetime, timezone
from urllib.parse import urlsplit
from yaml_env_tag import construct_env_tag
from mergedeep import merge
from mkdocs import exceptions
log = logging.getLogger(__name__)
markdown_extensions = [
'.markdown',
'.mdown',
'.mkdn',
'.mkd',
'.md'
]
def get_yaml_loader(loader=yaml.Loader):
class Loader(loader):
Loader.add_constructor('!ENV', construct_env_tag)
return Loader
|
BSD 2-Clause Simplified License
|
jameskmurphy/nes
|
nes/pycore/system.py
|
NES.init_logging
|
python
|
def init_logging(self, log_file, log_level):
if log_file is None or log_level is None:
logging.disable()
return
logging.addLevelName(LOG_MEMORY, "MEMORY")
logging.addLevelName(LOG_PPU, "PPU")
logging.addLevelName(LOG_CPU, "CPU")
logging.basicConfig(filename=log_file,
level=logging.NOTSET,
format='%(asctime)-15s %(source)-5s %(message)s',
filemode='w',
)
logging.root.setLevel(log_level)
|
Initialize the logging; set the log file and the logging level (LOG_MEMORY, LOG_PPU, LOG_CPU are all below
logging.DEBUG)
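A minimal usage sketch (the ROM path is hypothetical); passing both a log
file and a level enables logging, otherwise logging is disabled:
>>> import logging
>>> nes = NES('roms/game.nes', log_file='nes.log', log_level=logging.INFO)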
|
https://github.com/jameskmurphy/nes/blob/d2fb20be164a766dbb6ad17f4cccb9518455fee0/nes/pycore/system.py#L102-L120
|
import pyximport; pyximport.install()
from nes.pycore.mos6502 import MOS6502
from nes.pycore.memory import NESMappedRAM
from nes.pycore.ppu import NESPPU
from nes.rom import ROM
from nes.peripherals import Screen, KeyboardController, ControllerBase
import pickle
import logging
import pygame
class InterruptListener:
def __init__(self):
self._nmi = False
self._irq = False
self.oam_dma_pause = False
def raise_nmi(self):
self._nmi = True
def reset_nmi(self):
self._nmi = False
def reset_oam_dma_pause(self):
self.oam_dma_pause = False
def raise_oam_dma_pause(self):
self.oam_dma_pause = True
def any_active(self):
return self._nmi or self._irq or self.oam_dma_pause
@property
def nmi_active(self):
return self._nmi
@property
def irq_active(self):
return self._irq
class NES:
PPU_CYCLES_PER_CPU_CYCLE = 3
FRAMERATE_FPS = 240
def __init__(self, rom_file, screen_scale=3, log_file=None, log_level=None, prg_start=None):
self.init_logging(log_file, log_level)
rom = ROM(rom_file, py_compatibility_mode=True)
self.cart = rom.get_cart(prg_start)
self.controller1 = KeyboardController()
self.controller2 = ControllerBase(active=False)
self.interrupt_listener = InterruptListener()
self.ppu = NESPPU(cart=self.cart, interrupt_listener=self.interrupt_listener)
self.screen = Screen(ppu=self.ppu, scale=screen_scale, py_compatibility_mode=True)
self.ppu.screen = self.screen
self.screen_scale = screen_scale
self.memory = NESMappedRAM(ppu=self.ppu,
apu=None,
cart=self.cart,
controller1=self.controller1,
controller2=self.controller2,
interrupt_listener=self.interrupt_listener
)
self.cpu = MOS6502(memory=self.memory,
undocumented_support_level=2,
stack_underflow_causes_exception=False
)
self.cpu.reset()
|
MIT License
|
eric3911/mini_ssd
|
object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py
|
SSDResnet50V1FpnFeatureExtractor.__init__
|
python
|
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_50,
'resnet_v1_50',
'fpn',
fpn_min_level,
fpn_max_level,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
|
SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
UNUSED currently.
min_depth: minimum feature extractor depth. UNUSED Currently.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
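A minimal construction sketch; `hyperparams_fn` is a hypothetical function
returning a tf-slim arg_scope, and depth_multiplier must be 1.0:
>>> extractor = SSDResnet50V1FpnFeatureExtractor(
... is_training=True, depth_multiplier=1.0, min_depth=16,
... pad_to_multiple=32, conv_hyperparams_fn=hyperparams_fn)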
|
https://github.com/eric3911/mini_ssd/blob/6fb6e1bce3ab6e4adb832b37e78325803c7424b6/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py#L203-L252
|
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
slim = tf.contrib.slim
class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
fpn_scope_name,
fpn_min_level=3,
fpn_max_level=7,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
super(_SSDResnetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if self._depth_multiplier != 1.0:
raise ValueError('Only depth 1.0 is supported, found: {}'.
format(self._depth_multiplier))
if self._use_explicit_padding is True:
raise ValueError('Explicit padding is not a valid option.')
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._fpn_scope_name = fpn_scope_name
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
def preprocess(self, resized_inputs):
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
def _filter_features(self, image_features):
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block1', 'block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
def extract_features(self, preprocessed_inputs):
if self._depth_multiplier != 1.0:
raise ValueError('Depth multiplier not supported.')
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
scope=scope)
image_features = self._filter_features(image_features)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope(self._fpn_scope_name,
reuse=self._reuse_weights):
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=256)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(
fpn_features['top_down_block{}'.format(level - 1)])
last_feature_map = fpn_features['top_down_block{}'.format(
base_fpn_max_level - 1)]
for i in range(base_fpn_max_level, self._fpn_max_level):
last_feature_map = slim.conv2d(
last_feature_map,
num_outputs=256,
kernel_size=[3, 3],
stride=2,
padding='SAME',
scope='bottom_up_block{}'.format(i))
feature_maps.append(last_feature_map)
return feature_maps
class SSDResnet50V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
|
MIT License
|
bennington-hardware-hacking-2019/pos_system
|
tag_reader/smbus2/smbus2/smbus2.py
|
SMBus.read_byte
|
python
|
def read_byte(self, i2c_addr, force=None):
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
|
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
|
https://github.com/bennington-hardware-hacking-2019/pos_system/blob/b204d19bf7695d53d6a0b1ce25ed6490bb06314c/tag_reader/smbus2/smbus2/smbus2.py#L333-L349
|
import os
import sys
from fcntl import ioctl
from ctypes import c_uint32, c_uint8, c_uint16, c_char, POINTER, Structure, Array, Union, create_string_buffer, string_at
I2C_SLAVE = 0x0703
I2C_SLAVE_FORCE = 0x0706
I2C_FUNCS = 0x0705
I2C_RDWR = 0x0707
I2C_SMBUS = 0x0720
I2C_SMBUS_WRITE = 0
I2C_SMBUS_READ = 1
I2C_SMBUS_QUICK = 0
I2C_SMBUS_BYTE = 1
I2C_SMBUS_BYTE_DATA = 2
I2C_SMBUS_WORD_DATA = 3
I2C_SMBUS_PROC_CALL = 4
I2C_SMBUS_BLOCK_DATA = 5
I2C_SMBUS_BLOCK_PROC_CALL = 7
I2C_SMBUS_I2C_BLOCK_DATA = 8
I2C_SMBUS_BLOCK_MAX = 32
I2C_FUNC_I2C = 0x00000001
I2C_FUNC_10BIT_ADDR = 0x00000002
I2C_FUNC_PROTOCOL_MANGLING = 0x00000004
I2C_FUNC_SMBUS_PEC = 0x00000008
I2C_FUNC_NOSTART = 0x00000010
I2C_FUNC_SLAVE = 0x00000020
I2C_FUNC_SMBUS_BLOCK_PROC_CALL = 0x00008000
I2C_FUNC_SMBUS_QUICK = 0x00010000
I2C_FUNC_SMBUS_READ_BYTE = 0x00020000
I2C_FUNC_SMBUS_WRITE_BYTE = 0x00040000
I2C_FUNC_SMBUS_READ_BYTE_DATA = 0x00080000
I2C_FUNC_SMBUS_WRITE_BYTE_DATA = 0x00100000
I2C_FUNC_SMBUS_READ_WORD_DATA = 0x00200000
I2C_FUNC_SMBUS_WRITE_WORD_DATA = 0x00400000
I2C_FUNC_SMBUS_PROC_CALL = 0x00800000
I2C_FUNC_SMBUS_READ_BLOCK_DATA = 0x01000000
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA = 0x02000000
I2C_FUNC_SMBUS_READ_I2C_BLOCK = 0x04000000
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK = 0x08000000
I2C_M_RD = 0x0001
LP_c_uint8 = POINTER(c_uint8)
LP_c_uint16 = POINTER(c_uint16)
LP_c_uint32 = POINTER(c_uint32)
class i2c_smbus_data(Array):
_length_ = I2C_SMBUS_BLOCK_MAX + 2
_type_ = c_uint8
class union_i2c_smbus_data(Union):
_fields_ = [
("byte", c_uint8),
("word", c_uint16),
("block", i2c_smbus_data)
]
union_pointer_type = POINTER(union_i2c_smbus_data)
class i2c_smbus_ioctl_data(Structure):
_fields_ = [
('read_write', c_uint8),
('command', c_uint8),
('size', c_uint32),
('data', union_pointer_type)]
__slots__ = [name for name, type in _fields_]
@staticmethod
def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):
u = union_i2c_smbus_data()
return i2c_smbus_ioctl_data(
read_write=read_write, command=command, size=size,
data=union_pointer_type(u))
class i2c_msg(Structure):
_fields_ = [
('addr', c_uint16),
('flags', c_uint16),
('len', c_uint16),
('buf', POINTER(c_char))]
def __iter__(self):
return i2c_msg_iter(self)
def __len__(self):
return self.len
def __bytes__(self):
return string_at(self.buf, self.len)
def __repr__(self):
return 'i2c_msg(%d,%d,%r)' % (self.addr, self.flags, self.__bytes__())
def __str__(self):
s = self.__bytes__()
if sys.version_info.major >= 3:
s = ''.join(map(chr, s))
return s
@staticmethod
def read(address, length):
arr = create_string_buffer(length)
return i2c_msg(
addr=address, flags=I2C_M_RD, len=length,
buf=arr)
@staticmethod
def write(address, buf):
if sys.version_info.major >= 3:
if type(buf) is str:
buf = bytes(map(ord, buf))
else:
buf = bytes(buf)
else:
if type(buf) is not str:
buf = ''.join([chr(x) for x in buf])
arr = create_string_buffer(buf, len(buf))
return i2c_msg(
addr=address, flags=0, len=len(arr),
buf=arr)
class i2c_rdwr_ioctl_data(Structure):
_fields_ = [
('msgs', POINTER(i2c_msg)),
('nmsgs', c_uint32)
]
__slots__ = [name for name, type in _fields_]
@staticmethod
def create(*i2c_msg_instances):
n_msg = len(i2c_msg_instances)
msg_array = (i2c_msg * n_msg)(*i2c_msg_instances)
return i2c_rdwr_ioctl_data(
msgs=msg_array,
nmsgs=n_msg
)
class i2c_msg_iter:
def __init__(self, msg):
self.msg = msg
self.idx = 0
def __iter__(self):
return self
def __next__(self):
if self.idx < self.msg.len:
val = ord(self.msg.buf[self.idx])
self.idx += 1
return val
else:
raise StopIteration()
def next(self):
return self.__next__()
class SMBus(object):
def __init__(self, bus=None, force=False):
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
|
MIT License
|
jkibele/opticalrs
|
OpticalRS/ErrorMatrix.py
|
ErrorMatrix.proportion_in_reference
|
python
|
def proportion_in_reference( self ):
return np.nan_to_num( self.sum(axis=0).astype(float) / self.sum() )
|
Returns the proportion of pixels that fall into each category for the reference data.
>>> wundram_table2().proportion_in_reference.round(4)
ErrorMatrix([ 0.4642, 0.1623, 0.3245, 0.0302, 0.0189])
|
https://github.com/jkibele/opticalrs/blob/20d73aec1cbabfa54e62214ae3179e3ba375dff9/OpticalRS/ErrorMatrix.py#L208-L215
|
import numpy as np
import csv, itertools
def congalton_table1():
mat = np.array([ [65,4,22,24],[6,81,5,8],[0,11,85,19],[4,7,3,90] ], dtype=int).view( ErrorMatrix )
return mat
def congalton_table2():
sup = np.array([ [68,7,3,0],[12,112,15,10],[3,9,89,0],[0,2,5,56] ], dtype=int).view( ErrorMatrix )
return sup
def wundram_table2():
tab2 = np.array([ [89, 3, 7, 0, 1],
[11,16,10, 1, 0],
[ 2,14,60, 2, 0],
[ 1, 9, 7, 5, 0],
[20, 1, 2, 0, 4] ], dtype=int ).view( ErrorMatrix )
return tab2
def wundram_table3():
tab3 = np.array([ [114, 9, 8, 0, 1],
[ 5,23,11, 1, 0],
[ 1,10,59, 2, 0],
[ 0, 1, 7, 5, 0],
[ 3, 0, 1, 0, 4] ], dtype=int ).view( ErrorMatrix )
return tab3
def ref_array():
ref = np.array([ [4, 4, 3, 4, 4, 3, 2, 4, 0],
[4, 2, 0, 3, 0, 3, 0, 3, 3],
[1, 0, 3, 1, 2, 4, 0, 1, 2],
[0, 4, 4, 1, 3, 3, 1, 2, 0],
[3, 0, 1, 0, 0, 1, 3, 2, 2],
[4, 1, 0, 3, 4, 4, 3, 4, 3],
[4, 3, 4, 1, 4, 0, 0, 2, 4],
[0, 4, 2, 1, 1, 4, 4, 4, 4],
[0, 2, 1, 1, 1, 4, 0, 0, 0],
[4, 2, 3, 0, 4, 4, 4, 1, 0]], dtype=int)
return ref
def comp_array():
comp = np.array([ [4, 4, 3, 4, 1, 1, 2, 4, 0],
[4, 1, 0, 3, 0, 3, 0, 3, 3],
[1, 0, 3, 1, 2, 1, 1, 1, 1],
[0, 4, 4, 1, 1, 3, 1, 2, 0],
[1, 0, 1, 1, 0, 1, 3, 2, 2],
[4, 1, 0, 3, 4, 4, 3, 4, 3],
[1, 3, 4, 1, 1, 1, 0, 2, 4],
[0, 4, 2, 1, 1, 4, 4, 1, 4],
[0, 2, 1, 1, 1, 4, 0, 0, 0],
[4, 2, 3, 0, 1, 4, 1, 1, 0]], dtype=int)
return comp
def validate_comparison( reference, comparison ):
return "your mom"
def error_matrix( reference, comparison, categories=None, unclassified=0 ):
idx = np.where( reference != unclassified )
all_classes = np.unique( np.vstack( (reference[idx],comparison[idx]) ) )
n = len( all_classes )
em = np.array([z.count(x) for z in [zip(reference.flatten(),comparison.flatten())] for x in itertools.product(all_classes,repeat=2)]).reshape(n,n).view( ErrorMatrix )
if categories:
em.categories = categories
else:
em.categories = all_classes.tolist()
return em
class ErrorMatrix( np.ndarray ):
def __new__(cls, input_array, categories=None, title=None):
if input_array.__class__==str:
input_array = np.genfromtxt(input_array, delimiter=',')
obj = np.asarray(input_array).view(cls)
if categories:
obj.categories = categories
else:
obj.categories = range(1,1+obj.shape[0])
obj.title = title
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.categories = getattr(obj, 'categories', range(1,1+self.shape[0]))
self.title = getattr(obj, 'title', None)
def round(self, *places):
return super(ErrorMatrix, self).round( *places ).view( ErrorMatrix )
@property
def proportions( self ):
return np.nan_to_num( self.astype(float) / self.sum() )
@property
|
BSD 3-Clause New or Revised License
|
intel/tcf
|
tcfl/tl.py
|
linux_ssh_root_nopwd
|
python
|
def linux_ssh_root_nopwd(target, prefix = ""):
target.shell.run('mkdir -p %s/etc/ssh' % prefix)
target.shell.run(
f'grep -qe "^PermitRootLogin yes" {prefix}/etc/ssh/sshd_config'
f' || echo "PermitRootLogin yes" >> {prefix}/etc/ssh/sshd_config')
target.shell.run(
f'grep -qe "^PermitEmptyPasswords yes" {prefix}/etc/ssh/sshd_config'
f' || echo "PermitEmptyPasswords yes" >> {prefix}/etc/ssh/sshd_config')
|
Configure an SSH daemon to allow root login with no password
.. _howto_restart_sshd:
In a script:
>>> tcfl.tl.linux_ssh_root_nopwd(target)
>>> tcfl.tl.linux_sshd_restart(ic, target)
or, if doing it by hand, wait for *sshd* to be fully ready; this is a hack:
>>> target.shell.run("systemctl restart sshd")
>>> target.shell.run( # wait for sshd to fully restart
>>> # this assumes BASH
>>> "while ! exec 3<>/dev/tcp/localhost/22; do"
>>> " sleep 1s; done", timeout = 10)
- why not *nc*? easy and simple, but not installed by default in most distros
- why not *curl*? most distros have it installed; if SSH is replying
with the SSH-2.0 string, then likely the daemon is ready
Recent versions of curl now check for HTTP headers, so it can't
really be used for this
- why not plain *ssh*? because that might fail for many other
reasons, but you can check the debug output of *ssh -v* for a
*debug1: Remote protocol version* string; the output is harder to
keep under control and *curl* is somewhat faster, but::
$ ssh -v localhost 2>&1 -t echo | fgrep -q 'debug1: Remote protocol version'
is a valid test
- why not *netstat*? for example::
$ while ! netstat -antp | grep -q '^tcp.*:22.*LISTEN.*sshd'; do sleep 1s; done
*netstat* is not always available; when it is, that is also
a valid test
Things you can do after this:
1. switch over to an SSH console if configured (they are faster
and depending on the HW, more reliable):
>>> target.console.setup_preferred()
|
https://github.com/intel/tcf/blob/ca8b66d3809ecb441c9a1ae99ff13eb88baf9286/tcfl/tl.py#L490-L547
|
import collections
import datetime
import os
import pyte
import re
import ssl
import time
import traceback
import commonl
import tcfl.tc
def ansi_render_approx(s, width = 80, height = 2000):
assert isinstance(s, str)
assert isinstance(width, int) and width > 20
assert isinstance(height, int) and height > 20
r = ""
screen = pyte.Screen(width, height)
stream = pyte.Stream(screen)
stream.feed(s)
empty_line = width * " "
last = empty_line
skips = 1
for line in screen.display:
if line == empty_line and line == last:
skips += 1
continue
else:
if skips > 1:
r += f"<RENDERER: skipped {skips} empty lines>\n"
skips = 1
last = line
r += line.rstrip() + "\n"
return r
def ipxe_sanboot_url(target, sanboot_url):
target.power.cycle()
boot_ic = target.kws['pos_boot_interconnect']
mac_addr = target.kws['interconnects'][boot_ic]['mac_addr']
tcfl.biosl.boot_network_pxe(
target,
r"UEFI PXEv4 \(MAC:%s\)" % mac_addr.replace(":", "").upper().strip())
target.expect("iPXE initialising devices...")
target.console.write("\x02\x02")
time.sleep(0.3)
target.console.write("\x02\x02")
time.sleep(0.3)
target.console.write("\x02\x02")
time.sleep(0.3)
target.expect("Ctrl-B", timeout = 250)
target.console.write("\x02\x02")
time.sleep(0.3)
target.console.write("\x02\x02")
time.sleep(0.3)
target.expect("iPXE>")
prompt_orig = target.shell.shell_prompt_regex
try:
target.shell.shell_prompt_regex = "iPXE>"
kws = dict(target.kws)
boot_ic = target.kws['pos_boot_interconnect']
mac_addr = target.kws['interconnects'][boot_ic]['mac_addr']
ipv4_addr = target.kws['interconnects'][boot_ic]['ipv4_addr']
ipv4_prefix_len = target.kws['interconnects'][boot_ic]['ipv4_prefix_len']
kws['ipv4_netmask'] = commonl.ipv4_len_to_netmask_ascii(ipv4_prefix_len)
ifstat = target.shell.run("ifstat", output = True, trim = True)
regex = re.compile(
"(?P<ifname>net[0-9]+): %s using" % mac_addr.lower(),
re.MULTILINE)
m = regex.search(ifstat)
if not m:
raise tcfl.tc.error_e(
"iPXE: cannot find interface name for MAC address %s;"
" is the MAC address in the configuration correct?"
% mac_addr.lower(),
dict(target = target, ifstat = ifstat,
mac_addr = mac_addr.lower())
)
ifname = m.groupdict()['ifname']
target.shell.run("set %s/ip %s" % (ifname, ipv4_addr))
target.shell.run("set %s/netmask %s" % (ifname, kws['ipv4_netmask']))
target.shell.run("ifopen " + ifname)
if sanboot_url == "skip":
target.report_info("not booting", level = 0)
else:
target.send("sanboot %s" % sanboot_url)
finally:
target.shell.shell_prompt_regex = prompt_orig
ZEPHYR_BASE = os.environ.get(
'ZEPHYR_BASE',
'__environment_variable_ZEPHYR_BASE__not_exported__')
def zephyr_tags():
tags = {}
zephyr_vars = set([ 'ZEPHYR_BASE', 'ZEPHYR_GCC_VARIANT',
'ZEPHYR_TOOLCHAIN_VARIANT' ])
zephyr_vars_missing = zephyr_vars - set(os.environ.keys())
if 'ZEPHYR_GCC_VARIANT' in zephyr_vars_missing and 'ZEPHYR_TOOLCHAIN_VARIANT' in set(os.environ.keys()):
zephyr_vars_missing.remove('ZEPHYR_GCC_VARIANT')
if zephyr_vars_missing:
tags['skip'] = ",".join(zephyr_vars_missing) + " not exported"
return tags
def console_dump_on_failure(testcase, alevel = 0):
assert isinstance(testcase, tcfl.tc.tc_c)
if not testcase.result_eval.failed and not testcase.result_eval.errors and not testcase.result_eval.blocked:
return
for target in list(testcase.targets.values()):
if not hasattr(target, "console"):
continue
attachments = {}
console_list = target.console.list()
if len(console_list) == 1:
attachments["console"] = target.console.generator_factory(None)
else:
for console in console_list:
attachments['console[' + console + ']'] = target.console.generator_factory(console)
if testcase.result_eval.failed:
target.report_fail("console dump due to failure",
attachments, alevel = alevel)
elif testcase.result_eval.errors:
target.report_error("console dump due to errors",
attachments, alevel = alevel)
else:
target.report_blck("console dump due to blockage",
attachments, alevel = alevel)
def target_ic_kws_get(target, ic, keyword, default = None):
target.report_info(
"DEPRECATED: tcfl.tl.target_ic_kws_get() deprecated in"
" favour of target.ic_key_get()",
dict(trace = traceback.format_stack()))
return target.ic_key_get(ic, keyword, default)
def setup_verify_slip_feature(zephyr_client, zephyr_server, _ZEPHYR_BASE):
assert isinstance(zephyr_client, tcfl.tc.target_c)
assert isinstance(zephyr_server, tcfl.tc.target_c)
client_cfg = zephyr_client.zephyr.config_file_read()
server_cfg = zephyr_server.zephyr.config_file_read()
slip_mac_addr_found = False
for file_name in [
os.path.join(_ZEPHYR_BASE, "drivers", "net", "Kconfig"),
os.path.join(_ZEPHYR_BASE, "drivers", "slip", "Kconfig"),
]:
if os.path.exists(file_name):
with open(file_name, "r") as f:
if "SLIP_MAC_ADDR" in f.read():
slip_mac_addr_found = True
if ('CONFIG_SLIP' in client_cfg or 'CONFIG_SLIP' in server_cfg) and not slip_mac_addr_found:
raise tcfl.tc.blocked_e(
"Can't test: your Zephyr kernel in %s lacks support for "
"setting the SLIP MAC address via configuration "
"(CONFIG_SLIP_MAC_ADDR) -- please upgrade"
% _ZEPHYR_BASE, dict(dlevel = -1)
)
def teardown_targets_power_off(testcase):
assert isinstance(testcase, tcfl.tc.tc_c)
for dummy_twn, target in reversed(list(testcase.targets.items())):
target.power.off()
def tcpdump_enable(ic):
assert isinstance(ic, tcfl.tc.target_c)
ic.property_set('tcpdump', ic.kws['tc_hash'] + ".cap")
def tcpdump_collect(ic, filename = None):
assert isinstance(ic, tcfl.tc.target_c)
assert filename == None or isinstance(filename, str)
if filename == None:
filename = "report-%(runid)s:%(tc_hash)s" % ic.kws + "-%d" % (ic.testcase.eval_count + 1) + ".tcpdump"
ic.power.off()
ic.store.dnload(ic.kws['tc_hash'] + ".cap", filename)
ic.report_info("tcpdump available in file %s" % filename)
_os_release_regex = re.compile("^[_A-Z]+=.*$")
def linux_os_release_get(target, prefix = ""):
os_release = {}
output = target.shell.run("cat %s/etc/os-release || true" % prefix,
output = True, trim = True)
for line in output.split("\n"):
line = line.strip()
if not _os_release_regex.search(line):
continue
field, value = line.strip().split("=", 1)
os_release[field] = value.strip('"')
target.kw_set("linux.distro", os_release['ID'].strip('"'))
target.kw_set("linux.distro_version", os_release['VERSION_ID'].strip('"'))
return os_release
def linux_mount_scratchfs(target,
reformat: bool = True, path: str = "/scratch"):
output = target.shell.run("cat /proc/mounts", output = True, trim = True)
if ' /scratch ' not in output:
if reformat:
target.shell.run("mkfs.ext4 -F /dev/disk/by-partlabel/TCF-scratch")
target.shell.run(f"mkdir -p {path}")
target.shell.run(f"mount /dev/disk/by-partlabel/TCF-scratch {path}")
|
Apache License 2.0
|
chtd/doc-versions
|
documents/models.py
|
Document.document_save
|
python
|
def document_save(self, document_start=None):
if self.document_start is not None and document_start is not None:
assert self.document_start <= document_start
self.document_start = document_start or datetime.now()
self.document_end = datetime.max
if self.document_id and self.id:
if self.__class__.objects.filter(id=self.id,
document_id=self.document_id,
document_end__gt=FUTURE).update(document_end=self.document_start) != 1:
raise self.ChangedAlready()
elif self.document_id:
self.__class__.objects.filter(document_id=self.document_id,
document_end__gt=FUTURE).update(document_end=self.document_start)
self.id = self.pk = None
self.save(force_insert=True)
if self.document_id == 0:
self.document_id = self.new_document_id()
self.save(force_update=True)
|
Save the new version of the document
:param id: if given, should be the identifier of the last version,
from which the current one is being created. If this condition
is violated, a ChangedAlready exception is raised. You should check
for this exception to ensure that there are no concurrent edits
to the same document.
:param document_start: equals datetime.now() by default -
will be the time of the start of new version, and the end of the
old version.
Should be overridden in compound documents to get consistent versions
(as different parts might be saved at different times), and links
from the old parts might need updating too.
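A minimal usage sketch; `Article` is a hypothetical concrete subclass of
Document:
>>> article = Article(title='draft')
>>> article.document_save() # stores version 1
>>> article.title = 'final'
>>> article.document_save() # closes version 1 and stores version 2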
|
https://github.com/chtd/doc-versions/blob/ae536892f6245206abb7145592cf61408bc1161c/documents/models.py#L104-L142
|
from datetime import datetime
from django.db import models, transaction, connection, DEFAULT_DB_ALIAS
from django.shortcuts import get_object_or_404
from django.conf import settings
from documents.retrospection import now
FUTURE = datetime(3000, 1, 1)
class DocumentPartNowManager(models.Manager):
def get_query_set(self):
dt = now()
tm = self.model.to_master()
d = { tm + '__document_start__lte': dt,
tm + '__document_end__gt': dt}
return super(DocumentPartNowManager, self).get_query_set().filter(**d)
class DocumentNowManager(models.Manager):
def get_query_set(self):
dt = now()
return super(DocumentNowManager, self).get_query_set().filter(
document_start__lte=dt, document_end__gt=dt)
class DocumentPart(models.Model):
class ConfigurationError(Exception):
pass
class Meta:
abstract = True
@classmethod
def document_get(cls, dt, **kwargs):
return cls.at(dt, **kwargs).get()
@classmethod
def to_master(cls):
raise NotImplementedError
@classmethod
def at(cls, dt, **kwargs):
tm = cls.to_master()
d = { tm + '__document_start__lte': dt,
tm + '__document_end__gt': dt}
d.update(kwargs)
return cls.objects.filter(**d)
def history(self, **kwargs):
cls = self.__class__
tm = cls.to_master()
document = self
for r in tm.split('__'):
document = getattr(document, r)
d = { tm + '__document_id': document.document_id,
tm + '__document_start__lt': models.F(tm + '__document_end')}
d.update(kwargs)
return cls.objects.filter(**d).order_by('-' + tm + '__document_start')
objects = models.Manager()
now = DocumentPartNowManager()
class Document(DocumentPart):
class Meta:
abstract = True
document_start = models.DateTimeField(
'Time of the start of this version',
editable=False, db_index=True)
document_end = models.DateTimeField(
'Time of the end of this version',
editable=False, db_index=True)
document_id = models.IntegerField(
'Document identifier',
editable=False, default=0, db_index=True)
class ChangedAlready(Exception):
pass
objects = models.Manager()
now = DocumentNowManager()
|
MIT License
|
czhu95/ternarynet
|
tensorpack/tfutils/varmanip.py
|
dump_session_params
|
python
|
def dump_session_params(path):
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var.extend(tf.get_collection(EXTRA_SAVE_VARS_KEY))
result = {}
for v in var:
name = get_savename_from_varname(v.name)
if name in result:
logger.info("Variable {} would be stored instead of another with \
the same name".format(v.name))
result[name] = v.eval()
logger.info("Variables to save to {}:".format(path))
logger.info(str(result.keys()))
np.save(path, result)
|
Dump the values of all trainable + to-save variables to a dict and save it to
`path` in npy format, loadable by ParamRestore
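A minimal usage sketch; it assumes a default TensorFlow session is active so
the variables can be evaluated:
>>> with tf.Session() as sess:
... sess.run(tf.global_variables_initializer())
... dump_session_params('model.npy')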
|
https://github.com/czhu95/ternarynet/blob/1a67251f7f5a1cdf854f87f90f841655c7c9f11c/tensorpack/tfutils/varmanip.py#L73-L88
|
import six
import tensorflow as tf
from collections import defaultdict
import re
import numpy as np
from ..utils import logger
from ..utils.naming import *
__all__ = ['SessionUpdate', 'dump_session_params', 'dump_chkpt_vars',
'get_savename_from_varname']
def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
name = varname
if 'towerp' in name:
logger.error("No variable should be under 'towerp' name scope".format(v.name))
return None
if 'tower' in name:
name = re.sub('tower[p0-9]+/', '', name)
if varname_prefix is not None and name.startswith(varname_prefix):
name = name[len(varname_prefix)+1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name
class SessionUpdate(object):
def __init__(self, sess, vars_to_update):
self.sess = sess
self.assign_ops = defaultdict(list)
for v in vars_to_update:
with tf.device('/cpu:0'):
p = tf.placeholder(v.dtype)
savename = get_savename_from_varname(v.name)
self.assign_ops[savename].append((p, v, v.assign(p)))
def update(self, prms):
for name, value in six.iteritems(prms):
assert name in self.assign_ops
for p, v, op in self.assign_ops[name]:
varshape = tuple(v.get_shape().as_list())
if varshape != value.shape:
assert np.prod(varshape) == np.prod(value.shape), "{}: {}!={}".format(name, varshape, value.shape)
logger.warn("Param {} is reshaped during assigning".format(name))
value = value.reshape(varshape)
self.sess.run(op, feed_dict={p: value})
|
Apache License 2.0
|
google/tensornetwork
|
tensornetwork/network_components.py
|
CopyNode.__init__
|
python
|
def __init__(self,
rank: int,
dimension: int,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Text] = None,
dtype: Type[np.number] = np.float64) -> None:
if backend is None:
backend = get_default_backend()
backend_obj = backend_factory.get_backend(backend)
self.rank = rank
self.dimension = dimension
self._tensor = None
self.copy_node_dtype = dtype
super().__init__(name=name,
axis_names=axis_names,
backend=backend_obj,
shape=(dimension,) * rank)
|
Initialize a CopyNode:
Args:
rank: The rank of the tensor.
dimension: The dimension of each leg.
name: A name for the node.
axis_names: axis_names for the node.
backend: An optional backend for the node. If `None`, a default
backend is used
dtype: The dtype used to initialize a numpy-copy node.
Note that this dtype has to be a numpy dtype, and it has to be
compatible with the dtype of the backend, e.g. for a tensorflow
backend with a tf.DType of tf.float32, `dtype` has to be `np.float32`.
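A minimal construction sketch using the default backend: a rank-3 copy
tensor whose three legs each have dimension 2.
>>> copy_node = CopyNode(rank=3, dimension=2)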
|
https://github.com/google/tensornetwork/blob/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/network_components.py#L739-L773
|
from typing import Any, Dict, List, Optional, Set, Text, Tuple, Type, Union, overload, Sequence, Iterable
import numpy as np
from abc import ABC
from abc import abstractmethod
import h5py
from tensornetwork import ops
from tensornetwork.backends import backend_factory
from tensornetwork.backends.abstract_backend import AbstractBackend
from tensornetwork.backend_contextmanager import get_default_backend
STRING_ENCODING = 'utf-8'
string_type = h5py.string_dtype(encoding=STRING_ENCODING)
Tensor = Any
class AbstractNode(ABC):
def __init__(self,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[AbstractBackend] = None,
shape: Optional[Tuple[int]] = None) -> None:
self.is_disabled = False
if name is None:
name = '__unnamed_node__'
else:
if not isinstance(name, str):
raise TypeError("Node name should be str type")
self.name = name
self.backend = backend
self._shape = shape
if axis_names is not None:
for axis_name in axis_names:
if not isinstance(axis_name, str):
raise TypeError("axis_names should be str type")
self._edges = [
Edge(node1=self, axis1=i, name=edge_name)
for i, edge_name in enumerate(axis_names)
]
elif shape is not None:
self._edges = [
Edge(node1=self, axis1=i, name="__unnamed_edge__")
for i, _ in enumerate(shape)
]
else:
raise ValueError("One of axis_names or shape must be provided.")
if axis_names is not None:
self.add_axis_names(axis_names)
else:
self._axis_names = [str(i) for i in range(len(shape))]
collection = ops.get_current_collection()
if collection is not None:
collection.add(self)
super().__init__()
def __add__(self, other: Union[int, float, "AbstractNode"]) -> "AbstractNode":
raise NotImplementedError("AbstractNode has not implemented addition ( + )")
def __sub__(self, other: Union[int, float, "AbstractNode"]) -> "AbstractNode":
raise NotImplementedError(
"AbstractNode has not implemented subtraction ( - )")
def __mul__(self, other: Union[int, float, "AbstractNode"]) -> "AbstractNode":
raise NotImplementedError("AbstractNode has not implemented multiply ( * )")
def __truediv__(self, other: Union[int, float,
"AbstractNode"]) -> "AbstractNode":
raise NotImplementedError("AbstractNode has not implemented divide ( / )")
@property
def dtype(self):
return self.tensor.dtype
def add_axis_names(self, axis_names: List[Text]) -> None:
if len(axis_names) != len(set(axis_names)):
raise ValueError("Not all axis names are unique.")
if len(axis_names) != len(self.shape):
raise ValueError("axis_names is not the same length as the tensor shape."
"axis_names length: {}, tensor.shape length: {}".format(
len(axis_names), len(self.shape)))
for axis_name in axis_names:
if not isinstance(axis_name, str):
raise TypeError("axis_names should be str type")
self.axis_names = axis_names[:]
def add_edge(self,
edge: "Edge",
axis: Union[int, Text],
override: bool = False) -> None:
axis_num = self.get_axis_number(axis)
if axis_num < 0 or axis_num >= len(self.shape):
raise ValueError("Axis must be positive and less than rank of the tensor")
if not self.edges[axis_num].is_dangling() and not override:
raise ValueError(
"Node '{}' already has a non-dangling edge for axis {}".format(
self, axis))
self.edges[axis_num] = edge
@abstractmethod
def get_tensor(self) -> Tensor:
return
@abstractmethod
def set_tensor(self, tensor) -> None:
return
@property
@abstractmethod
def shape(self) -> Tuple[Optional[int], ...]:
if self._shape is None:
raise ValueError('Please ensure this Node has a well-defined shape')
return self._shape
@property
def sparse_shape(self) -> Any:
return self.backend.sparse_shape(self.tensor)
@property
@abstractmethod
def tensor(self) -> Tensor:
return
@tensor.setter
@abstractmethod
def tensor(self, tensor: Tensor) -> None:
return
def get_rank(self) -> int:
return len(self.shape)
def reorder_edges(self, edge_order: List["Edge"]) -> "AbstractNode":
if not hasattr(self, '_tensor'):
raise AttributeError("Please provide a valid tensor for this Node.")
extra_edges = set(edge_order).difference(set(self.edges))
if extra_edges:
raise ValueError("Given edge order does not match expected edges. "
"Additional edges that do not belong to node found: "
"{}".format(extra_edges))
missing_edges = set(self.edges).difference(set(edge_order))
if missing_edges:
raise ValueError("Given edge order does not match expected edges. "
"Missing edges that belong to node found: "
"{}".format(missing_edges))
for edge in edge_order:
if edge.node1 == edge.node2:
raise ValueError("Edge reordering does not support trace edges. "
"Found trace edge: '{}'".format(edge))
permutation = []
for i, edge in enumerate(edge_order):
old_position = self.edges.index(edge)
permutation.append(old_position)
edge.update_axis(old_position, self, i, self)
self.edges = edge_order[:]
self.tensor = self.backend.transpose(self.tensor, perm=permutation)
if self.axis_names is not None:
tmp_axis_names = []
for i in permutation:
tmp_axis_names.append(self.axis_names[i])
self.axis_names = tmp_axis_names
return self
def reorder_axes(self, perm: List[int]) -> "AbstractNode":
if not hasattr(self, '_tensor'):
raise AttributeError("Please provide a valid tensor for this Node.")
if set(perm) != set(range(len(self.edges))):
raise ValueError("A full permutation was not passed. "
"Permutation passed: {}".format(perm))
self.tensor = self.backend.transpose(self.tensor, perm=perm)
tmp_edges = []
for i, position in enumerate(perm):
edge = self.edges[position]
edge.update_axis(position, self, i, self)
tmp_edges.append(edge)
self.edges = tmp_edges
if self.axis_names is not None:
tmp_axis_names = []
for i in perm:
tmp_axis_names.append(self.axis_names[i])
self.axis_names = tmp_axis_names
return self
def tensor_from_edge_order(self, perm: List["Edge"]) -> "AbstractNode":
order = []
for edge in perm:
if edge.node1 is self:
order.append(edge.axis1)
elif edge.node2 is self:
order.append(edge.axis2)
else:
raise ValueError("edge {} is not connected to node {}".format(
edge.name, self.name))
return self.backend.transpose(self.tensor, order)
def get_axis_number(self, axis: Union[Text, int]) -> int:
if isinstance(axis, int):
return axis
try:
return self.axis_names.index(axis)
except ValueError as err:
raise ValueError("Axis name '{}' not found for node '{}'".format(
axis, self)) from err
def get_dimension(self, axis: Union[Text, int]) -> Optional[int]:
axis_num = self.get_axis_number(axis)
if axis_num < 0 or axis_num >= len(self.shape):
raise ValueError("Axis must be positive and less than rank of the tensor")
return self.shape[axis_num]
def get_edge(self, axis: Union[int, Text]) -> "Edge":
axis_num = self.get_axis_number(axis)
return self.edges[axis_num]
def get_all_edges(self) -> List["Edge"]:
return self.edges[:]
def get_all_nondangling(self) -> Set["Edge"]:
return {edge for edge in self.edges if not edge.is_dangling()}
def get_all_dangling(self) -> List["Edge"]:
return [edge for edge in self.edges if edge.is_dangling()]
def set_name(self, name) -> None:
if not isinstance(name, str):
raise TypeError("Node name should be str type")
self.name = name
def has_nondangling_edge(self) -> bool:
for e in self.edges:
if not e.is_dangling():
return True
return False
def has_dangling_edge(self) -> bool:
for e in self.edges:
if e.is_dangling():
return True
return False
@overload
def __getitem__(self, key: slice) -> List["Edge"]:
pass
@overload
def __getitem__(self, key: Union[int, Text]) -> "Edge":
pass
def __getitem__(self, key: Union[int, Text,
slice]) -> Union["Edge", List["Edge"]]:
if isinstance(key, slice):
return self.edges[key]
return self.get_edge(key)
def __str__(self) -> Text:
return self.name
def __lt__(self, other) -> bool:
if not isinstance(other, AbstractNode):
raise ValueError("Object {} is not a Node type.".format(other))
return id(self) < id(other)
def __matmul__(self, other: "AbstractNode") -> "AbstractNode":
if not hasattr(self, '_tensor'):
raise AttributeError("Please provide a valid tensor for this Node.")
if not isinstance(other, AbstractNode):
raise TypeError("Cannot use '@' with type '{}'".format(type(other)))
if self.is_disabled:
raise ValueError("Cannot use '@' on disabled node {}.".format(self.name))
return contract_between(self, other)
@property
def edges(self) -> List["Edge"]:
if self.is_disabled:
raise ValueError('Node {} has been disabled. '
'Accessing its edges is no longer possible'.format(
self.name))
return self._edges
@edges.setter
def edges(self, edges: List) -> None:
if self.is_disabled:
raise ValueError('Node {} has been disabled.'
'Assigning edges is no longer possible'.format(
self.name))
self._edges = edges
@property
def name(self) -> Text:
return self._name
@name.setter
def name(self, name) -> None:
if not isinstance(name, str):
raise TypeError("Node name should be str type")
self._name = name
@property
def axis_names(self) -> List[Text]:
return self._axis_names
@axis_names.setter
def axis_names(self, axis_names: List[Text]) -> None:
if len(axis_names) != len(self.shape):
raise ValueError("Expected {} names, only got {}.".format(
len(self.shape), len(axis_names)))
for axis_name in axis_names:
if not isinstance(axis_name, str):
raise TypeError("axis_names should be str type")
self._axis_names = axis_names
def disable(self) -> None:
if self.is_disabled:
raise ValueError('Node {} is already disabled'.format(self.name))
self.is_disabled = True
@classmethod
@abstractmethod
def _load_node(cls, node_data: h5py.Group) -> "AbstractNode":
return
@classmethod
def _load_node_data(cls, node_data: h5py.Group) -> Tuple[Any, Any, Any, Any]:
name = node_data['name'].asstr(STRING_ENCODING)[()]
backend = node_data['backend'].asstr(STRING_ENCODING)[()]
shape = node_data['shape'][()]
axis_names = node_data['axis_names'].asstr(STRING_ENCODING)[()]
return name, shape, axis_names, backend
@abstractmethod
def _save_node(self, node_group: h5py.Group) -> None:
node_group.create_dataset(
'type', dtype=string_type, data=type(self).__name__)
node_group.create_dataset(
'backend', dtype=string_type, data=self.backend.name)
node_group.create_dataset('name', data=self.name, dtype=string_type)
node_group.create_dataset('shape', data=self.shape)
if self.axis_names:
node_group.create_dataset('axis_names',
dtype=string_type,
data=np.array(self.axis_names, dtype=object))
else:
node_group.create_dataset('axis_names', dtype='i', data=123456789)
node_group.create_dataset('edges',
dtype=string_type,
data=np.array([edge.name for edge in self.edges],
dtype=object))
@abstractmethod
def to_serial_dict(self) -> Dict:
node_dict = {
'name': self.name,
'axis_names': self.axis_names,
'backend': self.backend.name,
}
return node_dict
@classmethod
@abstractmethod
def from_serial_dict(cls, serial_dict) -> "AbstractNode":
return cls(**serial_dict)
@abstractmethod
def copy(self, conjugate: bool = False) -> "AbstractNode":
return
def fresh_edges(self, axis_names: Optional[List[Text]] = None) -> None:
if not axis_names:
axis_names = self.axis_names
if not axis_names:
axis_names = [str(i) for i in range(len(self.shape))]
for i in range(len(self.edges)):
new_edge = Edge(node1=self, axis1=i, name=axis_names[i])
self.add_edge(new_edge, i, True)
class Node(AbstractNode):
def __init__(self,
tensor: Union[Tensor, AbstractNode],
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> None:
if isinstance(tensor, AbstractNode):
backend = tensor.backend
tensor = tensor.tensor
if backend is None:
backend = get_default_backend()
if isinstance(backend, AbstractBackend):
backend_obj = backend
else:
backend_obj = backend_factory.get_backend(backend)
self._tensor = backend_obj.convert_to_tensor(tensor)
super().__init__(name=name,
axis_names=axis_names,
backend=backend_obj,
shape=backend_obj.shape_tuple(self._tensor))
def op_protection(self, other: Union[int, float, complex, "Node"]) -> Tensor:
if not isinstance(other, (int, float, complex, Node)):
raise TypeError("Operand should be one of int, float, Node type")
if not hasattr(self, '_tensor'):
raise AttributeError("Please provide a valid tensor for this Node.")
if isinstance(other, Node):
if not self.backend.name == other.backend.name:
raise TypeError("Operands backend must match.\noperand 1 backend: {}"
"\noperand 2 backend: {}".format(
self.backend.name, other.backend.name))
if not hasattr(other, '_tensor'):
raise AttributeError("Please provide a valid tensor for this Node.")
return other._tensor
return other
def __add__(self, other: Union[int, float, "Node"]) -> "Node":
other_tensor = self.op_protection(other)
new_tensor = self.backend.addition(self.tensor, other_tensor)
return Node(tensor=new_tensor,
name=self.name,
axis_names=None,
backend=self.backend.name)
def __sub__(self, other: Union[int, float, "Node"]) -> "Node":
other_tensor = self.op_protection(other)
new_tensor = self.backend.subtraction(self.tensor, other_tensor)
return Node(tensor=new_tensor,
name=self.name,
axis_names=None,
backend=self.backend.name)
def __mul__(self, other: Union[int, float, "Node"]) -> "Node":
other_tensor = self.op_protection(other)
new_tensor = self.backend.multiply(self.tensor, other_tensor)
return Node(tensor=new_tensor,
name=self.name,
axis_names=None,
backend=self.backend.name)
def __truediv__(self, other: Union[int, float, "Node"]) -> "Node":
other_tensor = self.op_protection(other)
new_tensor = self.backend.divide(self.tensor, other_tensor)
return Node(tensor=new_tensor,
name=self.name,
axis_names=None,
backend=self.backend.name)
def get_tensor(self) -> Tensor:
return self.tensor
def set_tensor(self, tensor) -> None:
self.tensor = tensor
def copy(self, conjugate: bool = False) -> "Node":
new_node = Node(self.tensor,
name=self.name,
axis_names=self.axis_names,
backend=self.backend)
if conjugate:
new_node.set_tensor(self.backend.conj(self.tensor))
visited_edges = set()
for i, edge in enumerate(self.edges):
if edge in visited_edges:
continue
visited_edges.add(edge)
if edge.node1 == edge.node2:
new_edge = Edge(new_node,
edge.axis1,
name=edge.name,
node2=new_node,
axis2=edge.axis2)
new_node.add_edge(new_edge, edge.axis1)
new_node.add_edge(new_edge, edge.axis2)
else:
new_node.add_edge(Edge(new_node, i, name=edge.name), i)
return new_node
def to_serial_dict(self) -> Dict:
node_dict = super().to_serial_dict()
node_dict['tensor'] = self.backend.serialize_tensor(self.tensor)
return node_dict
@classmethod
def from_serial_dict(cls, serial_dict) -> "Node":
serial_dict['tensor'] = backend_factory.get_backend(
serial_dict['backend']).deserialize_tensor(serial_dict['tensor'])
return cls(**serial_dict)
@property
def shape(self) -> Tuple[Optional[int], ...]:
if self.is_disabled:
raise ValueError('Node {} has been disabled. '
'Access its shape via self.tensor'.format(self.name))
return self.backend.shape_tuple(self._tensor)
@property
def tensor(self) -> Tensor:
return self._tensor
@tensor.setter
def tensor(self, tensor: Tensor) -> Tensor:
self._tensor = tensor
def _save_node(self, node_group: h5py.Group) -> None:
super()._save_node(node_group)
node_group.create_dataset('tensor', data=self._tensor)
@classmethod
def _load_node(cls, node_data: h5py.Group) -> "AbstractNode":
name, _, axis_names, backend = cls._load_node_data(node_data)
tensor = node_data['tensor'][()]
node = Node(tensor,
name=name,
axis_names=[ax for ax in axis_names],
backend=backend)
return node
def __repr__(self) -> Text:
edges = self.get_all_edges()
return (f'{self.__class__.__name__}\n(\n'
f'name : {self.name!r},'
f'\ntensor : \n{self.tensor!r},'
f'\nedges : \n{edges!r} \n)')
class CopyNode(AbstractNode):
|
Apache License 2.0
|
nmisko/monkalot
|
bot/bot.py
|
TwitchBot.load_sources
|
python
|
def load_sources(self):
config = ConfigSource(self.root, self.cache)
emotes = EmoteSource(
config.channel,
cache=self.cache,
twitch_api_headers=config.twitch_api_headers,
)
twitch = TwitchSource(
config.channel,
cache=self.cache,
twitch_api_headers=config.twitch_api_headers,
)
return emotes, twitch, config
|
Reloads data sources.
|
https://github.com/nmisko/monkalot/blob/c4f5de551266daa70370dd31278a47543efb24da/bot/bot.py#L58-L71
|
import logging
import time
import traceback
from collections import defaultdict
import bot.commands
import bot.emotecounter
import bot.ranking
from bot.data_sources.config import ConfigSource
from bot.data_sources.emotes import EmoteSource
from bot.data_sources.twitch import TwitchSource
from bot.utilities.permission import Permission
from bot.utilities.tools import replace_vars
from bot.utilities.tools import sanitize_user_name
from bot.utilities.webcache import WebCache
CACHE_DURATION = 10800
class TwitchBot:
def __init__(self, root):
self.root = root
self.irc = None
self.cache = WebCache(duration=CACHE_DURATION)
self.emotes, self.twitch, self.config = self.load_sources()
self.commands = []
self.games, self.passivegames = self.load_commands()
self.last_warning = defaultdict(int)
self.host_target = False
self.pause = False
self.game_running = False
self.antispeech = False
self.pyramid_block = False
self.last_plebcmd = time.time() - self.config.pleb_cooldowntime
self.last_plebgame = time.time() - self.config.pleb_gametimer
self.ecount = bot.emotecounter.EmoteCounterForBot(self)
self.ecount.start_cpm()
self.ranking = bot.ranking.Ranking(self)
self.mods = set()
self.subs = set()
self.users = self.twitch.get_chatters()
|
MIT License
|
praekeltfoundation/python-whatsapp-business-client
|
wabclient/client.py
|
ConfigurationManager.get_business_profile
|
python
|
def get_business_profile(self):
data = self.connection.get('/v1/settings/business/profile')
return data['settings']['business']
|
Gets the business profile for this account
:return: dict
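A minimal usage sketch, assuming `ConfigurationManager` is importable from `wabclient.client` (the module shown below) and that the endpoint URL and token are placeholders.

from wabclient.client import ConfigurationManager

# Placeholder endpoint and token; substitute the values for your deployment.
config_manager = ConfigurationManager("https://whatsapp.example.com")
config_manager.connection.set_token("YOUR_API_TOKEN")

# Returns the dict stored under data['settings']['business'].
profile = config_manager.get_business_profile()
print(profile)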
|
https://github.com/praekeltfoundation/python-whatsapp-business-client/blob/c32e1d0d9a3a7af222a5e8eb69bd50d5c3211667/wabclient/client.py#L357-L364
|
import requests
import mimetypes
import phonenumbers
import attr
import iso8601
from functools import wraps
from datetime import datetime
from six.moves import urllib_parse
from wabclient.exceptions import (
RequestRateLimitingException, ConcurrencyRateLimitingException,
WhatsAppAPIException, AddressException, GroupException)
from wabclient import constants as c
from wabclient.commands import (
MediaCommand, TextCommand, BackupCommand, RestoreBackupCommand,
ContactsCommand, RegistrationCommand, VerifyCommand, AboutCommand,
ApplicationSettingsCommand, BusinessProfileCommand, CreateGroupCommand,
UpdateGroupCommand, RevokeGroupInviteLink, AddGroupAdminCommand,
RemoveGroupAdminCommand, RemoveGroupParticipantCommand, LeaveGroupCommand,
HSMCommand, UpdatePasswordCommand, CreateUserCommand, RetrieveGroups,
SetShardingCommand, InitialPasswordCommand)
DEFAULT_TIMEOUT = 10
def json_or_death(func):
@wraps(func)
def decorator(*args, **kwargs):
resp = func(*args, **kwargs)
resp.raise_for_status()
return resp.json()
return decorator
def guess_content_type(filename, fallback):
(content_type, encoding) = mimetypes.guess_type(filename)
return content_type or fallback
def has_url(content):
return (content is not None) and (
'http://' in content or
'https://' in content)
error_map = {
429: RequestRateLimitingException,
503: ConcurrencyRateLimitingException,
}
default_exception = WhatsAppAPIException
def fail(data):
error = data.get('error', {})
exception_class = error_map.get(
error.get('errorcode'), default_exception)
return exception_class(data)
class Connection(object):
def __init__(self, url, timeout=DEFAULT_TIMEOUT, session=None):
self.url = url
self.session = session or requests.Session()
self.timeout = timeout
@json_or_death
def upload(self, path, fp, content_type):
return self.session.post(
urllib_parse.urljoin(self.url, path),
data=fp.read(),
headers={'Content-Type': content_type})
def upload_media(self, fp, content_type):
data = self.upload('/v1/media', fp, content_type)
[media] = data["media"]
return media["id"]
def download(self, filename):
response = self.session.get(
urllib_parse.urljoin(self.url, filename),
stream=True)
response.raise_for_status()
response.raw.decode_content = True
return (
int(response.headers['content-length']), response.raw)
def download_media(self, media_id):
response = self.session.get(
urllib_parse.urljoin(self.url, '/v1/media/%s' % (media_id,)),
stream=True)
response.raise_for_status()
response.raw.decode_content = True
return (
int(response.headers['content-length']), response.raw)
@json_or_death
def get(self, path, params={}):
return self.session.get(
urllib_parse.urljoin(self.url, path), params=params)
def post(self, path, *args, **kwargs):
return self.session.post(
urllib_parse.urljoin(
self.url, path), *args, **kwargs)
@json_or_death
def send(self, command):
return self.session.request(
command.get_method(),
urllib_parse.urljoin(self.url, command.get_endpoint()),
json=command.render())
def set_token(self, token):
self.session.headers.update({
'Authorization': 'Bearer %s' % (token,)
})
@attr.s
class Group(object):
id = attr.ib(type=str)
creation_time = attr.ib(
type=int, default=None, converter=datetime.fromtimestamp)
subject = attr.ib(type=str, default=None)
creator = attr.ib(type=str, default=None)
admins = attr.ib(default=attr.Factory(list))
participants = attr.ib(default=attr.Factory(list))
class GroupManager(object):
def __init__(self, url, timeout=DEFAULT_TIMEOUT, session=None):
self.url = url
self.connection = Connection(
self.url, timeout=timeout, session=session)
def create(self, subject, profile_photo=None, profile_photo_name=None):
if not subject:
raise GroupException('Subjects are required')
elif len(subject) > 25:
raise GroupException('Subject length must be <= 25 characters')
data = self.connection.send(CreateGroupCommand(subject=subject))
[group_data] = data["groups"]
group_data.update({
'subject': subject,
})
group = Group(**group_data)
if profile_photo:
if not profile_photo_name:
raise GroupException('Profile photo name is mandatory.')
self.set_profile_photo(
group.id, profile_photo, profile_photo_name)
return group
def update_group(self, group_id, subject):
return self.connection.send(UpdateGroupCommand(group_id, subject))
def set_profile_photo(self, group_id, fp, file_name):
self.connection.upload(
'/v1/groups/%s/icon' % (group_id,),
fp, guess_content_type(file_name, 'image/jpeg'))
def get_invite_link(self, group_id):
response = self.connection.get('/v1/groups/%s/invite' % (group_id,))
[group_data] = response['groups']
return group_data['link']
def revoke_invite_link(self, group_id):
return self.connection.send(RevokeGroupInviteLink(group_id))
def add_admins(self, group_id, participants):
return self.connection.send(AddGroupAdminCommand(
group_id=group_id, wa_ids=participants))
def remove_admins(self, group_id, participants):
return self.connection.send(RemoveGroupAdminCommand(
group_id=group_id, wa_ids=participants))
def remove_participants(self, group_id, participants):
return self.connection.send(RemoveGroupParticipantCommand(
group_id=group_id, wa_ids=participants))
def leave(self, group_id):
return self.connection.send(LeaveGroupCommand(group_id))
def list(self):
response = self.connection.send(RetrieveGroups())
return response['groups']
class ConfigurationManager(object):
CODE_REQUEST_SMS = 'sms'
CODE_REQUEST_VOICE = 'voice'
def __init__(self, url, timeout=DEFAULT_TIMEOUT, session=None):
self.url = url
self.connection = Connection(self.url, timeout=timeout,
session=session)
def setup_shards(self, phonenumber, shard_count, pin=None):
pn = phonenumbers.parse(phonenumber)
return self.connection.send(
SetShardingCommand(
cc=str(pn.country_code),
phone_number=str(pn.national_number),
shards=shard_count,
pin=pin))
def request_code(self, phonenumber, vname,
method=CODE_REQUEST_SMS):
pn = phonenumbers.parse(phonenumber)
data = self.connection.send(
RegistrationCommand(
cc=str(pn.country_code),
phone_number=str(pn.national_number),
method=method,
cert=vname))
return data['account'][0]
def register(self, code):
return self.connection.send(VerifyCommand(code))
def get_profile_photo(self):
(size, data) = self.connection.download('/v1/settings/profile/photo')
return data
def set_profile_photo(self, fp, file_name):
return self.connection.upload(
'/v1/settings/profile/photo', fp,
guess_content_type(file_name, 'image/jpeg'))
def get_about(self):
data = self.connection.get('/v1/settings/profile/about')
return data['settings']['profile']['about']['text']
def set_about(self, about):
return self.connection.send(AboutCommand(about))
|
BSD 3-Clause New or Revised License
|
rapid7/vm-console-client-python
|
rapid7vmconsole/models/content_description.py
|
ContentDescription.text
|
python
|
def text(self):
return self._text
|
Gets the text of this ContentDescription. # noqa: E501
Textual representation of the content. # noqa: E501
:return: The text of this ContentDescription. # noqa: E501
:rtype: str
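A brief usage sketch of the getter, based on the constructor shown below; the import path is assumed from the module location.

from rapid7vmconsole.models.content_description import ContentDescription

# Both fields are optional; the setters simply store the given values.
description = ContentDescription(html="<p>Summary of the finding</p>",
                                 text="Summary of the finding")
print(description.text)  # -> 'Summary of the finding'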
|
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/content_description.py#L77-L85
|
import pprint
import re
import six
class ContentDescription(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'html': 'str',
'text': 'str'
}
attribute_map = {
'html': 'html',
'text': 'text'
}
def __init__(self, html=None, text=None):
self._html = None
self._text = None
self.discriminator = None
if html is not None:
self.html = html
if text is not None:
self.text = text
@property
def html(self):
return self._html
@html.setter
def html(self, html):
self._html = html
@property
|
MIT License
|
br-idl/paddlevit
|
semantic_segmentation/src/transforms/functional.py
|
imnormalize_
|
python
|
def imnormalize_(img, mean, std):
assert img.dtype != np.uint8
mean = np.float64(mean.reshape(1, -1))
stdinv = 1 / np.float64(std.reshape(1, -1))
cv2.subtract(img, mean, img)
cv2.multiply(img, stdinv, img)
return img
|
Inplace normalize an image with mean and std.
Args:
img (ndarray): Image to be normalized. (0~255)
mean (ndarray): The mean to be used for normalize.
std (ndarray): The std to be used for normalize.
to_rgb (bool): Whether to convert to rgb.
Returns:
ndarray: The normalized image.
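A small example of the usual call path through the `imnormalize` wrapper shown below, which copies and casts to float32 so the uint8 assert in `imnormalize_` is satisfied. The mean/std values are illustrative, not mandated by the source, and both functions from this module are assumed to be in scope.

import numpy as np

# Illustrative ImageNet-style per-channel statistics on the 0-255 scale.
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

# imnormalize copies/casts to float32 before delegating to imnormalize_;
# passing the raw uint8 array to imnormalize_ directly would trip the assert.
normalized = imnormalize(img, mean, std)
print(normalized.dtype, normalized.shape)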
|
https://github.com/br-idl/paddlevit/blob/1f02492bdb1ec1b4452c098ad50016c9ab6f2e31/semantic_segmentation/src/transforms/functional.py#L28-L46
|
import cv2
import numpy as np
from PIL import Image, ImageEnhance
from scipy.ndimage.morphology import distance_transform_edt
def normalize(img, mean, std):
img = img.astype(np.float32, copy=False) / 255.0
img -= mean
img /= std
return img
def imnormalize(img, mean, std):
img = img.copy().astype(np.float32)
return imnormalize_(img, mean, std)
|
Apache License 2.0
|
mhvis/solar
|
samil/inverter.py
|
Inverter.model
|
python
|
def model(self) -> Dict:
ident, payload = self.request(b'\x01\x03\x02', b'', b'\x01\x83')
device_types = {
'1': 'Single-phase inverter',
'2': 'Three-phase inverter',
'3': 'SolarEnvi Monitor',
'4': 'R-phase inverter of the three combined single-phase ones',
'5': 'S-phase inverter of the three combined single-phase ones',
'6': 'T-phase inverter of the three combined single-phase ones',
}
return OrderedDict(
device_type=device_types[decode_string(payload[0:1])],
va_rating=decode_string(payload[1:7]),
firmware_version=decode_string(payload[7:12]),
model_name=decode_string(payload[12:28]),
manufacturer=decode_string(payload[28:44]),
serial_number=decode_string(payload[44:60]),
communication_version=decode_string(payload[60:65]),
other_version=decode_string(payload[65:70]),
general=decode_string(payload[70:71]),
)
|
Gets model information from the inverter.
For all possible dictionary items, see the implementation.
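A hedged usage sketch; the address and port are placeholders, and it assumes `Inverter` is importable from `samil.inverter` and that a Samil-compatible inverter is reachable.

import socket
from samil.inverter import Inverter

# Placeholder address; discovery/connection handling lives elsewhere in the package.
address = ("192.168.1.50", 1200)
sock = socket.create_connection(address, timeout=30)

# The context manager closes the connection via disconnect() on exit.
with Inverter(sock, address) as inverter:
    info = inverter.model()
    print(info["model_name"], info["serial_number"])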
|
https://github.com/mhvis/solar/blob/c0f987e6056e992a2978d8f9de49c1b3d56116e7/samil/inverter.py#L72-L96
|
import logging
import socket
import sys
from collections import OrderedDict
from threading import Event, Thread
from time import sleep
from typing import Tuple, Dict, BinaryIO, Any, Optional
from samil.statustypes import status_types
class Inverter:
_status_format = None
def __init__(self, sock: socket, addr):
self.sock = sock
self.sock_file = sock.makefile('rwb')
self.addr = addr
self.sock.settimeout(30.0)
def __enter__(self):
return self
def __exit__(self, *args):
self.disconnect()
def disconnect(self) -> None:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as e:
if e.errno != 107 and e.errno != 9 and e.errno != 10038:
raise e
self.sock_file.close()
self.sock.close()
|
MIT License
|
spiralgenetics/truvari
|
truvari/collapse.py
|
sort_maxqual
|
python
|
def sort_maxqual(b1, b2):
return b1.qual < b2.qual
|
Swap the entry with the one containing the call with the maximum quality score
|
https://github.com/spiralgenetics/truvari/blob/2583253773ccf5e64f6d9c6d951178d8f0cbdfb5/truvari/collapse.py#L146-L150
|
import os
import sys
import json
import logging
import argparse
import itertools
from functools import cmp_to_key
import pysam
import truvari
import truvari.bench as trubench
def collapse_chunk(chunk):
matcher, chunk_dict, chunk_id = chunk
calls = chunk_dict['base']
logging.debug(f"Comparing chunk {calls}")
calls.sort(reverse=True, key=matcher.sorter)
ret = {}
chain_lookup = {}
call_id = 0
while calls:
cur_keep_candidate = calls.pop(0)
keep_key = truvari.entry_to_key(cur_keep_candidate)
if matcher.chain and keep_key in chain_lookup:
keep_key = chain_lookup[keep_key]
else:
ret[keep_key] = [cur_keep_candidate, [], f'{chunk_id}.{call_id}']
remaining_calls = []
for cur_collapse_candidate in calls:
mat = matcher.build_match(cur_keep_candidate,
cur_collapse_candidate,
ret[keep_key][2])
if matcher.hap and not hap_resolve(cur_keep_candidate, cur_collapse_candidate):
mat.state = False
if mat.state:
if matcher.chain:
collap_key = truvari.entry_to_key(cur_collapse_candidate)
chain_lookup[collap_key] = keep_key
remaining_calls.append(cur_collapse_candidate)
ret[keep_key][1].append(mat)
else:
remaining_calls.append(cur_collapse_candidate)
if matcher.hap and ret[keep_key][1]:
candidates = sorted(ret[keep_key][1], reverse=True)
ret[keep_key][1] = [candidates.pop(0)]
remaining_calls.extend(candidates)
calls = remaining_calls
for key, val in ret.items():
logging.debug("Collapsing %s", key)
val[0] = collapse_into_entry(val[0], val[1])
ret = list(ret.values())
for i in chunk_dict['__filtered']:
ret.append([i, None, None])
return ret
def collapse_into_entry(entry, others, hap_mode=False):
if not others:
return entry
others.sort(reverse=True)
replace_gts = ["UNK", "REF", "NON"]
if hap_mode:
replace_gts.append("HET")
for sample in entry.samples:
m_gt = truvari.get_gt(entry.samples[sample]["GT"]).name
if m_gt not in replace_gts:
continue
n_idx = None
for pos, o_entry in enumerate(others):
o_entry = o_entry.comp
o_gt = truvari.get_gt(o_entry.samples[sample]["GT"]).name
if o_gt not in replace_gts:
n_idx = pos
break
if hap_mode and m_gt == "HET":
entry.samples[sample]["GT"] = (1, 1)
elif n_idx is not None:
o_entry = others[n_idx].comp
for key in set(entry.samples[sample].keys() + o_entry.samples[sample].keys()):
entry.samples[sample][key] = o_entry.samples[sample][key]
return entry
def hap_resolve(entryA, entryB):
gtA = entryA.samples[0]["GT"]
gtB = entryB.samples[0]["GT"]
if gtA == (1, 1) or gtB == (1, 1):
return False
if gtA == gtB:
return False
return True
def sort_first(b1, b2):
return b1.pos < b2.pos
|
MIT License
|
taxiiproject/libtaxii
|
libtaxii/scripts/collection_information_client.py
|
main
|
python
|
def main():
script = CollectionInformationClient11Script()
script()
|
Send a Collection Information Request to a TAXII 1.1 Service
|
https://github.com/taxiiproject/libtaxii/blob/41f413357f4aaaaf99e37152558bb7464dbf5768/libtaxii/scripts/collection_information_client.py#L25-L28
|
import libtaxii.messages_11 as tm11
from libtaxii.scripts import TaxiiScript
class CollectionInformationClient11Script(TaxiiScript):
    parser_description = ('The TAXII 1.1 Collection Information Client sends a Collection Information Request '
                          'to a TAXII Server and then prints the resulting Collection Information Response to '
                          'standard out.')
path = '/taxii-data'
def create_request_message(self, args):
message_id = tm11.generate_message_id()
return tm11.CollectionInformationRequest(message_id)
|
BSD 3-Clause New or Revised License
|
google/pinject
|
pinject/decorators.py
|
annotate_arg
|
python
|
def annotate_arg(arg_name, with_annotation):
arg_binding_key = arg_binding_keys.new(arg_name, with_annotation)
return _get_pinject_wrapper(locations.get_back_frame_loc(),
arg_binding_key=arg_binding_key)
|
Adds an annotation to an injected arg.
arg_name must be one of the named args of the decorated function, i.e.,
@annotate_arg('foo', with_annotation='something')
def a_function(foo): # ...
is OK, but
@annotate_arg('foo', with_annotation='something')
def a_function(bar, **kwargs): # ...
is not.
The same arg (on the same function) may not be annotated twice.
Args:
arg_name: the name of the arg to annotate on the decorated function
with_annotation: an annotation object
Returns:
a function that will decorate functions passed to it
|
https://github.com/google/pinject/blob/93b7e7a0ef1cf61ae7e267b0ff269024352d0f07/pinject/decorators.py#L32-L53
|
import decorator
from . import arg_binding_keys
from . import support
from . import errors
from . import locations
from . import scoping
_ARG_BINDING_KEYS_ATTR = '_pinject_arg_binding_keys'
_IS_WRAPPER_ATTR = '_pinject_is_wrapper'
_NON_INJECTABLE_ARG_NAMES_ATTR = '_pinject_non_injectables'
_ORIG_FN_ATTR = '_pinject_orig_fn'
_PROVIDER_DECORATIONS_ATTR = '_pinject_provider_decorations'
|
Apache License 2.0
|
campaignmonitor/createsend-python
|
lib/createsend/campaign.py
|
Campaign.unsubscribes
|
python
|
def unsubscribes(self, date="", page=1, page_size=1000, order_field="date", order_direction="asc"):
params = {
"date": date,
"page": page,
"pagesize": page_size,
"orderfield": order_field,
"orderdirection": order_direction}
response = self._get(self.uri_for("unsubscribes"), params=params)
return json_to_py(response)
|
Retrieves the unsubscribes for this campaign.
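A hedged usage sketch; the API-key auth dict and the campaign id are placeholders, and the import path is assumed from the module location.

from createsend.campaign import Campaign

# Placeholder credentials and campaign id.
campaign = Campaign(auth={"api_key": "YOUR_API_KEY"}, campaign_id="CAMPAIGN_ID")

# First page (up to 100 rows) of unsubscribes recorded on or after the given date.
unsubscribes = campaign.unsubscribes(date="2023-01-01", page=1, page_size=100)
print(unsubscribes)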
|
https://github.com/campaignmonitor/createsend-python/blob/7399f44a90507f2c555b83d176904bb261983959/lib/createsend/campaign.py#L159-L168
|
from __future__ import absolute_import
import json
from createsend.createsend import CreateSendBase
from createsend.utils import json_to_py
class Campaign(CreateSendBase):
def __init__(self, auth=None, campaign_id=None):
self.campaign_id = campaign_id
super(Campaign, self).__init__(auth)
def create(self, client_id, subject, name, from_name, from_email, reply_to, html_url,
text_url, list_ids, segment_ids):
body = {
"Subject": subject,
"Name": name,
"FromName": from_name,
"FromEmail": from_email,
"ReplyTo": reply_to,
"HtmlUrl": html_url,
"TextUrl": text_url,
"ListIDs": list_ids,
"SegmentIDs": segment_ids}
response = self._post("/campaigns/%s.json" %
client_id, json.dumps(body))
self.campaign_id = json_to_py(response)
return self.campaign_id
def create_from_template(self, client_id, subject, name, from_name,
from_email, reply_to, list_ids, segment_ids, template_id, template_content):
body = {
"Subject": subject,
"Name": name,
"FromName": from_name,
"FromEmail": from_email,
"ReplyTo": reply_to,
"ListIDs": list_ids,
"SegmentIDs": segment_ids,
"TemplateID": template_id,
"TemplateContent": template_content}
response = self._post("/campaigns/%s/fromtemplate.json" %
client_id, json.dumps(body))
self.campaign_id = json_to_py(response)
return self.campaign_id
def send_preview(self, recipients, personalize="fallback"):
body = {
"PreviewRecipients": [recipients] if isinstance(recipients, str) else recipients,
"Personalize": personalize}
response = self._post(self.uri_for("sendpreview"), json.dumps(body))
def send(self, confirmation_email, send_date="immediately"):
body = {
"ConfirmationEmail": confirmation_email,
"SendDate": send_date}
response = self._post(self.uri_for("send"), json.dumps(body))
def unschedule(self):
response = self._post(self.uri_for("unschedule"), json.dumps({}))
def delete(self):
response = self._delete("/campaigns/%s.json" % self.campaign_id)
def summary(self):
response = self._get(self.uri_for("summary"))
return json_to_py(response)
def email_client_usage(self):
response = self._get(self.uri_for("emailclientusage"))
return json_to_py(response)
def lists_and_segments(self):
response = self._get(self.uri_for("listsandsegments"))
return json_to_py(response)
def recipients(self, page=1, page_size=1000, order_field="email", order_direction="asc"):
params = {
"page": page,
"pagesize": page_size,
"orderfield": order_field,
"orderdirection": order_direction}
response = self._get(self.uri_for("recipients"), params=params)
return json_to_py(response)
def opens(self, date="", page=1, page_size=1000, order_field="date", order_direction="asc"):
params = {
"date": date,
"page": page,
"pagesize": page_size,
"orderfield": order_field,
"orderdirection": order_direction}
response = self._get(self.uri_for("opens"), params=params)
return json_to_py(response)
def clicks(self, date="", page=1, page_size=1000, order_field="date", order_direction="asc"):
params = {
"date": date,
"page": page,
"pagesize": page_size,
"orderfield": order_field,
"orderdirection": order_direction}
response = self._get(self.uri_for("clicks"), params=params)
return json_to_py(response)
|
MIT License
|
datastax/python-driver
|
cassandra/connection.py
|
EndPointFactory.create
|
python
|
def create(self, row):
raise NotImplementedError()
|
Create an EndPoint from a system.peers row.
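Since `create` is abstract, a subclass supplies the mapping from a `system.peers` row to an `EndPoint`. A minimal, hedged sketch of such a factory; the `rpc_address` column name and the default port are illustrative assumptions.

class SimpleEndPoint(EndPoint):
    def __init__(self, address, port=9042):
        self._address = address
        self._port = port

    @property
    def address(self):
        return self._address

    @property
    def port(self):
        return self._port

    def resolve(self):
        return self._address, self._port


class SimpleEndPointFactory(EndPointFactory):
    def create(self, row):
        # Assumes the peers row exposes an rpc_address attribute.
        return SimpleEndPoint(getattr(row, "rpc_address"))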
|
https://github.com/datastax/python-driver/blob/12a8adce943fe37a05ad6580e8bd302b65c2d93a/cassandra/connection.py#L175-L179
|
from __future__ import absolute_import
from collections import defaultdict, deque
import errno
from functools import wraps, partial, total_ordering
from heapq import heappush, heappop
import io
import logging
import six
from six.moves import range
import socket
import struct
import sys
from threading import Thread, Event, RLock, Condition
import time
import ssl
import weakref
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion
from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, ProtocolHandler,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException,
RegisterMessage, ReviseRequestMessage)
from cassandra.segment import SegmentCodec, CrcException
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
segment_codec_no_compression = SegmentCodec()
segment_codec_lz4 = None
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
try:
from lz4 import block as lz4_block
except ImportError:
lz4_block = lz4
try:
lz4_block.compress
lz4_block.decompress
except AttributeError:
raise ImportError(
'lz4 not imported correctly. Imported object should have '
            '.compress and .decompress attributes but does not. '
'Please file a bug report on JIRA. (Imported object was '
'{lz4_block})'.format(lz4_block=repr(lz4_block))
)
def lz4_compress(byts):
return int32_pack(len(byts)) + lz4_block.compress(byts)[4:]
def lz4_decompress(byts):
return lz4_block.decompress(byts[3::-1] + byts[4:])
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
segment_codec_lz4 = SegmentCodec(lz4_compress, lz4_decompress)
try:
import snappy
except ImportError:
pass
else:
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
DRIVER_NAME, DRIVER_VERSION = 'DataStax Python Driver', sys.modules['cassandra'].__version__
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')
class EndPoint(object):
@property
def address(self):
raise NotImplementedError()
@property
def port(self):
raise NotImplementedError()
@property
def ssl_options(self):
return None
@property
def socket_family(self):
return socket.AF_UNSPEC
def resolve(self):
raise NotImplementedError()
class EndPointFactory(object):
cluster = None
def configure(self, cluster):
self.cluster = cluster
return self
|
Apache License 2.0
|
ericgibert/supersid
|
supersid/supersid.py
|
SuperSID.about_app
|
python
|
def about_app(self):
msg = """This program is designed to detect Sudden Ionosphere Disturbances (SID), \
which are caused by a blast of intense X-ray radiation when there is a Solar Flare on the Sun.\n\n""" + "Controller: " + self.version + "\n" + "Sampler: " + self.sampler.version + "\n" "Timer: " + self.timer.version + "\n" "Config: " + self.config.version + "\n" "Logger: " + self.logger.version + "\n" "Sidfile: " + self.logger.sid_file.version + "\n" + "Viewer: " + self.viewer.version + "\n" + "\n\nAuthor: Eric Gibert ericgibert@yahoo.fr" + "\n\nVisit http://solar-center.stanford.edu/SID/sidmonitor/ for more information."
return msg
|
Return a text summarizing various information about the app, including component versions.
|
https://github.com/ericgibert/supersid/blob/0f44a2c8286d2d5c60b8abc2b7f103357de02f78/supersid/supersid.py#L194-L207
|
from __future__ import print_function
import os.path
import argparse
from matplotlib.mlab import psd as mlab_psd
from sidtimer import SidTimer
from sampler import Sampler
from config import Config
from logger import Logger
class SuperSID():
running = False
def __init__(self, config_file='', read_file=None):
self.version = "EG 1.4 20150801"
self.timer = None
self.sampler = None
self.viewer = None
print("Reading supersid.cfg ...", end='')
self.config = Config(os.path.expanduser(config_file) or "supersid.cfg")
self.config.supersid_check()
if not self.config.config_ok:
print("ERROR:", self.config.config_err)
exit(1)
else:
print(self.config.filenames)
self.config["supersid_version"] = self.version
self.logger = Logger(self, read_file)
if 'utc_starttime' not in self.config:
self.config['utc_starttime'] = self.logger.sid_file.sid_params["utc_starttime"]
if self.config['viewer'] == 'wx':
try:
from wxsidviewer import wxSidViewer
self.viewer = wxSidViewer(self)
wx_imported = True
except ImportError:
print("'wx' module not imported.")
wx_imported = False
elif self.config['viewer'] == 'tk':
from tksidviewer import tkSidViewer
self.viewer = tkSidViewer(self)
elif self.config['viewer'] == 'text':
from textsidviewer import textSidViewer
self.viewer = textSidViewer(self)
else:
print("ERROR: Unknown viewer", sid.config['viewer'])
exit(2)
if (self.config['viewer'] == 'wx' and wx_imported) or self.config['viewer'] == 'tk':
self.psd = self.viewer.get_psd
else:
self.psd = mlab_psd
self.buffer_size = int(24*60*60 / self.config['log_interval'])
        self.sampler = Sampler(self, audio_sampling_rate=self.config['audio_sampling_rate'], NFFT=1024)
if not self.sampler.sampler_ok:
self.close()
exit(3)
else:
            self.sampler.set_monitored_frequencies(self.config.stations)
for ibuffer, station in enumerate(self.config.stations):
station['raw_buffer'] = self.logger.sid_file.data[ibuffer]
self.viewer.status_display("Waiting for Timer ... ")
self.timer = SidTimer(self.config['log_interval'], self.on_timer)
def clear_all_data_buffers(self):
self.logger.sid_file.clear_buffer(next_day = True)
def on_timer(self):
current_index = self.timer.data_index
utc_now = self.timer.utc_now
message = "%s [%d] Capturing data..." % (self.timer.get_utc_now(), current_index)
self.viewer.status_display(message, level=1)
signal_strengths = []
try:
data = self.sampler.capture_1sec()
Pxx, freqs = self.psd(data, self.sampler.NFFT, self.sampler.audio_sampling_rate)
for binSample in self.sampler.monitored_bins:
signal_strengths.append(Pxx[binSample])
except IndexError as idxerr:
print("Index Error:", idxerr)
print("Data len:", len(data))
except TypeError as err_te:
print("Warning:", err_te)
with self.timer.lock:
if self.timer.utc_now.minute == 0 and self.timer.utc_now.second < self.config['log_interval']:
if self.config['hourly_save'] == 'YES':
fileName = "hourly_current_buffers.raw.ext.%s.csv" % (self.logger.sid_file.sid_params['utc_starttime'][:10])
self.save_current_buffers(filename=fileName, log_type='raw', log_format='supersid_extended')
if self.timer.utc_now.hour == 0:
for log_format in self.config['log_format'].split(','):
self.save_current_buffers(log_type=self.config['log_type'], log_format=log_format)
self.clear_all_data_buffers()
message = self.timer.get_utc_now() + " [%d] " % current_index
for station, strength in zip(self.config.stations, signal_strengths):
station['raw_buffer'][current_index] = strength
message += station['call_sign'] + "=%f " % strength
self.logger.sid_file.timestamp[current_index] = utc_now
self.viewer.status_display(message, level=2)
def save_current_buffers(self, filename='', log_type='raw', log_format = 'both'):
filenames = []
if log_format.startswith('both') or log_format.startswith('sid'):
fnames = self.logger.log_sid_format(self.config.stations, '', log_type=log_type, extended=log_format.endswith('extended'))
filenames += fnames
if log_format.startswith('both') or log_format.startswith('supersid'):
fnames = self.logger.log_supersid_format(self.config.stations, filename, log_type=log_type, extended=log_format.endswith('extended'))
filenames += fnames
return filenames
def on_close(self):
self.close()
def run(self, wx_app = None):
self.__class__.running = True
self.viewer.run()
def close(self):
self.__class__.running = False
if self.sampler:
self.sampler.close()
if self.timer:
self.timer.stop()
if self.viewer:
self.viewer.close()
|
MIT License
|
googkit/googkit
|
googkit/lib/command_tree.py
|
CommandTree.__init__
|
python
|
def __init__(self, tree=None):
if tree is None:
self._tree = CommandTree.DEFAULT_TREE.copy()
else:
self._tree = tree
|
Creates a command tree.
You can set customized command tree by first argument.
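A short usage sketch built from the command classes imported in the context below; the import path for CommandTree is assumed from the module location.

from googkit.commands.build import BuildCommand
from googkit.commands.init import InitCommand
from googkit.lib.command_tree import CommandTree

# Default tree: a copy of CommandTree.DEFAULT_TREE.
default_tree = CommandTree()

# Customized tree: only 'init' and 'build' are exposed.
custom_tree = CommandTree({
    'init': InitCommand,
    'build': BuildCommand,
})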
|
https://github.com/googkit/googkit/blob/cacb37bf65e5ac19379b329beb02af907240aa60/googkit/lib/command_tree.py#L31-L38
|
from googkit.commands.build import BuildCommand
from googkit.commands.candidates import CandidatesCommand
from googkit.commands.init import InitCommand
from googkit.commands.lint import LintCommand
from googkit.commands.ready import ReadyCommand
from googkit.commands.setup import SetupCommand
from googkit.commands.update_deps import UpdateDepsCommand
class CommandTree(object):
"""Default command tree.
"""
DEFAULT_TREE = {
'_candidates': CandidatesCommand,
'build': BuildCommand,
'config': {
'apply': ReadyCommand
},
'deps': {
'update': UpdateDepsCommand
},
'init': InitCommand,
'lint': LintCommand,
'ready': ReadyCommand,
'setup': SetupCommand,
}
|
MIT License
|
openstack/cinder
|
cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py
|
ShowCommand._parser
|
python
|
def _parser(self, content=None):
rc, out = super(ShowCommand, self)._parser(content)
if rc != 0:
return rc, out
if len(out) < 6:
return rc, []
detect_type = self.detect_type()
if detect_type == "list":
start_id = self.detect_detail_start_index(out)
if start_id < 0:
return rc, []
result = content_lines_to_dict(out[start_id:-3])
else:
start_id = self.detect_table_start_index(out)
if start_id < 0:
return rc, []
result = table_to_dict(out[start_id:-4])
return rc, result
|
Parse Table or Detail format into dict.
# Table format
ID Name LD-amount
----------------------
123 LV-1 1
# Result
{
'ID': '123',
'Name': 'LV-1',
'LD-amount': '1'
}
# Detail format
ID: 5DE94FF775D81C30
Name: LV-1
LD-amount: 1
# Result
{
'ID': '5DE94FF775D81C30',
'Name': 'LV-1',
'LD-amount': '1'
}
:param content: The parse Content.
:returns: parse result
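A self-contained illustration of the two parsing paths using the `table_to_dict` and `content_lines_to_dict` helpers from the context below; the sample lines mirror the formats in this docstring.

# Table format: header row, separator row, data rows, terminated by a blank line.
table_lines = [
    "ID   Name  LD-amount",
    "----------------------",
    "123  LV-1  1",
    "",
]
print(table_to_dict(table_lines))
# -> [{'ID': '123', 'Name': 'LV-1', 'LD-amount': '1'}]

# Detail format: 'key: value' lines, entries separated by blank lines.
detail_lines = [
    "ID: 5DE94FF775D81C30",
    "Name: LV-1",
    "LD-amount: 1",
    "",
]
print(content_lines_to_dict(detail_lines))
# -> [{'ID': '5DE94FF775D81C30', 'Name': 'LV-1', 'LD-amount': '1'}]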
|
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py#L550-L614
|
import abc
import os
import time
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import strutils
import six
from cinder import utils
LOG = logging.getLogger(__name__)
DEFAULT_RETRY_TIME = 5
def retry_cli(func):
def inner(self, *args, **kwargs):
total_retry_time = self.cli_retry_time
if total_retry_time is None:
total_retry_time = DEFAULT_RETRY_TIME
retry_time = 0
while retry_time < total_retry_time:
rc, out = func(self, *args, **kwargs)
retry_time += 1
if rc == 0:
break
LOG.error(
'Retry %(retry)s times: %(method)s Failed '
'%(rc)s: %(reason)s', {
'retry': retry_time,
'method': self.__class__.__name__,
'rc': rc,
'reason': out})
if rc == 1:
break
elif rc == 11:
break
elif rc == 20:
break
LOG.debug(
'Method: %(method)s Return Code: %(rc)s '
'Output: %(out)s', {
'method': self.__class__.__name__, 'rc': rc, 'out': out})
return rc, out
return inner
def os_execute(fd, raidcmd_timeout, command_line):
os.write(fd, command_line.encode('utf-8'))
return os_read(fd, 8192, 'RAIDCmd:>', raidcmd_timeout)
def os_read(fd, buffer_size, cmd_pattern, raidcmd_timeout):
content = ''
start_time = int(time.time())
while True:
time.sleep(0.5)
output = os.read(fd, buffer_size)
if len(output) > 0:
content += output.decode('utf-8')
if content.find(cmd_pattern) >= 0:
break
if int(time.time()) - start_time > raidcmd_timeout:
content = 'Raidcmd timeout: %s' % content
LOG.error(
'Raidcmd exceeds cli timeout [%(timeout)s]s.', {
'timeout': raidcmd_timeout})
break
return content
def strip_empty_in_list(list):
result = []
for entry in list:
entry = entry.strip()
if entry != "":
result.append(entry)
return result
def table_to_dict(table):
tableHeader = table[0].split(" ")
tableHeaderList = strip_empty_in_list(tableHeader)
result = []
for i in range(len(table) - 2):
if table[i + 2].strip() == "":
break
resultEntry = {}
tableEntry = table[i + 2].split(" ")
tableEntryList = strip_empty_in_list(tableEntry)
for key, value in zip(tableHeaderList, tableEntryList):
resultEntry[key] = value
result.append(resultEntry)
return result
def content_lines_to_dict(content_lines):
result = []
resultEntry = {}
for content_line in content_lines:
if content_line.strip() == "":
result.append(resultEntry)
resultEntry = {}
continue
split_entry = content_line.strip().split(": ", 1)
resultEntry[split_entry[0]] = split_entry[1]
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCommand(object):
def __init__(self):
super(BaseCommand, self).__init__()
@abc.abstractmethod
def execute(self, *args, **kwargs):
pass
class ShellCommand(BaseCommand):
def __init__(self, cli_conf):
super(ShellCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
@retry_cli
def execute(self, *args, **kwargs):
commands = ' '.join(args)
result = None
rc = 0
try:
result, err = utils.execute(commands, shell=True)
except processutils.ProcessExecutionError as pe:
rc = pe.exit_code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute command. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'exit_code': pe.exit_code, 'result': result})
return rc, result
class ExecuteCommand(BaseCommand):
def __init__(self, cli_conf):
super(ExecuteCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
@retry_cli
def execute(self, *args, **kwargs):
result = None
rc = 0
try:
result, err = utils.execute(*args, **kwargs)
except processutils.ProcessExecutionError as pe:
rc = pe.exit_code
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute command. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'exit_code': pe.exit_code, 'result': result})
return rc, result
class CLIBaseCommand(BaseCommand):
def __init__(self, cli_conf):
super(CLIBaseCommand, self).__init__()
self.cli_retry_time = cli_conf.get('cli_retry_time')
self.raidcmd_timeout = cli_conf.get('raidcmd_timeout')
self.cli_cache = cli_conf.get('cli_cache')
self.pid = cli_conf.get('pid')
self.fd = cli_conf.get('fd')
self.command = ""
self.parameters = ()
self.show_noinit = ""
self.command_line = ""
def _generate_command(self, parameters):
self.parameters = parameters
parameters_line = ' '.join(parameters)
self.command_line = "{0} {1} {2}\n".format(
self.command,
parameters_line,
self.show_noinit)
return self.command_line
def _parser(self, content=None):
content = content.replace("\r", "")
content = content.replace("\\/-", "")
content = content.strip()
LOG.debug(content)
if content is not None:
content_lines = content.split("\n")
rc, out = self._parse_return(content_lines)
if rc != 0:
return rc, out
else:
return rc, content_lines
return -1, None
@retry_cli
def execute(self, *args, **kwargs):
command_line = self._generate_command(args)
LOG.debug('Executing: %(command)s', {
'command': strutils.mask_password(command_line)})
rc = 0
result = None
try:
content = self._execute(command_line)
rc, result = self._parser(content)
except processutils.ProcessExecutionError as pe:
rc = -2
result = pe.stdout
result = result.replace('\n', '\\n')
LOG.error(
'Error on execute %(command)s. '
'Error code: %(exit_code)d Error msg: %(result)s', {
'command': strutils.mask_password(command_line),
'exit_code': pe.exit_code,
'result': result})
return rc, result
def _execute(self, command_line):
return os_execute(
self.fd, self.raidcmd_timeout, command_line)
def _parse_return(self, content_lines):
rc = 0
if 'Raidcmd timeout' in content_lines[0]:
rc = -3
return_cli_result = content_lines
elif len(content_lines) < 4:
rc = -4
return_cli_result = 'Raidcmd output error: %s' % content_lines
else:
return_value = content_lines[-3].strip().split(' ', 1)[1]
return_cli_result = content_lines[-4].strip().split(' ', 1)[1]
rc = int(return_value, 16)
return rc, return_cli_result
class ConnectRaid(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(ConnectRaid, self).__init__(*args, **kwargs)
self.command = "connect"
class CheckConnection(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CheckConnection, self).__init__(*args, **kwargs)
self.command = "lock"
class InitCache(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(InitCache, self).__init__(*args, **kwargs)
self.command = "utility init-cache"
class CreateLD(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateLD, self).__init__(*args, **kwargs)
self.command = "create ld"
class CreateLV(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateLV, self).__init__(*args, **kwargs)
self.command = "create lv"
class CreatePartition(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreatePartition, self).__init__(*args, **kwargs)
self.command = "create part"
class DeletePartition(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(DeletePartition, self).__init__(*args, **kwargs)
self.command = "delete part"
class SetPartition(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(SetPartition, self).__init__(*args, **kwargs)
self.command = "set part"
class SetLV(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(SetLV, self).__init__(*args, **kwargs)
self.command = "set lv"
class SetSnapshot(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(SetSnapshot, self).__init__(*args, **kwargs)
self.command = "set si"
class CreateMap(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateMap, self).__init__(*args, **kwargs)
self.command = "create map"
class DeleteMap(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(DeleteMap, self).__init__(*args, **kwargs)
self.command = "delete map"
class CreateSnapshot(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateSnapshot, self).__init__(*args, **kwargs)
self.command = "create si"
class DeleteSnapshot(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(DeleteSnapshot, self).__init__(*args, **kwargs)
self.command = "delete si"
class CreateReplica(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateReplica, self).__init__(*args, **kwargs)
self.command = "create replica"
class DeleteReplica(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(DeleteReplica, self).__init__(*args, **kwargs)
self.command = "delete replica"
class CreateIQN(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(CreateIQN, self).__init__(*args, **kwargs)
self.command = "create iqn"
class DeleteIQN(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(DeleteIQN, self).__init__(*args, **kwargs)
self.command = "delete iqn"
class SetIOTimeout(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(SetIOTimeout, self).__init__(*args, **kwargs)
self.command = "utility set io-timeout"
class ShowCommand(CLIBaseCommand):
def __init__(self, *args, **kwargs):
super(ShowCommand, self).__init__(*args, **kwargs)
self.param_detail = "-l"
self.default_type = "table"
self.start_key = ""
if self.cli_cache:
self.show_noinit = "-noinit"
|
Apache License 2.0
|
microsoft/qlib
|
qlib/workflow/expm.py
|
MLflowExpManager._get_exp
|
python
|
def _get_exp(self, experiment_id=None, experiment_name=None):
assert (
experiment_id is not None or experiment_name is not None
), "Please input at least one of experiment/recorder id or name before retrieving experiment/recorder."
if experiment_id is not None:
try:
exp = self.client.get_experiment(experiment_id)
if exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
return experiment
except MlflowException:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment id is correct."
)
elif experiment_name is not None:
try:
exp = self.client.get_experiment_by_name(experiment_name)
if exp is None or exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, experiment_name, self.uri)
return experiment
except MlflowException as e:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment name is correct."
)
|
Method for getting or creating an experiment. It will try to first get a valid experiment, if exception occurs, it will
raise errors.
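A hedged usage sketch; in practice `_get_exp` is reached through the public `get_exp` shown in the context below, so the example drives it that way. The tracking URI and experiment name are placeholders.

from qlib.workflow.expm import MLflowExpManager

# Placeholder tracking URI and experiment name.
manager = MLflowExpManager(uri="file:./mlruns", default_exp_name="default_exp")

# With create=False, a missing or deleted experiment surfaces the ValueError
# raised by _get_exp; with create=True (the default) it would be created instead.
try:
    experiment = manager.get_exp(experiment_name="my_experiment", create=False)
except ValueError as err:
    print(err)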
|
https://github.com/microsoft/qlib/blob/7c31012b507a3823117bddcc693fc64899460b2a/qlib/workflow/expm.py#L354-L385
|
from urllib.parse import urlparse
import mlflow
from filelock import FileLock
from mlflow.exceptions import MlflowException
from mlflow.entities import ViewType
import os, logging
from pathlib import Path
from contextlib import contextmanager
from typing import Optional, Text
from .exp import MLflowExperiment, Experiment
from ..config import C
from .recorder import Recorder
from ..log import get_module_logger
logger = get_module_logger("workflow", logging.INFO)
class ExpManager:
def __init__(self, uri: Text, default_exp_name: Optional[Text]):
self._current_uri = uri
self._default_exp_name = default_exp_name
self.active_experiment = None
def __repr__(self):
return "{name}(current_uri={curi})".format(name=self.__class__.__name__, curi=self._current_uri)
def start_exp(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
**kwargs,
):
raise NotImplementedError(f"Please implement the `start_exp` method.")
def end_exp(self, recorder_status: Text = Recorder.STATUS_S, **kwargs):
raise NotImplementedError(f"Please implement the `end_exp` method.")
def create_exp(self, experiment_name: Optional[Text] = None):
raise NotImplementedError(f"Please implement the `create_exp` method.")
def search_records(self, experiment_ids=None, **kwargs):
raise NotImplementedError(f"Please implement the `search_records` method.")
def get_exp(self, *, experiment_id=None, experiment_name=None, create: bool = True, start: bool = False):
if experiment_id is None and experiment_name is None:
if self.active_experiment is not None:
return self.active_experiment
experiment_name = self._default_exp_name
if create:
exp, is_new = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
else:
exp, is_new = (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
if is_new and start:
self.active_experiment = exp
self.active_experiment.start()
return exp
def _get_or_create_exp(self, experiment_id=None, experiment_name=None) -> (object, bool):
try:
return (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
except ValueError:
if experiment_name is None:
experiment_name = self._default_exp_name
logger.warning(f"No valid experiment found. Create a new experiment with name {experiment_name}.")
pr = urlparse(self.uri)
if pr.scheme == "file":
with FileLock(os.path.join(pr.netloc, pr.path, "filelock")) as f:
return self.create_exp(experiment_name), True
return self.create_exp(experiment_name), True
def _get_exp(self, experiment_id=None, experiment_name=None) -> Experiment:
raise NotImplementedError(f"Please implement the `_get_exp` method")
def delete_exp(self, experiment_id=None, experiment_name=None):
raise NotImplementedError(f"Please implement the `delete_exp` method.")
@property
def default_uri(self):
if "kwargs" not in C.exp_manager or "uri" not in C.exp_manager["kwargs"]:
raise ValueError("The default URI is not set in qlib.config.C")
return C.exp_manager["kwargs"]["uri"]
@property
def uri(self):
return self._current_uri or self.default_uri
def set_uri(self, uri: Optional[Text] = None):
if uri is None:
logger.info("No tracking URI is provided. Use the default tracking URI.")
self._current_uri = self.default_uri
else:
self._current_uri = uri
self._set_uri()
def _set_uri(self):
raise NotImplementedError(f"Please implement the `_set_uri` method.")
def list_experiments(self):
raise NotImplementedError(f"Please implement the `list_experiments` method.")
class MLflowExpManager(ExpManager):
def __init__(self, uri: Text, default_exp_name: Optional[Text]):
super(MLflowExpManager, self).__init__(uri, default_exp_name)
self._client = None
def _set_uri(self):
self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
logger.info("{:}".format(self._client))
@property
def client(self):
if self._client is None:
self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
return self._client
def start_exp(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
):
self.set_uri(uri)
if experiment_name is None:
experiment_name = self._default_exp_name
experiment, _ = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
self.active_experiment = experiment
self.active_experiment.start(recorder_id=recorder_id, recorder_name=recorder_name, resume=resume)
return self.active_experiment
def end_exp(self, recorder_status: Text = Recorder.STATUS_S):
if self.active_experiment is not None:
self.active_experiment.end(recorder_status)
self.active_experiment = None
self._current_uri = None
def create_exp(self, experiment_name: Optional[Text] = None):
assert experiment_name is not None
experiment_id = self.client.create_experiment(experiment_name)
experiment = MLflowExperiment(experiment_id, experiment_name, self.uri)
experiment._default_name = self._default_exp_name
return experiment
|
MIT License
|
lihuacai168/anotherfasterrunner
|
fastrunner/utils/loader.py
|
FileLoader.dump_yaml_file
|
python
|
def dump_yaml_file(yaml_file, data):
with io.open(yaml_file, 'w', encoding='utf-8') as stream:
yaml.dump(
data,
stream,
indent=4,
default_flow_style=False,
encoding='utf-8',
allow_unicode=True)
|
dump yaml file
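A small round-trip sketch; the file name and payload are invented for illustration and assume PyYAML is installed (it is imported in the module above).

import io
import yaml

data = {"config": {"name": "demo"}, "teststeps": [{"api": "api/login.yml"}]}
FileLoader.dump_yaml_file("demo.yml", data)  # writes UTF-8 YAML, 4-space indent, block style

with io.open("demo.yml", encoding="utf-8") as stream:  # plain read-back to confirm the dump
    assert yaml.safe_load(stream) == data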
|
https://github.com/lihuacai168/anotherfasterrunner/blob/b0175c083b774f77576e80b5b9a3ff886d728220/fastrunner/utils/loader.py#L64-L74
|
import copy
import datetime
import functools
import importlib
import io
import json
import os
import shutil
import sys
import tempfile
import types
from threading import Thread
import requests
import yaml
from bs4 import BeautifulSoup
from httprunner import HttpRunner, logger, parser
from httprunner.exceptions import FunctionNotFound, VariableNotFound
from requests.cookies import RequestsCookieJar
from requests_toolbelt import MultipartEncoder
from fastrunner import models
from fastrunner.utils.parser import Format
from FasterRunner.settings.base import BASE_DIR
logger.setup_logger('DEBUG')
TEST_NOT_EXISTS = {
"code": "0102",
"status": False,
"msg": "节点下没有接口或者用例集"
}
def is_function(tup):
name, item = tup
return isinstance(item, types.FunctionType)
def is_variable(tup):
name, item = tup
if callable(item):
return False
if isinstance(item, types.ModuleType):
return False
if name.startswith("_"):
return False
return True
class FileLoader(object):
@staticmethod
|
MIT License
|
tcalmant/ipopo
|
tests/framework/test_packages.py
|
PackagesTest.tearDown
|
python
|
def tearDown(self):
self.framework.stop()
FrameworkFactory.delete_framework()
os.environ['bundle.import.fail'] = "0"
|
Called after each test
|
https://github.com/tcalmant/ipopo/blob/1d4b81207e67890dfccc8f562336c7104f194c17/tests/framework/test_packages.py#L73-L81
|
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from pelix.framework import FrameworkFactory, Bundle
from tests import log_off, log_on
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
SERVICE_BUNDLE = "tests.framework.service_bundle"
SIMPLE_BUNDLE = "tests.framework.simple_bundle"
def _list_modules(path, recursive=False):
results = set()
for filename in os.listdir(path):
if '__pycache__' in filename or '__main__' in filename:
continue
file_path = os.path.join(path, filename)
if os.path.isdir(file_path) and recursive:
results.update(_list_modules(file_path))
elif os.path.isfile(file_path):
results.add(os.path.splitext(file_path)[0])
return results
class PackagesTest(unittest.TestCase):
def setUp(self):
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.context = self.framework.get_bundle_context()
self.test_root = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "vault")
|
Apache License 2.0
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py
|
sample_read_tensorboard_time_series_data
|
python
|
def sample_read_tensorboard_time_series_data():
client = aiplatform_v1beta1.TensorboardServiceClient()
request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
)
response = client.read_tensorboard_time_series_data(request=request)
print(response)
|
Snippet for read_tensorboard_time_series_data
|
https://github.com/googleapis/python-aiplatform/blob/c1c2326b2342ab1b6f4c4ce3852e63376eae740d/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py#L30-L45
|
from google.cloud import aiplatform_v1beta1
|
Apache License 2.0
|
yongzhuo/pytorch-nlu
|
pytorch_nlu/pytorch_sequencelabeling/slTqdm.py
|
tqdm.display
|
python
|
def display(self, msg=None, pos=None):
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
|
Use `self.sp` to display `msg` in the specified `pos`.
Parameters
----------
msg : what to display (default: repr(self))
pos : position to display in. (default: abs(self.pos))
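Illustrative use with the stripped-down class above, assuming the rest of the module (`status_printer`, `format_meter`, `moveto`, `ema`) is available as in the full slTqdm.py; the loop and message are made up.

bar = tqdm(range(200))
for i, _ in enumerate(bar):
    if i == 100:
        bar.display("halfway there")  # prints the custom msg through self.sp at the bar's position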
|
https://github.com/yongzhuo/pytorch-nlu/blob/acb5cdb450efaac0c64b38d58a66aca9f942254b/pytorch_nlu/pytorch_sequencelabeling/slTqdm.py#L141-L156
|
from platform import system as _curos
import time
import sys
import re
import os
try:
from weakref import WeakSet
_unicode = unicode
_unich = unichr
_range = xrange
except Exception as e:
_range = range
_unicode = str
WeakSet = set
_unich = chr
CUR_OS = _curos()
IS_WIN = CUR_OS in ['Windows', 'cli']
IS_NIX = (not IS_WIN) and any(
CUR_OS.startswith(i) for i in
['CYGWIN', 'MSYS', 'Linux', 'Darwin', 'SunOS',
'FreeBSD', 'NetBSD', 'OpenBSD'])
try:
if IS_WIN:
import colorama
colorama.init()
else:
colorama = None
except ImportError:
colorama = None
UTF_FMT = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
RE_ANSI = re.compile(r"\x1b\[[;\d]*[A-Za-z]")
ASCII_FMT = " 123456789#"
class tqdm:
def __init__(self, iterable=None, desc=None, unit_scale=False, unit_divisor=1000, gui=False):
total = len(iterable)
file = sys.stderr
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.ascii = ascii
self.fp = file
self.dynamic_miniters = True
self.dynamic_ncols = False
self.disable = False
self.unit_divisor = unit_divisor
self.unit_scale = unit_scale
from time import time
self._time = time
self.unit = 'it'
self.gui = gui
self.bar_format = None
self.avg_time = None
self.postfix = None
self.ncols = None
self.last_print_n = 0
self.mininterval = 0.1
self.smoothing = 0.3
self.miniters = 0
self.pos = 0
self.n = 0
if not gui:
self.sp = self.status_printer(self.fp)
self.display()
self.last_print_t = self._time()
self.start_t = self.last_print_t
def __repr__(self):
return self.format_meter(**self.format_dict)
def __iter__(self):
iterable = self.iterable
mininterval = self.mininterval
miniters = self.miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
for obj in iterable:
yield obj
n += 1
if n - last_print_n >= self.miniters:
miniters = self.miniters
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.display()
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
|
Apache License 2.0
|
wavefronthq/python-client
|
wavefront_api_client/models/paged_monitored_application_dto.py
|
PagedMonitoredApplicationDTO.items
|
python
|
def items(self, items):
self._items = items
|
Sets the items of this PagedMonitoredApplicationDTO.
List of requested items # noqa: E501
:param items: The items of this PagedMonitoredApplicationDTO. # noqa: E501
:type: list[MonitoredApplicationDTO]
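A minimal sketch of the setter in use; the empty list is a placeholder value and the import path is assumed from the module layout above.

from wavefront_api_client import PagedMonitoredApplicationDTO  # import path assumed

page = PagedMonitoredApplicationDTO()
page.items = []        # stores the value on self._items via the setter above
print(page.items)      # -> []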
|
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/paged_monitored_application_dto.py#L120-L129
|
import pprint
import re
import six
from wavefront_api_client.configuration import Configuration
class PagedMonitoredApplicationDTO(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cursor': 'str',
'items': 'list[MonitoredApplicationDTO]',
'limit': 'int',
'more_items': 'bool',
'offset': 'int',
'sort': 'Sorting',
'total_items': 'int'
}
attribute_map = {
'cursor': 'cursor',
'items': 'items',
'limit': 'limit',
'more_items': 'moreItems',
'offset': 'offset',
'sort': 'sort',
'total_items': 'totalItems'
}
def __init__(self, cursor=None, items=None, limit=None, more_items=None, offset=None, sort=None, total_items=None, _configuration=None):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._cursor = None
self._items = None
self._limit = None
self._more_items = None
self._offset = None
self._sort = None
self._total_items = None
self.discriminator = None
if cursor is not None:
self.cursor = cursor
if items is not None:
self.items = items
if limit is not None:
self.limit = limit
if more_items is not None:
self.more_items = more_items
if offset is not None:
self.offset = offset
if sort is not None:
self.sort = sort
if total_items is not None:
self.total_items = total_items
@property
def cursor(self):
return self._cursor
@cursor.setter
def cursor(self, cursor):
self._cursor = cursor
@property
def items(self):
return self._items
@items.setter
|
Apache License 2.0
|
openstack/manila
|
manila/api/v2/share_network_subnets.py
|
ShareNetworkSubnetController.delete
|
python
|
def delete(self, req, share_network_id, share_network_subnet_id):
context = req.environ['manila.context']
try:
db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
try:
share_network_subnet = db_api.share_network_subnet_get(
context, share_network_subnet_id)
except exception.ShareNetworkSubnetNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
for share_server in share_network_subnet['share_servers'] or []:
shares = db_api.share_instances_get_all_by_share_server(
context, share_server['id'])
if shares:
msg = _("Cannot delete share network subnet %(id)s, it has "
"one or more shares.") % {
'id': share_network_subnet_id}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
if not self._all_share_servers_are_auto_deletable(
share_network_subnet):
msg = _("The service cannot determine if there are any "
"non-managed shares on the share network subnet %(id)s,"
"so it cannot be deleted. Please contact the cloud "
"administrator to rectify.") % {
'id': share_network_subnet_id}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network_subnet['share_servers']:
self.share_rpcapi.delete_share_server(context, share_server)
db_api.share_network_subnet_delete(context, share_network_subnet_id)
return webob.Response(status_int=http_client.ACCEPTED)
|
Delete specified share network subnet.
|
https://github.com/openstack/manila/blob/34d209484366cd921e052d37c5f9daef5e97af20/manila/api/v2/share_network_subnets.py#L64-L105
|
from manila.api import common
from oslo_db import exception as db_exception
from oslo_log import log
from six.moves import http_client
import webob
from webob import exc
from manila.api.openstack import wsgi
from manila.api.views import share_network_subnets as subnet_views
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila.share import rpcapi as share_rpcapi
LOG = log.getLogger(__name__)
class ShareNetworkSubnetController(wsgi.Controller):
resource_name = 'share_network_subnet'
_view_builder_class = subnet_views.ViewBuilder
def __init__(self):
super(ShareNetworkSubnetController, self).__init__()
self.share_rpcapi = share_rpcapi.ShareAPI()
@wsgi.Controller.api_version("2.51")
@wsgi.Controller.authorize
def index(self, req, share_network_id):
context = req.environ['manila.context']
try:
share_network = db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return self._view_builder.build_share_network_subnets(
req, share_network.get('share_network_subnets'))
def _all_share_servers_are_auto_deletable(self, share_network_subnet):
return all([ss['is_auto_deletable'] for ss
in share_network_subnet['share_servers']])
@wsgi.Controller.api_version('2.51')
@wsgi.Controller.authorize
|
Apache License 2.0
|
kodi-community-addons/script.module.metadatautils
|
lib/helpers/imdb.py
|
Imdb.__init__
|
python
|
def __init__(self, simplecache=None, kodidb=None):
if not simplecache:
from simplecache import SimpleCache
self.cache = SimpleCache()
else:
self.cache = simplecache
if not kodidb:
if sys.version_info.major == 3:
from .kodidb import KodiDb
else:
from kodidb import KodiDb
self.kodidb = KodiDb()
else:
self.kodidb = kodidb
|
Initialize - optionally provide simplecache object
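Construction sketch, illustrative only: it assumes the Kodi add-on runtime where `simplecache` and the bundled `kodidb` module are importable.

from simplecache import SimpleCache

shared_cache = SimpleCache()
imdb = Imdb(simplecache=shared_cache)  # reuse an existing cache between helpers
imdb_default = Imdb()                  # otherwise it creates its own SimpleCache and KodiDb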
|
https://github.com/kodi-community-addons/script.module.metadatautils/blob/937d9a2b57c856e370d132ecabdea173af815923/lib/helpers/imdb.py#L22-L36
|
import os, sys
if sys.version_info.major == 3:
from .utils import requests, try_parse_int
else:
from utils import requests, try_parse_int
import bs4 as BeautifulSoup
from simplecache import use_cache
class Imdb(object):
|
Apache License 2.0
|
qiskit/qiskit-aqua
|
qiskit/aqua/algorithms/education/deutsch_jozsa.py
|
DeutschJozsa.construct_circuit
|
python
|
def construct_circuit(self, measurement=False):
if self._circuit is not None:
return self._circuit
qc_preoracle = QuantumCircuit(
self._oracle.variable_register,
self._oracle.output_register,
)
qc_preoracle.h(self._oracle.variable_register)
qc_preoracle.x(self._oracle.output_register)
qc_preoracle.h(self._oracle.output_register)
qc_preoracle.barrier()
qc_oracle = self._oracle.circuit
qc_postoracle = QuantumCircuit(
self._oracle.variable_register,
self._oracle.output_register,
)
qc_postoracle.h(self._oracle.variable_register)
qc_postoracle.barrier()
self._circuit = qc_preoracle + qc_oracle + qc_postoracle
if measurement:
measurement_cr = ClassicalRegister(len(self._oracle.variable_register), name='m')
self._circuit.add_register(measurement_cr)
self._circuit.measure(self._oracle.variable_register, measurement_cr)
return self._circuit
|
Construct the quantum circuit
Args:
measurement (bool): Boolean flag to indicate
if measurement should be included in the circuit.
Returns:
QuantumCircuit: the QuantumCircuit object for the constructed circuit
|
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/algorithms/education/deutsch_jozsa.py#L64-L108
|
from typing import Optional, Union, Dict, Any
import logging
import operator
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit
from qiskit.providers import BaseBackend
from qiskit.providers import Backend
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.utils import get_subsystem_density_matrix
from qiskit.aqua.components.oracles import Oracle
logger = logging.getLogger(__name__)
class DeutschJozsa(QuantumAlgorithm):
def __init__(self,
oracle: Oracle,
quantum_instance: Optional[
Union[QuantumInstance, BaseBackend, Backend]] = None) -> None:
super().__init__(quantum_instance)
self._oracle = oracle
self._circuit = None
self._ret = {}
|
Apache License 2.0
|
mila-iqia/myia
|
myia/operations/prim_raise_.py
|
infer_raise
|
python
|
async def infer_raise(self, engine, x: xtype.ExceptionType):
return AbstractBottom()
|
Infer the return type of primitive `raise_`.
|
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/operations/prim_raise_.py#L14-L16
|
from .. import xtype
from ..lib import AbstractBottom, bprop_to_grad_transform, standard_prim
from . import primitives as P
def pyimpl_raise(x):
raise x
@standard_prim(P.raise_)
|
MIT License
|
python-discord/sir-lancebot
|
bot/exts/core/internal_eval/_helpers.py
|
EvalContext.locals
|
python
|
def locals(self) -> dict[str, Any]:
return {**collections.ChainMap(self.dependencies, self.context_vars, self._locals)}
|
Return a mapping of names->values needed for evaluation.
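The ChainMap gives `dependencies` precedence over `context_vars`, which in turn win over the captured locals; a standalone sketch of that merge order with invented values:

import collections

dependencies = {"x": "from-deps"}
context_vars = {"x": "from-context", "y": "from-context"}
local_vars = {"x": "from-locals", "y": "from-locals", "z": "from-locals"}

merged = {**collections.ChainMap(dependencies, context_vars, local_vars)}
# Earlier mappings win on key collisions:
assert merged == {"x": "from-deps", "y": "from-context", "z": "from-locals"}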
|
https://github.com/python-discord/sir-lancebot/blob/559e76ffbef7af85132d86f2e3ab8acf7e7f5eef/bot/exts/core/internal_eval/_helpers.py#L113-L115
|
import ast
import collections
import contextlib
import functools
import inspect
import io
import logging
import sys
import traceback
import types
from typing import Any, Optional, Union
log = logging.getLogger(__name__)
ExcInfo = tuple[type[Exception], Exception, types.TracebackType]
Namespace = dict[str, Any]
EVAL_WRAPPER = """
async def _eval_wrapper_function():
try:
with contextlib.redirect_stdout(_eval_context.stdout):
pass
if '_value_last_expression' in locals():
if inspect.isawaitable(_value_last_expression):
_value_last_expression = await _value_last_expression
_eval_context._value_last_expression = _value_last_expression
else:
_eval_context._value_last_expression = None
except Exception:
_eval_context.exc_info = sys.exc_info()
finally:
_eval_context.locals = locals()
_eval_context.function = _eval_wrapper_function
"""
INTERNAL_EVAL_FRAMENAME = "<internal eval>"
EVAL_WRAPPER_FUNCTION_FRAMENAME = "_eval_wrapper_function"
def format_internal_eval_exception(exc_info: ExcInfo, code: str) -> str:
exc_type, exc_value, tb = exc_info
stack_summary = traceback.StackSummary.extract(traceback.walk_tb(tb))
code = code.split("\n")
output = ["Traceback (most recent call last):"]
for frame in stack_summary:
if frame.filename == INTERNAL_EVAL_FRAMENAME:
line = code[frame.lineno - 1].lstrip()
if frame.name == EVAL_WRAPPER_FUNCTION_FRAMENAME:
name = INTERNAL_EVAL_FRAMENAME
else:
name = frame.name
else:
line = frame.line
name = frame.name
output.append(
f' File "{frame.filename}", line {frame.lineno}, in {name}\n'
f" {line}"
)
output.extend(traceback.format_exception_only(exc_type, exc_value))
return "\n".join(output)
class EvalContext:
def __init__(self, context_vars: Namespace, local_vars: Namespace):
self._locals = dict(local_vars)
self.context_vars = dict(context_vars)
self.stdout = io.StringIO()
self._value_last_expression = None
self.exc_info = None
self.code = ""
self.function = None
self.eval_tree = None
@property
def dependencies(self) -> dict[str, Any]:
return {
"print": functools.partial(print, file=self.stdout),
"contextlib": contextlib,
"inspect": inspect,
"sys": sys,
"_eval_context": self,
"_": self._value_last_expression,
}
@property
|
MIT License
|
abdur-rahmaanj/greenberry
|
greenberry/gb_ide.py
|
SearchDialog.replace
|
python
|
def replace(self, event=0):
self._search(doSearch=False, doReplace=True)
|
Command for Replace button
|
https://github.com/abdur-rahmaanj/greenberry/blob/5c71fd6a6b8e0af7c10cb40fe1aa9bdff5a6a312/greenberry/gb_ide.py#L269-L271
|
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import simpledialog
import tkinter.scrolledtext as tkst
import subprocess
color1 = ["var", "print", "set", "debug", "plot"]
color2 = ["string", "eval", "times", "action", "attribute", "bool"]
color3 = ["=", "<", "<=", ">", ">=", "if", "for"]
color4 = ["@"]
color5 = ["make", "see", "add", "class", "func", "call"]
class TextLineNumbers(tk.Canvas):
def __init__(self, *args, **kwargs):
tk.Canvas.__init__(self, *args, **kwargs)
self.textwidget = None
def attach(self, text_widget):
self.textwidget = text_widget
def redraw(self, *args):
self.delete("all")
i = self.textwidget.index("@0,0")
while True:
dline = self.textwidget.dlineinfo(i)
if dline is None:
break
y = dline[1]
linenum = str(i).split(".")[0]
self.create_text(2, y, anchor="nw", text=linenum)
i = self.textwidget.index("%s+1line" % i)
class CustomText(tk.Text):
def __init__(self, *args, **kwargs):
tkst.ScrolledText.__init__(self, *args, **kwargs)
self._orig = self._w + "_orig"
self.tk.call("rename", self._w, self._orig)
self.tk.createcommand(self._w, self._proxy)
def _proxy(self, *args):
try:
cmd = (self._orig,) + args
result = self.tk.call(cmd)
if (
args[0] in ("insert", "replace", "delete")
or args[0:3] == ("mark", "set", "insert")
or args[0:2] == ("xview", "moveto")
or args[0:2] == ("xview", "scroll")
or args[0:2] == ("yview", "moveto")
or args[0:2] == ("yview", "scroll")
):
self.event_generate("<<Change>>", when="tail")
return result
except:
pass
class MessageBox(tk.simpledialog.Dialog):
def __init__(self, parent, title, message):
self.messageText = message
tk.simpledialog.Dialog.__init__(self, parent, title)
def body(self, master):
self.frame = tk.Frame(master)
self.message = tk.Message(self.frame, text=self.messageText)
self.btn_cancel = tk.Button(self.frame, text="Cancel", command=self.cancel)
self.bind("<Return>", self.cancel)
self.frame.grid(column=0, row=0, sticky="NSEW")
self.message.grid(column=0, row=1)
self.btn_cancel.grid(column=0, row=2)
return self.btn_cancel
def destroy(self):
self.parent.messageOpen = False
super(MessageBox, self).destroy()
def buttonbox(self):
pass
class SearchDialog(tk.simpledialog.Dialog):
def __init__(self, parent, txt, old_text, title="Find and replace"):
self.txt = txt
self.messageOpen = False
self.messageRef = None
tk.simpledialog.Dialog.__init__(self, parent, title)
def body(self, master):
self.search_text = tk.StringVar()
self.replace_text = tk.StringVar()
self.isCaseSensitive = tk.IntVar()
self.isCaseSensitive.set(1)
self.isBackward = tk.IntVar()
self.isRegExp = tk.IntVar()
self.matchLength = tk.IntVar()
self.frame = tk.Frame(master)
self.frame_btn = tk.Frame(self.frame)
self.frame_check = tk.Frame(self.frame)
self.frame_entry = tk.Frame(self.frame)
self.search_entry = tk.Entry(
self.frame_entry, width=20, textvariable=self.search_text
)
self.replace_entry = tk.Entry(
self.frame_entry, width=20, textvariable=self.replace_text
)
self.check_case = tk.Checkbutton(
self.frame_check, text="Case sensitive", var=self.isCaseSensitive
)
self.check_search_backward = tk.Checkbutton(
self.frame_check, text="Search backward", var=self.isBackward
)
self.check_regexp = tk.Checkbutton(
self.frame_check, text="Use regular expression", var=self.isRegExp
)
self.btn_search = tk.Button(self.frame_btn, text="Find", command=self.search)
self.btn_replace = tk.Button(
self.frame_btn, text="Replace", command=self.replace
)
self.btn_search_and_replace = tk.Button(
self.frame_btn, text="Find and Replace", command=self.search_and_replace
)
self.btn_cancel = tk.Button(self.frame, text="Cancel", command=self.cancel)
self.frame.grid(column=0, row=0, sticky="NSEW")
self.btn_cancel.grid(column=1, row=1, sticky="E", padx=(4, 8), pady=(4, 8))
self.frame_entry.grid(column=0, row=0)
tk.Label(self.frame_entry, text="Find:").grid(column=0, row=0, sticky="W")
self.search_entry.grid(column=1, row=0)
tk.Label(self.frame_entry, text="Replace:").grid(
column=0, row=1, sticky="W", pady=(6, 12)
)
self.replace_entry.grid(column=1, row=1, pady=(6, 12))
self.frame_btn.grid(column=0, row=1, padx=(8, 4), pady=(4, 8))
self.btn_search.grid(column=0, row=0, sticky="W")
self.btn_replace.grid(column=1, row=0, sticky="W", padx=(2, 10))
self.btn_search_and_replace.grid(column=2, row=0, sticky="E")
self.frame_check.grid(column=1, row=0, pady=(6, 12))
self.check_case.grid(column=0, row=0, sticky="W")
self.check_search_backward.grid(column=0, row=1, sticky="W")
self.check_regexp.grid(column=0, row=2, sticky="W")
return self.search_entry
def _createMessage(self, text):
if self.messageOpen:
self._destroyMessage()
self.messageRef = MessageBox(self, title="", message=text)
self.messageOpen = True
def _destroyMessage(self):
if self.messageOpen:
self.messageRef.destroy()
self.messageRef = None
self.messageOpen = False
def _searchData(self):
return {
"caseSensitive": self.isCaseSensitive.get(),
"backwards": self.isBackward.get(),
"regexp": self.isRegExp.get(),
"search_text": self.search_text.get(),
"replace_text": self.replace_text.get(),
}
def _search(self, doSearch, doReplace):
if not doSearch and not doReplace:
return
self.txt.tag_configure("found", background="#aaaaaa")
self.txt.tag_configure("replaced", background="#aaaaaa")
data = self._searchData()
n_search = len(data["search_text"])
if doSearch and not n_search > 0:
return
if doSearch:
if data["backwards"]:
self.txt.mark_set("search_start", "insert")
self.txt.mark_set("search_end", "1.0" + "-1c")
else:
self.txt.mark_set("search_start", "insert")
self.txt.mark_set("search_end", "end")
if data["caseSensitive"]:
nocase = 0
else:
nocase = 1
if data["regexp"]:
start = self.txt.search(
r"{}".format(data["search_text"]),
self.txt.index("search_start"),
stopindex=self.txt.index("search_end"),
backwards=data["backwards"],
count=self.matchLength,
nocase=nocase,
regexp=True,
)
else:
start = self.txt.search(
data["search_text"],
self.txt.index("search_start"),
stopindex=self.txt.index("search_end"),
backwards=data["backwards"],
count=self.matchLength,
nocase=nocase,
)
if start:
end = start + "+{0}c".format(self.matchLength.get())
self.txt.tag_add("found", start, end)
if data["backwards"]:
self.txt.mark_set("insert", start)
else:
self.txt.mark_set("insert", end)
else:
self._createMessage("No matches found.")
return
if doReplace:
foundRanges = self.txt.tag_ranges("found")
if not foundRanges:
self._search(doSearch=True, doReplace=False)
return
foundStarts = [idx for i, idx in enumerate(foundRanges) if i % 2 == 0]
foundEnds = [idx for i, idx in enumerate(foundRanges) if i % 2 == 1]
for foundStart, foundEnd in zip(foundStarts, foundEnds):
self.txt.delete(foundStart, foundEnd)
self.txt.insert(foundStart, data["replace_text"], ("replaced",))
def search(self, event=0):
self._search(doSearch=True, doReplace=False)
|
Apache License 2.0
|
python-discord/sir-lancebot
|
bot/exts/avatar_modification/avatar_modify.py
|
file_safe_name
|
python
|
def file_safe_name(effect: str, display_name: str) -> str:
valid_filename_chars = f"-_. {string.ascii_letters}{string.digits}"
file_name = FILENAME_STRING.format(effect=effect, author=display_name)
file_name = file_name.replace(" ", "_")
cleaned_filename = unicodedata.normalize("NFKD", file_name).encode("ASCII", "ignore").decode()
cleaned_filename = "".join(c for c in cleaned_filename if c in valid_filename_chars)
return cleaned_filename
|
Returns a file safe filename based on the given effect and display name.
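For example (made-up inputs), accents are stripped by the NFKD/ASCII step and spaces become underscores:

# FILENAME_STRING is "{effect}_{author}.png" per the module constants above.
assert file_safe_name("eightbit", "Ĥellö wörld") == "eightbit_Hello_world.png"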
|
https://github.com/python-discord/sir-lancebot/blob/559e76ffbef7af85132d86f2e3ab8acf7e7f5eef/bot/exts/avatar_modification/avatar_modify.py#L45-L59
|
import asyncio
import json
import logging
import math
import string
import unicodedata
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Optional, TypeVar, Union
import discord
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours, Emojis
from bot.exts.avatar_modification._effects import PfpEffects
from bot.utils.extensions import invoke_help_command
from bot.utils.halloween import spookifications
log = logging.getLogger(__name__)
_EXECUTOR = ThreadPoolExecutor(10)
FILENAME_STRING = "{effect}_{author}.png"
MAX_SQUARES = 10_000
T = TypeVar("T")
GENDER_OPTIONS = json.loads(Path("bot/resources/holidays/pride/gender_options.json").read_text("utf8"))
async def in_executor(func: Callable[..., T], *args) -> T:
log.trace(f"Running {func.__name__} in an executor.")
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_EXECUTOR, func, *args)
|
MIT License
|
intelxed/xed
|
pysrc/read_xed_db.py
|
xed_reader_t._add_cpuid
|
python
|
def _add_cpuid(self):
for v in self.recs:
v.cpuid = []
ky = 'XED_ISA_SET_{}'.format(v.isa_set.upper())
if ky in self.cpuid_map:
v.cpuid = self.cpuid_map[ky]
|
set v.cpuid with list of cpuid bits for this instr
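A toy illustration of the key it builds (the record and map contents are invented):

# The lookup key is always "XED_ISA_SET_" + the record's isa_set upper-cased.
cpuid_map = {"XED_ISA_SET_AVX2": ["avx2.7.0.ebx.5"]}
isa_set = "avx2"
key = "XED_ISA_SET_{}".format(isa_set.upper())
print(cpuid_map.get(key, []))   # -> ['avx2.7.0.ebx.5']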
|
https://github.com/intelxed/xed/blob/d57a3bd0a8ad7a1f0c6e2a1b58060d9014021098/pysrc/read_xed_db.py#L396-L402
|
import sys
import re
import collections
import patterns
import slash_expand
import genutil
import opnd_types
import opnds
import cpuid_rdr
import map_info_rdr
def die(s):
sys.stdout.write("ERROR: {0}\n".format(s))
sys.exit(1)
def msgb(b,s=''):
sys.stderr.write("[{0}] {1}\n".format(b,s))
class inst_t(object):
def __init__(self):
pass
def __str__(self):
s = []
for fld in sorted(self.__dict__.keys()):
s.append("{}: {}".format(fld,getattr(self,fld)))
return "\n".join(s) + '\n'
def get_eosz_list(self):
if self.space == 'legacy':
if hasattr(self,'attributes'):
if 'BYTEOP' in self.attributes:
return [8]
if hasattr(self,'eosz'):
if self.eosz == 'oszall':
return [16,32,64]
if self.eosz == 'osznot16':
return [32,64]
if self.eosz == 'osznot64':
return [16,32]
if self.eosz == 'o16':
return [16]
if self.eosz == 'o32':
return [32]
if self.eosz == 'o64':
return [64]
die("Could not handle eosz {}".format(self.eosz))
die("Did not find eosz for {}".format(self.iclass))
else:
return None
class width_info_t(object):
def __init__(self, name, dtype, widths):
self.name = name.upper()
self.dtype = dtype
self.widths = widths
def __str__(self):
s = []
s.append("name {}".format(self.name))
s.append("datatype: {}".format(self.dtype))
s.append("widths: {}".format(",".join(self.widths.values())))
return " ".join(s)
completely_numeric = re.compile(r'^[0-9]+$')
def _is_bits(val):
global completely_numeric
length = len(val)
if length > 4:
if val[-4:] == "bits":
number_string = val[0:-4]
if completely_numeric.match(number_string):
return number_string
return None
def _op_immd(op):
if op.name == 'IMM0':
if op.oc2 == 'd':
return True
def _op_immw(op):
if op.name == 'IMM0':
if op.oc2 == 'w':
return True
def _op_immz(op):
if op.name == 'IMM0':
if op.oc2 == 'z':
return True
def _op_immv(op):
if op.name == 'IMM0':
if op.oc2 == 'v':
return True
return False
def _op_imm8(op):
if op.name == 'IMM0':
if op.oc2 == 'b':
return True
return False
def _get_mempop_width_code(v):
for op in v.parsed_operands:
if op.name == 'MEM0':
return op.oc2
die("Could not find evex memop for {}".format(v.iclass))
def _set_eosz(v):
eosz = 'oszall'
if v.space == 'legacy':
if 'EOSZ=1' in v.pattern:
eosz = 'o16'
elif 'EOSZ=2' in v.pattern:
eosz = 'o32'
elif 'EOSZ=3' in v.pattern:
eosz = 'o64'
elif 'EOSZ!=1' in v.pattern:
eosz = 'osznot16'
elif 'EOSZ!=3' in v.pattern:
eosz = 'osznot64'
if v.mode_restriction != 'unspecified':
if v.mode_restriction == 0:
if v.osz_required and 'IMMUNE66' not in v.pattern:
eosz = 'o32'
else:
eosz = 'o16'
elif v.mode_restriction == 1:
if v.osz_required and 'IMMUNE66' not in v.pattern:
eosz = 'o16'
else:
eosz = 'o32'
elif v.mode_restriction == 2:
if v.default_64b:
eosz = 'o64'
elif v.rexw_prefix == '1':
eosz = 'o64'
elif 'FORCE64' in v.pattern:
eosz = 'o64'
elif v.osz_required and 'IMMUNE66' not in v.pattern:
eosz = 'o16'
else:
eosz = 'o32'
v.eosz = eosz
def is_positive_integer(s):
if re.match(r'^[0-9]+$',s):
return True
return False
class xed_reader_t(object):
def __init__(self,
state_bits_filename,
instructions_filename,
widths_filename,
element_types_filename,
cpuid_filename='',
map_descriptions_filename=''):
self.xtypes = self._gen_xtypes(element_types_filename)
self.width_type_dict, self.width_info_dict = self._gen_widths(widths_filename)
self.state_bits = self._parse_state_bits(state_bits_filename)
self.map_info = []
if map_descriptions_filename:
self.map_info = map_info_rdr.read_file(map_descriptions_filename)
self.deleted_unames = {}
self.deleted_instructions = {}
self.recs = self._process_lines(instructions_filename)
self._find_opcodes()
self._fix_real_opcode()
self._parse_operands()
self._generate_operands()
self._generate_memop_rw_field()
self._generate_missing_iforms()
self._summarize_operands()
self._summarize_vsib()
self._summarize_sibmem()
self.cpuid_map = {}
if cpuid_filename:
self.cpuid_map = cpuid_rdr.read_file(cpuid_filename)
self._add_cpuid()
self._add_vl()
self._add_broadcasting()
self._evex_disp8_scaling()
def get_width_info_dict(self):
return self.width_info_dict
def _refine_widths_input(self,lines):
comment_pattern = re.compile(r'#.*$')
width_info_dict = {}
for line in lines:
pline = comment_pattern.sub('',line).strip()
if pline == '':
continue
wrds = pline.split()
ntokens = len(wrds)
if ntokens == 3:
(name, dtype, all_width) = wrds
width8 = all_width
width16 = all_width
width32 = all_width
width64 = all_width
elif ntokens == 5:
width8='0'
(name, dtype, width16, width32, width64) = wrds
else:
die("Bad number of tokens on line: " + line)
bit_widths = {}
for osz,val in zip([8,16,32,64], [width8, width16, width32, width64]):
number_string = _is_bits(val)
if number_string:
bit_widths[osz] = number_string
else:
bit_widths[osz] = str(int(val)*8)
width_info_dict[name] = width_info_t(name, dtype, bit_widths)
return width_info_dict
def _gen_widths(self, fn):
lines = open(fn,'r').readlines()
width_info_dict = self._refine_widths_input(lines)
width_type_dict = {}
for w in width_info_dict.values():
width_type_dict[w.name] = w.dtype
return width_type_dict, width_info_dict
def _gen_xtypes(self, fn):
lines = open(fn,'r').readlines()
xtypes_dict = opnd_types.read_operand_types(lines)
return set(xtypes_dict.keys())
def _compute_memop_rw(self,v):
read=False
write=False
for opnd in v.parsed_operands:
if opnd.name.startswith('MEM'):
if 'r' in opnd.rw:
read = True
if 'w' in opnd.rw:
write = True
elif opnd.bits:
if 'STACKPUSH' in opnd.bits:
write = True
if 'STACKPOP' in opnd.bits:
read = True
if read and write:
return 'mem-rw'
elif write:
return 'mem-w'
elif read:
return 'mem-r'
return 'none'
def _compute_operands(self,v):
expl_operand_list = []
impl_operand_list = []
for op in v.parsed_operands:
s = None
if op.name in ['MEM0','MEM1']:
s = 'MEM'
elif op.name in ['IMM0','IMM1']:
s = 'IMM'
elif op.type == 'nt_lookup_fn':
s = op.lookupfn_name_base
elif op.type == 'reg':
s = op.bits
s = re.sub(r'XED_REG_','',s)
elif op.type == 'imm_const':
if op.name in ['BCAST','SCALE']:
continue
s = op.name
else:
msbg("UNHANDLED","{}".format(op))
if s:
if op.visibility in ['IMPLICIT','SUPPRESSED']:
impl_operand_list.append(s)
if op.visibility in ['EXPLICIT', 'DEFAULT']:
expl_operand_list.append(s)
return expl_operand_list, impl_operand_list
def _generate_operands(self):
for v in self.recs:
if not hasattr(v,'iform'):
v.iform=''
v.explicit_operands, v.implicit_operands = self._compute_operands(v)
if not v.explicit_operands:
v.explicit_operands = ['none']
if not v.implicit_operands:
v.implicit_operands = ['none']
def _generate_one_iform(self,v):
tokens = []
for op in v.parsed_operands:
if op.visibility in ['IMPLICIT', 'EXPLICIT', 'DEFAULT']:
s = None
if op.name in ['MEM0','MEM1']:
s = 'MEM'
if op.oc2:
s += op.oc2
elif op.name in ['IMM0','IMM1']:
s = 'IMM'
if op.oc2:
s += op.oc2
elif op.type == 'nt_lookup_fn':
s = op.lookupfn_name_base
if op.oc2 and s not in ['X87']:
if op.oc2 == 'v' and s[-1] == 'v':
pass
else:
s += op.oc2
elif op.type == 'reg':
s = op.bits.upper()
s = re.sub(r'XED_REG_','',s)
if op.oc2 and op.oc2 not in ['f80']:
s += op.oc2
elif op.type == 'imm_const':
if op.name in ['BCAST','SCALE']:
continue
s = op.name
if op.oc2:
s += op.oc2
else:
msgb("IFORM SKIPPING ","{} for {}".format(op, v.iclass))
if s:
tokens.append(s)
iform = v.iclass
if tokens:
iform += '_' + "_".join(tokens)
return iform
def _generate_missing_iforms(self):
for v in self.recs:
if v.iform == '' or not hasattr(v,'iform'):
v.iform = self._generate_one_iform(v)
def _generate_memop_rw_field(self):
for v in self.recs:
v.memop_rw = self._compute_memop_rw(v)
|
Apache License 2.0
|
weasyl/weasyl
|
weasyl/define.py
|
text_first_line
|
python
|
def text_first_line(target, strip=False):
first_line, _, rest = target.partition("\n")
if strip:
return rest
else:
return first_line
|
Return the first line of text; if `strip` is True, return all but the first
line of text.
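Two illustrative calls covering both branches:

target = "first line\nsecond line\nthird line"
assert text_first_line(target) == "first line"
assert text_first_line(target, strip=True) == "second line\nthird line"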
|
https://github.com/weasyl/weasyl/blob/80c86942c6f20a815086e2895fdad51d3aa77eed/weasyl/define.py#L500-L510
|
import os
import time
import random
import hashlib
import itertools
import json
import numbers
import datetime
import pkgutil
from urllib.parse import urlencode, urljoin
import arrow
from pyramid.threadlocal import get_current_request
import pytz
import requests
import sqlalchemy as sa
import sqlalchemy.orm
from pyramid.response import Response
from sentry_sdk import capture_exception
from sqlalchemy.exc import OperationalError
from web.template import Template
import libweasyl.constants
from libweasyl.cache import region
from libweasyl.legacy import UNIXTIME_OFFSET as _UNIXTIME_OFFSET, get_sysname
from libweasyl.models.tables import metadata as meta
from libweasyl.models.users import DEFAULT_TIMEZONE
from libweasyl import html, text, ratings, staff
from weasyl import config
from weasyl import errorcode
from weasyl import macro
from weasyl.config import config_obj, config_read_setting
from weasyl.error import WeasylError
from weasyl.macro import MACRO_SUPPORT_ADDRESS
_shush_pyflakes = [sqlalchemy.orm]
reload_templates = bool(os.environ.get('WEASYL_RELOAD_TEMPLATES'))
reload_assets = bool(os.environ.get('WEASYL_RELOAD_ASSETS'))
def _load_resources():
global resource_paths
with open(os.path.join(macro.MACRO_APP_ROOT, 'build/rev-manifest.json'), 'r') as f:
resource_paths = json.loads(f.read())
_load_resources()
def record_timing(func):
return func
_sqlalchemy_url = config_obj.get('sqlalchemy', 'url')
if config._in_test:
_sqlalchemy_url += '_test'
engine = meta.bind = sa.create_engine(_sqlalchemy_url, pool_use_lifo=True, pool_size=2)
sessionmaker_future = sa.orm.sessionmaker(bind=engine, expire_on_commit=False)
sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine, autocommit=True))
def connect():
request = get_current_request()
if request is not None:
return request.pg_connection
return sessionmaker()
def execute(statement, argv=None):
db = connect()
if argv:
argv = tuple(argv)
for x in argv:
if type(x) is not int:
raise TypeError("can't use %r as define.execute() parameter" % (x,))
statement %= argv
query = db.connection().execute(statement)
if statement.lstrip()[:6] == "SELECT" or " RETURNING " in statement:
return query.fetchall()
else:
query.close()
def column(results):
return [x for x, in results]
_PG_SERIALIZATION_FAILURE = u'40001'
def serializable_retry(action, limit=16):
with engine.connect() as db:
db = db.execution_options(isolation_level='SERIALIZABLE')
for i in itertools.count(1):
try:
with db.begin():
return action(db)
except OperationalError as e:
if i == limit or e.orig.pgcode != _PG_SERIALIZATION_FAILURE:
raise
with open(os.path.join(macro.MACRO_APP_ROOT, "version.txt")) as f:
CURRENT_SHA = f.read().strip()
_template_cache = {}
def _compile(template_name):
template = _template_cache.get(template_name)
if template is None or reload_templates:
_template_cache[template_name] = template = Template(
pkgutil.get_data(__name__, 'templates/' + template_name).decode('utf-8'),
filename=template_name,
globals={
"STR": str,
"LOGIN": get_sysname,
"CSRF": (lambda: ""),
"USER_TYPE": user_type,
"DATE": convert_date,
"ISO8601": iso8601,
"ISO8601_DATE": iso8601_date,
"TIME": _convert_time,
"LOCAL_ARROW": local_arrow,
"PRICE": text_price_amount,
"SYMBOL": text_price_symbol,
"TITLE": titlebar,
"RENDER": render,
"COMPILE": _compile,
"MARKDOWN": text.markdown,
"MARKDOWN_EXCERPT": text.markdown_excerpt,
"SUMMARIZE": summarize,
"SHA": CURRENT_SHA,
"NOW": get_time,
"THUMB": thumb_for_sub,
"WEBP_THUMB": webp_thumb_for_sub,
"M": macro,
"R": ratings,
"SLUG": text.slug_for,
"QUERY_STRING": query_string,
"INLINE_JSON": html.inline_json,
"PATH": _get_path,
"arrow": arrow,
"constants": libweasyl.constants,
"getattr": getattr,
"json": json,
"sorted": sorted,
"staff": staff,
"resource_path": get_resource_path,
})
return template
def render(template_name, argv=()):
template = _compile(template_name)
return str(template(*argv))
def titlebar(title, backtext=None, backlink=None):
return render("common/stage_title.html", [title, backtext, backlink])
def errorpage_html(userid, message_html, links=None, request_id=None, **extras):
return webpage(userid, "error/error.html", [message_html, links, request_id], **extras)
def errorpage(userid, code=None, links=None, request_id=None, **extras):
if code is None:
code = errorcode.unexpected
return errorpage_html(userid, text.markdown(code), links, request_id, **extras)
def webpage(userid, template, argv=None, options=None, **extras):
if argv is None:
argv = []
if options is None:
options = []
page = common_page_start(userid, options=options, **extras)
page.append(render(template, argv))
return common_page_end(userid, page, options=options)
def get_userid():
return get_current_request().userid
_ORIGIN = config_obj.get('general', 'origin')
def is_csrf_valid(request):
return request.headers.get('origin') == _ORIGIN
@region.cache_on_arguments(namespace='v3')
def _get_all_config(userid):
row = engine.execute("""
SELECT EXISTS (SELECT FROM permaban WHERE permaban.userid = %(userid)s) AS is_banned,
EXISTS (SELECT FROM suspension WHERE suspension.userid = %(userid)s) AS is_suspended,
lo.voucher IS NOT NULL AS is_vouched_for,
pr.config AS profile_configuration,
pr.jsonb_settings
FROM login lo INNER JOIN profile pr USING (userid)
WHERE userid = %(userid)s
""", userid=userid).first()
return dict(row)
def get_config(userid):
if not userid:
return ""
return _get_all_config(userid)['profile_configuration']
def get_login_settings(userid):
r = _get_all_config(userid)
return r['is_banned'], r['is_suspended']
def is_vouched_for(userid):
return _get_all_config(userid)['is_vouched_for']
def get_profile_settings(userid):
from weasyl.profile import ProfileSettings
if not userid:
jsonb = {}
else:
jsonb = _get_all_config(userid)['jsonb_settings']
if jsonb is None:
jsonb = {}
return ProfileSettings(jsonb)
def get_rating(userid):
if not userid:
return ratings.GENERAL.code
if is_sfw_mode():
profile_settings = get_profile_settings(userid)
return profile_settings.max_sfw_rating
config = get_config(userid)
if 'p' in config:
return ratings.EXPLICIT.code
elif 'a' in config:
return ratings.MATURE.code
else:
return ratings.GENERAL.code
def get_config_rating(userid):
config = get_config(userid)
if 'p' in config:
max_rating = ratings.EXPLICIT.code
elif 'a' in config:
max_rating = ratings.MATURE.code
else:
max_rating = ratings.GENERAL.code
profile_settings = get_profile_settings(userid)
sfw_rating = profile_settings.max_sfw_rating
return max_rating, sfw_rating
def is_sfw_mode():
return get_current_request().cookies.get('sfwmode', "nsfw") == "sfw"
def get_premium(userid):
if not userid:
return False
config = get_config(userid)
return "d" in config
@region.cache_on_arguments()
@record_timing
def _get_display_name(userid):
return engine.scalar("SELECT username FROM profile WHERE userid = %(user)s", user=userid)
def get_display_name(userid):
if not userid:
return None
return _get_display_name(userid)
def get_int(target):
if target is None:
return 0
if isinstance(target, numbers.Number):
return int(target)
try:
return int("".join(i for i in target if i.isdigit()))
except ValueError:
return 0
def get_targetid(*argv):
for i in argv:
if i:
return i
def get_search_tag(target):
target = "".join(i for i in target if ord(i) < 128)
target = target.replace(" ", "_")
target = "".join(i for i in target if i.isalnum() or i in "_")
target = target.strip("_")
target = "_".join(i for i in target.split("_") if i)
return target.lower()
def get_time():
return int(time.time()) + _UNIXTIME_OFFSET
def get_timestamp():
return time.strftime("%Y-%m", time.localtime(get_time()))
def _get_hash_path(charid):
id_hash = hashlib.sha1(b"%i" % (charid,)).hexdigest()
return "/".join([id_hash[i:i + 2] for i in range(0, 11, 2)]) + "/"
def get_character_directory(charid):
return macro.MACRO_SYS_CHAR_PATH + _get_hash_path(charid)
@region.cache_multi_on_arguments(should_cache_fn=bool)
def _get_userids(*sysnames):
result = engine.execute(
"SELECT login_name, userid FROM login WHERE login_name = ANY (%(names)s)"
" UNION ALL SELECT alias_name, userid FROM useralias WHERE alias_name = ANY (%(names)s)"
" UNION ALL SELECT login_name, userid FROM username_history WHERE active AND login_name = ANY (%(names)s)",
names=list(sysnames),
)
sysname_userid = dict(result.fetchall())
return [sysname_userid.get(sysname, 0) for sysname in sysnames]
def get_userids(usernames):
ret = {}
lookup_usernames = []
sysnames = []
for username in usernames:
sysname = get_sysname(username)
if sysname:
lookup_usernames.append(username)
sysnames.append(sysname)
else:
ret[username] = 0
ret.update(zip(lookup_usernames, _get_userids(*sysnames)))
return ret
def get_userid_list(target):
usernames = target.split(";")
return [userid for userid in get_userids(usernames).values() if userid != 0]
def get_ownerid(submitid=None, charid=None, journalid=None):
if submitid:
return engine.scalar("SELECT userid FROM submission WHERE submitid = %(id)s", id=submitid)
if charid:
return engine.scalar("SELECT userid FROM character WHERE charid = %(id)s", id=charid)
if journalid:
return engine.scalar("SELECT userid FROM journal WHERE journalid = %(id)s", id=journalid)
def get_random_set(target, count):
return random.sample(target, min(count, len(target)))
def get_address():
request = get_current_request()
return request.client_addr
def _get_path():
return get_current_request().url
def text_price_amount(target):
return "%.2f" % (float(target) / 100.0)
def text_price_symbol(target):
from weasyl.commishinfo import CURRENCY_CHARMAP
for c in target:
if c in CURRENCY_CHARMAP:
return CURRENCY_CHARMAP[c].symbol
return CURRENCY_CHARMAP[''].symbol
|
Apache License 2.0
|
guillaume-chevalier/reubert
|
src/infrastructure/bert/modeling.py
|
BertModel.__init__
|
python
|
def __init__(
self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None
):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings
)
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob
)
with tf.variable_scope("encoder"):
attention_mask = create_attention_mask_from_input_mask(input_ids, input_mask)
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True
)
self.sequence_output = self.all_encoder_layers[-1]
with tf.variable_scope("pooler"):
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range)
)
|
Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
|
https://github.com/guillaume-chevalier/reubert/blob/86f115f651e0613047a7e319fdb0a5d9ec6f9292/src/infrastructure/bert/modeling.py#L134-L240
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
def __init__(
self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
|
MIT License
|
tomplus/kubernetes_asyncio
|
kubernetes_asyncio/client/models/v1_pod_spec.py
|
V1PodSpec.restart_policy
|
python
|
def restart_policy(self, restart_policy):
self._restart_policy = restart_policy
|
Sets the restart_policy of this V1PodSpec.
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501
:param restart_policy: The restart_policy of this V1PodSpec. # noqa: E501
:type: str
|
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_pod_spec.py#L735-L744
|
import pprint
import re
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1PodSpec(object):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active_deadline_seconds': 'int',
'affinity': 'V1Affinity',
'automount_service_account_token': 'bool',
'containers': 'list[V1Container]',
'dns_config': 'V1PodDNSConfig',
'dns_policy': 'str',
'enable_service_links': 'bool',
'ephemeral_containers': 'list[V1EphemeralContainer]',
'host_aliases': 'list[V1HostAlias]',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'hostname': 'str',
'image_pull_secrets': 'list[V1LocalObjectReference]',
'init_containers': 'list[V1Container]',
'node_name': 'str',
'node_selector': 'dict(str, str)',
'overhead': 'dict(str, str)',
'preemption_policy': 'str',
'priority': 'int',
'priority_class_name': 'str',
'readiness_gates': 'list[V1PodReadinessGate]',
'restart_policy': 'str',
'runtime_class_name': 'str',
'scheduler_name': 'str',
'security_context': 'V1PodSecurityContext',
'service_account': 'str',
'service_account_name': 'str',
'share_process_namespace': 'bool',
'subdomain': 'str',
'termination_grace_period_seconds': 'int',
'tolerations': 'list[V1Toleration]',
'topology_spread_constraints': 'list[V1TopologySpreadConstraint]',
'volumes': 'list[V1Volume]'
}
attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_config': 'dnsConfig',
'dns_policy': 'dnsPolicy',
'enable_service_links': 'enableServiceLinks',
'ephemeral_containers': 'ephemeralContainers',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'overhead': 'overhead',
'preemption_policy': 'preemptionPolicy',
'priority': 'priority',
'priority_class_name': 'priorityClassName',
'readiness_gates': 'readinessGates',
'restart_policy': 'restartPolicy',
'runtime_class_name': 'runtimeClassName',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'share_process_namespace': 'shareProcessNamespace',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'topology_spread_constraints': 'topologySpreadConstraints',
'volumes': 'volumes'
}
def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_config=None, dns_policy=None, enable_service_links=None, ephemeral_containers=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, hostname=None, image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, overhead=None, preemption_policy=None, priority=None, priority_class_name=None, readiness_gates=None, restart_policy=None, runtime_class_name=None, scheduler_name=None, security_context=None, service_account=None, service_account_name=None, share_process_namespace=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, topology_spread_constraints=None, volumes=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active_deadline_seconds = None
self._affinity = None
self._automount_service_account_token = None
self._containers = None
self._dns_config = None
self._dns_policy = None
self._enable_service_links = None
self._ephemeral_containers = None
self._host_aliases = None
self._host_ipc = None
self._host_network = None
self._host_pid = None
self._hostname = None
self._image_pull_secrets = None
self._init_containers = None
self._node_name = None
self._node_selector = None
self._overhead = None
self._preemption_policy = None
self._priority = None
self._priority_class_name = None
self._readiness_gates = None
self._restart_policy = None
self._runtime_class_name = None
self._scheduler_name = None
self._security_context = None
self._service_account = None
self._service_account_name = None
self._share_process_namespace = None
self._subdomain = None
self._termination_grace_period_seconds = None
self._tolerations = None
self._topology_spread_constraints = None
self._volumes = None
self.discriminator = None
if active_deadline_seconds is not None:
self.active_deadline_seconds = active_deadline_seconds
if affinity is not None:
self.affinity = affinity
if automount_service_account_token is not None:
self.automount_service_account_token = automount_service_account_token
self.containers = containers
if dns_config is not None:
self.dns_config = dns_config
if dns_policy is not None:
self.dns_policy = dns_policy
if enable_service_links is not None:
self.enable_service_links = enable_service_links
if ephemeral_containers is not None:
self.ephemeral_containers = ephemeral_containers
if host_aliases is not None:
self.host_aliases = host_aliases
if host_ipc is not None:
self.host_ipc = host_ipc
if host_network is not None:
self.host_network = host_network
if host_pid is not None:
self.host_pid = host_pid
if hostname is not None:
self.hostname = hostname
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if init_containers is not None:
self.init_containers = init_containers
if node_name is not None:
self.node_name = node_name
if node_selector is not None:
self.node_selector = node_selector
if overhead is not None:
self.overhead = overhead
if preemption_policy is not None:
self.preemption_policy = preemption_policy
if priority is not None:
self.priority = priority
if priority_class_name is not None:
self.priority_class_name = priority_class_name
if readiness_gates is not None:
self.readiness_gates = readiness_gates
if restart_policy is not None:
self.restart_policy = restart_policy
if runtime_class_name is not None:
self.runtime_class_name = runtime_class_name
if scheduler_name is not None:
self.scheduler_name = scheduler_name
if security_context is not None:
self.security_context = security_context
if service_account is not None:
self.service_account = service_account
if service_account_name is not None:
self.service_account_name = service_account_name
if share_process_namespace is not None:
self.share_process_namespace = share_process_namespace
if subdomain is not None:
self.subdomain = subdomain
if termination_grace_period_seconds is not None:
self.termination_grace_period_seconds = termination_grace_period_seconds
if tolerations is not None:
self.tolerations = tolerations
if topology_spread_constraints is not None:
self.topology_spread_constraints = topology_spread_constraints
if volumes is not None:
self.volumes = volumes
@property
def active_deadline_seconds(self):
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
self._active_deadline_seconds = active_deadline_seconds
@property
def affinity(self):
return self._affinity
@affinity.setter
def affinity(self, affinity):
self._affinity = affinity
@property
def automount_service_account_token(self):
return self._automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount_service_account_token):
self._automount_service_account_token = automount_service_account_token
@property
def containers(self):
return self._containers
@containers.setter
def containers(self, containers):
if self.local_vars_configuration.client_side_validation and containers is None:
raise ValueError("Invalid value for `containers`, must not be `None`")
self._containers = containers
@property
def dns_config(self):
return self._dns_config
@dns_config.setter
def dns_config(self, dns_config):
self._dns_config = dns_config
@property
def dns_policy(self):
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
self._dns_policy = dns_policy
@property
def enable_service_links(self):
return self._enable_service_links
@enable_service_links.setter
def enable_service_links(self, enable_service_links):
self._enable_service_links = enable_service_links
@property
def ephemeral_containers(self):
return self._ephemeral_containers
@ephemeral_containers.setter
def ephemeral_containers(self, ephemeral_containers):
self._ephemeral_containers = ephemeral_containers
@property
def host_aliases(self):
return self._host_aliases
@host_aliases.setter
def host_aliases(self, host_aliases):
self._host_aliases = host_aliases
@property
def host_ipc(self):
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
self._host_ipc = host_ipc
@property
def host_network(self):
return self._host_network
@host_network.setter
def host_network(self, host_network):
self._host_network = host_network
@property
def host_pid(self):
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
self._host_pid = host_pid
@property
def hostname(self):
return self._hostname
@hostname.setter
def hostname(self, hostname):
self._hostname = hostname
@property
def image_pull_secrets(self):
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
self._image_pull_secrets = image_pull_secrets
@property
def init_containers(self):
return self._init_containers
@init_containers.setter
def init_containers(self, init_containers):
self._init_containers = init_containers
@property
def node_name(self):
return self._node_name
@node_name.setter
def node_name(self, node_name):
self._node_name = node_name
@property
def node_selector(self):
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
self._node_selector = node_selector
@property
def overhead(self):
return self._overhead
@overhead.setter
def overhead(self, overhead):
self._overhead = overhead
@property
def preemption_policy(self):
return self._preemption_policy
@preemption_policy.setter
def preemption_policy(self, preemption_policy):
self._preemption_policy = preemption_policy
@property
def priority(self):
return self._priority
@priority.setter
def priority(self, priority):
self._priority = priority
@property
def priority_class_name(self):
return self._priority_class_name
@priority_class_name.setter
def priority_class_name(self, priority_class_name):
self._priority_class_name = priority_class_name
@property
def readiness_gates(self):
return self._readiness_gates
@readiness_gates.setter
def readiness_gates(self, readiness_gates):
self._readiness_gates = readiness_gates
@property
def restart_policy(self):
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
self._restart_policy = restart_policy
|
Apache License 2.0
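The generated V1PodSpec model above keeps every spec field behind a property, and the containers setter is the only one that enforces a non-None value. A minimal usage sketch, assuming kubernetes_asyncio is installed and that V1Container is importable from the same client package:

from kubernetes_asyncio.client import V1Container, V1PodSpec

# containers is the only required field; its setter raises ValueError when
# client-side validation is enabled and the value is None.
spec = V1PodSpec(
    containers=[V1Container(name="web", image="nginx:1.25")],
    restart_policy="Always",
    termination_grace_period_seconds=30,
)
print(spec.restart_policy)  # reads go through the property getter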
|
chilcote/unearth
|
artifacts/active_directory_dsbindtimeout.py
|
fact
|
python
|
def fact():
result = "None"
plist = "/Library/Preferences/com.apple.loginwindow.plist"
if plist and os.path.exists(plist):
result = CFPreferencesCopyAppValue("DSBindTimeout", plist)
return {factoid: str(result)}
|
Returns the dsbindtimeout setting
|
https://github.com/chilcote/unearth/blob/1aaa79195850aac8920efe2d632911d19d998fa3/artifacts/active_directory_dsbindtimeout.py#L8-L17
|
import os
from CoreFoundation import CFPreferencesCopyAppValue
factoid = "active_directory_dsbindtimeout"
|
Apache License 2.0
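The factoid above follows a simple pattern: read one key from a known plist and fall back to the string "None". A hedged sketch generalizing that pattern (the helper name is illustrative and not part of unearth):

import os
from CoreFoundation import CFPreferencesCopyAppValue

def read_loginwindow_pref(key, plist="/Library/Preferences/com.apple.loginwindow.plist"):
    # Return the preference value as a string, or "None" when it is unset or the plist is missing.
    result = "None"
    if os.path.exists(plist):
        value = CFPreferencesCopyAppValue(key, plist)
        if value is not None:
            result = value
    return str(result)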
|
jamesgleave/deep-docking-nonautomated
|
phase_2-3/ML/DDModel.py
|
DDModel.__init__
|
python
|
def __init__(self, mode, input_shape, hyperparameters, metrics=None, loss='binary_crossentropy', regression=False,
name="model"):
if metrics is None:
self.metrics = ['accuracy']
else:
self.metrics = metrics
output_activation = 'linear' if regression else 'sigmoid'
self.loss_func = loss
if regression and loss == 'binary_crossentropy':
self.loss_func = 'mean_squared_error'
hyperparameters["loss_func"] = self.loss_func
if mode == "loaded_model":
super().__init__(hyperparameters={'bin_array': [],
'dropout_rate': 0,
'learning_rate': 0,
'num_units': 0,
'epsilon': 0},
output_activation=output_activation, name=name)
self.mode = ""
self.input_shape = ()
self.history = keras.callbacks.History()
self.time = {"training_time": -1, "prediction_time": -1}
else:
super().__init__(hyperparameters=hyperparameters,
output_activation=output_activation, name=name)
self.mode = mode
self.input_shape = input_shape
self.history = keras.callbacks.History()
self.time = {'training_time': -1, "prediction_time": -1}
self.model = self._create_model()
self._compile()
|
Parameters
----------
mode : str
A string indicating which model to use
input_shape : tuple or list
The input shape for the model
hyperparameters : dict
A dictionary containing the hyperparameters for the DDModel's model
metrics : list
The metric(s) used by keras
loss : str
The loss function used by keras
regression : bool
Set to true if the model is performing regression
|
https://github.com/jamesgleave/deep-docking-nonautomated/blob/a55178dd910e7827dafcc3e0ffaed12277df8307/phase_2-3/ML/DDModel.py#L26-L86
|
from .Tokenizer import DDTokenizer
from sklearn import preprocessing
from .DDModelExceptions import *
from tensorflow.keras import backend
from .Models import Models
from .Parser import Parser
import tensorflow as tf
import pandas as pd
import numpy as np
import keras
import time
import os
import warnings
warnings.filterwarnings('ignore')
class DDModel(Models):
|
MIT License
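Based only on the signature and the loaded_model branch above, constructing a DDModel for training might look like the following sketch; the import path, mode string, and hyperparameter values are illustrative assumptions, not values taken from the repository:

from ML.DDModel import DDModel  # assumed import path

# Illustrative hyperparameters; the keys mirror those used in the loaded_model branch.
hyperparameters = {
    "bin_array": [],
    "dropout_rate": 0.2,
    "learning_rate": 1e-4,
    "num_units": 256,
    "epsilon": 1e-7,
}
model = DDModel(
    mode="original",  # assumed mode string; see the Models class for accepted values
    input_shape=(1024,),
    hyperparameters=hyperparameters,
    loss="binary_crossentropy",
    regression=False,
    name="dd_model",
)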
|
dedsecinside/awesome-scripts
|
APIs/Telegram API/telethon/network/connection/tcpfull.py
|
FullPacketCodec.__init__
|
python
|
def __init__(self, connection):
super().__init__(connection)
self._send_counter = 0
|
Initialize the codec for a connection.
Args:
connection: the Connection instance this codec frames packets for.
|
https://github.com/dedsecinside/awesome-scripts/blob/856835e5ff5f8a6af2d74bb25800c620feb712e3/APIs/Telegram API/telethon/network/connection/tcpfull.py#L11-L20
|
import struct
from zlib import crc32
from .connection import Connection, PacketCodec
from ...errors import InvalidChecksumError
class FullPacketCodec(PacketCodec):
tag = None
|
MIT License
|
simondlevy/breezycreate2
|
breezycreate2/__init__.py
|
Robot.playNote
|
python
|
def playNote(self, note, duration):
self.robot.play_note(note, duration)
|
Plays a specified note for a specified duration.
Notes are specified in MIDI format, e.g. "A#8", "C9".
See a MIDI note table for the full list of note names.
|
https://github.com/simondlevy/breezycreate2/blob/2fcbb79c06a11114004b640cbcb31c3fd34ed968/breezycreate2/__init__.py#L49-L55
|
import json
import serial
import struct
import warnings
import time
import pkg_resources
class Robot(object):
def __init__(self, port='/dev/ttyUSB0', baud=115200):
self.robot = _Create2(port, baud)
self.robot.start()
self.robot.safe()
def close(self):
self.robot.destroy()
|
MIT License
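A short usage sketch for the Robot wrapper above (the serial port is illustrative; duration units follow whatever the underlying play_note call expects):

from breezycreate2 import Robot

bot = Robot(port="/dev/ttyUSB0")  # __init__ connects, starts, and enters safe mode
bot.playNote("A#8", 32)           # MIDI-style note name plus a duration
bot.close()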
|
citelab/gini5
|
backend/src/gloader/xml/parsers/xmlproc/xmldtd.py
|
make_model
|
python
|
def make_model(cmhash,content_model,err):
cm=`content_model`
if cmhash.has_key(cm):
return cmhash[cm]
else:
content_model=make_objects(content_model)
builder=FNDABuilder()
content_model.add_states(builder)
content_model=fnda2fda(builder.get_automaton(),
builder.get_current_state(),
err)
cmhash[cm]=content_model
return content_model
|
Creates a finite deterministic automaton (FDA) from the content model.
|
https://github.com/citelab/gini5/blob/1b023c2b98d68d921d98c110555aafec908d72a0/backend/src/gloader/xml/parsers/xmlproc/xmldtd.py#L817-L830
|
import types
from xmlutils import *
from xmlapp import *
class WFCDTD(DTDConsumer):
def __init__(self,parser):
DTDConsumer.__init__(self,parser)
self.dtd_listener=DTDConsumer(parser)
self.reset()
def reset(self):
self.gen_ents={}
self.param_ents={}
self.elems={}
self.attrinfo={}
self.used_notations={}
for name in predef_ents.keys():
self.new_general_entity(name,predef_ents[name])
def set_dtd_listener(self,listener):
self.dtd_listener=listener
def resolve_pe(self,name):
return self.param_ents[name]
def resolve_ge(self,name):
return self.gen_ents[name]
def get_general_entities(self):
return self.gen_ents.keys()
def get_parameter_entities(self):
return self.param_ents.keys()
def get_elem(self,name):
return self.elems[name]
def get_elements(self):
return self.elems.keys()
def get_notation(self,name):
raise KeyError(name)
def get_notations(self):
return []
def get_root_elem(self,name):
return None
def dtd_end(self):
self.attrinfo={}
for elem in self.elems.values():
self.attrinfo[elem.get_name()]=(elem.get_default_attributes(),
elem.get_fixed_attributes())
self.dtd_listener.dtd_end()
def get_element_info(self,name):
return self.attrinfo[name]
def new_attribute(self,elem,attr,a_type,a_decl,a_def):
self.dtd_listener.new_attribute(elem,attr,a_type,a_decl,a_def)
if not self.elems.has_key(elem):
self.elems[elem]=ElementTypeAny(elem)
self.elems[elem].add_attr(attr,a_type,a_decl,a_def,self.parser)
def dtd_start(self):
self.dtd_listener.dtd_start()
def handle_comment(self, contents):
self.dtd_listener.handle_comment(contents)
def handle_pi(self, target, data):
self.dtd_listener.handle_pi(target, data)
def new_general_entity(self,name,val):
if self.gen_ents.has_key(name):
return
ent=InternalEntity(name,val)
self.gen_ents[name]=ent
self.dtd_listener.new_general_entity(name,val)
def new_parameter_entity(self,name,val):
if self.param_ents.has_key(name):
return
ent=InternalEntity(name,val)
self.param_ents[name]=ent
self.dtd_listener.new_parameter_entity(name,val)
def new_external_entity(self,ent_name,pubid,sysid,ndata):
if self.gen_ents.has_key(ent_name):
return
if ndata != None and hasattr(self, "notations"):
if not self.notations.has_key(ndata):
self.used_notations[ndata]= ent_name
ent=ExternalEntity(ent_name,pubid,sysid,ndata)
self.gen_ents[ent_name]=ent
self.dtd_listener.new_external_entity(ent_name,pubid,sysid,ndata)
def new_external_pe(self,name,pubid,sysid):
if self.param_ents.has_key(name):
return
ent=ExternalEntity(name,pubid,sysid,"")
self.param_ents[name]=ent
self.dtd_listener.new_external_pe(name,pubid,sysid)
def new_comment(self,contents):
self.dtd_listener.new_comment(contents)
def new_pi(self,target,rem):
self.dtd_listener.new_pi(target,rem)
def new_notation(self,name,pubid,sysid):
self.dtd_listener.new_notation(name,pubid,sysid)
def new_element_type(self,elem_name,elem_cont):
self.dtd_listener.new_element_type(elem_name,elem_cont)
class CompleteDTD(WFCDTD):
def __init__(self,parser):
WFCDTD.__init__(self,parser)
def reset(self):
WFCDTD.reset(self)
self.notations={}
self.attlists={}
self.root_elem=None
self.cmhash={}
def get_root_elem(self):
return self.root_elem
def get_notation(self,name):
return self.notations[name]
def get_notations(self):
return self.notations.keys()
def dtd_end(self):
WFCDTD.dtd_end(self)
self.cmhash={}
for elem in self.attlists.keys():
self.parser.report_error(1006,elem)
self.attlists={}
for notation in self.used_notations.keys():
try:
self.get_notation(notation)
except KeyError:
self.parser.report_error(2022,(self.used_notations[notation],
notation))
self.used_notations={}
def new_notation(self,name,pubid,sysid):
self.notations[name]=(pubid,sysid)
self.dtd_listener.new_notation(name,pubid,sysid)
def new_element_type(self,elem_name,elem_cont):
if self.elems.has_key(elem_name):
self.parser.report_error(2012,elem_name)
return
if elem_cont=="EMPTY":
elem_cont=("",[],"")
self.elems[elem_name]=ElementType(elem_name,make_empty_model(),
elem_cont)
elif elem_cont=="ANY":
elem_cont=None
self.elems[elem_name]=ElementTypeAny(elem_name)
else:
model=make_model(self.cmhash,elem_cont,self.parser)
self.elems[elem_name]=ElementType(elem_name,model,elem_cont)
if self.attlists.has_key(elem_name):
for (attr,a_type,a_decl,a_def) in self.attlists[elem_name]:
self.elems[elem_name].add_attr(attr,a_type,a_decl,a_def, self.parser)
del self.attlists[elem_name]
self.dtd_listener.new_element_type(elem_name,elem_cont)
def new_attribute(self,elem,attr,a_type,a_decl,a_def):
self.dtd_listener.new_attribute(elem,attr,a_type,a_decl,a_def)
try:
self.elems[elem].add_attr(attr,a_type,a_decl,a_def,self.parser)
except KeyError:
try:
self.attlists[elem].append((attr,a_type,a_decl,a_def))
except KeyError:
self.attlists[elem]=[(attr,a_type,a_decl,a_def)]
class ElementType:
def __init__(self,name,compiled,original):
self.name=name
self.attrhash={}
self.attrlist=[]
self.content_model=compiled
self.content_model_structure=original
def get_name(self):
return self.name
def get_attr_list(self):
return self.attrlist
def get_attr(self,name):
return self.attrhash[name]
def add_attr(self,attr,a_type,a_decl,a_def,parser):
if self.attrhash.has_key(attr):
parser.report_error(1007,attr)
return
self.attrlist.append(attr)
if a_type=="ID":
for attr_name in self.attrhash.keys():
if self.attrhash[attr_name].type=="ID":
parser.report_error(2013)
if a_decl!="#REQUIRED" and a_decl!="#IMPLIED":
parser.report_error(2014)
elif type(a_type)==types.TupleType and a_type[0]=="NOTATION":
for notation in a_type[1]:
parser.dtd.used_notations[notation]=attr
self.attrhash[attr]=Attribute(attr,a_type,a_decl,a_def,parser)
if a_def!=None:
self.attrhash[attr].validate(self.attrhash[attr].default,parser)
def get_start_state(self):
return self.content_model["start"]
def final_state(self, state):
return self.content_model["final"] & state
def next_state(self, state, elem_name):
return self.content_model[state].get(elem_name, 0)
def next_state_skip(self, state, elem_name):
arcs = self.content_model[state]
for skipped in arcs.keys():
if self.content_model[arcs[skipped]].has_key(elem_name):
arcs2 = self.content_model[arcs[skipped]]
return (arcs2[elem_name], skipped)
def get_valid_elements(self, state):
if self.content_model == None:
return []
try:
return self.content_model[state].keys()
except KeyError:
return []
def get_content_model(self):
return self.content_model_structure
def get_default_attributes(self):
defs={}
for attr in self.attrhash.values():
if attr.get_default()!=None:
defs[attr.get_name()]=attr.get_default()
return defs
def get_fixed_attributes(self):
fixed={}
for attr in self.attrhash.values():
if attr.get_decl()=="#FIXED":
fixed[attr.get_name()]=attr.get_default()
return fixed
class ElementTypeAny(ElementType):
def __init__(self,name):
ElementType.__init__(self,name,None,None)
def get_start_state(self):
return 1
def final_state(self,state):
return 1
def next_state(self,state,elem_name):
return 1
def get_valid_elements(self, state):
return []
class Attribute:
def __init__(self,name,attrtype,decl,default,parser):
self.name=name
self.type=attrtype
self.decl=decl
if default!=None and self.type!="CDATA":
self.default=string.join(string.split(default))
else:
self.default=default
if name=="xml:space":
error = 0
if type(self.type) in StringTypes:
parser.report_error(2015)
return
if len(self.type) < 1 or len(self.type) > 2:
error = 1
else:
for alt in self.type:
if alt not in ["default", "preserve"]:
error = 1
if error:
parser.report_error(2016)
def validate(self,value,parser):
if type(self.type) not in StringTypes:
for val in self.type:
if val==value: return
parser.report_error(2017,(value,self.name))
elif self.type=="CDATA":
return
elif self.type=="ID" or self.type=="IDREF" or self.type=="ENTITIY":
if not matches(reg_name,value):
parser.report_error(2018,self.name)
elif self.type=="NMTOKEN":
if not matches(reg_nmtoken,value):
parser.report_error(2019,self.name)
elif self.type=="NMTOKENS":
if not matches(reg_nmtokens,value):
parser.report_error(2020,self.name)
elif self.type=="IDREFS" or self.type=="ENTITIES":
for token in string.split(value):
if not matches(reg_name,token):
parser.report_error(2021,(token,self.name))
def get_name(self):
return self.name
def get_type(self):
return self.type
def get_decl(self):
return self.decl
def get_default(self):
return self.default
class InternalEntity:
def __init__(self,name,value):
self.name=name
self.value=value
def is_internal(self):
return 1
def get_value(self):
return self.value
class ExternalEntity:
def __init__(self,name,pubid,sysid,notation):
self.name=name
self.pubid=pubid
self.sysid=sysid
self.notation=notation
def is_parsed(self):
return self.notation==""
def is_internal(self):
return 0
def get_pubid(self):
return self.pubid
def get_sysid(self):
return self.sysid
def get_notation(self):
return self.notation
class FNDABuilder:
def __init__(self):
self.__current=0
self.__transitions=[[]]
self.__mem=[]
def remember_state(self):
self.__mem.append(self.__current)
def set_current_to_remembered(self):
self.__current=self.__mem[-1]
def forget_state(self):
del self.__mem[-1]
def new_state(self):
self.__transitions.append([])
self.__current=len(self.__transitions)-1
def get_automaton(self):
return self.__transitions
def get_current_state(self):
return self.__current
def new_transition(self,label,frm,to):
self.__transitions[frm].append((to,label))
def new_transition_to_new(self,label):
self.remember_state()
self.new_state()
self.__transitions[self.__mem[-1]].append((self.__current,label))
self.forget_state()
def new_transition_cur2rem(self,label):
self.__transitions[self.__current].append((self.__mem[-1],label))
def new_transition_rem2cur(self,label):
self.__transitions[self.__mem[-1]].append((self.__current,label))
def new_transition_2cur(self,frm,label):
self.__transitions[frm].append((self.__current,label))
class ContentModel:
def __init__(self,contents,modifier):
self.contents=contents
self.modifier=modifier
def add_states(self,builder):
if self.modifier=="?":
builder.remember_state()
self.add_contents(builder)
builder.new_transition_rem2cur("")
builder.forget_state()
elif self.modifier=="+":
self.add_contents(builder)
builder.remember_state()
self.add_contents(builder,1)
builder.set_current_to_remembered()
builder.forget_state()
elif self.modifier=="*":
builder.remember_state()
builder.new_transition_to_new("")
self.add_contents(builder,1)
builder.new_transition_rem2cur("")
builder.forget_state()
else:
self.add_contents(builder)
def add_contents(self,builder,loop=0):
if type(self.contents[0])==types.InstanceType:
if loop:
builder.remember_state()
self.contents[0].add_states(builder)
builder.new_transition_cur2rem("")
builder.set_current_to_remembered()
builder.forget_state()
else:
self.contents[0].add_states(builder)
else:
if loop:
builder.new_transition(self.contents[0],
builder.get_current_state(),
builder.get_current_state())
else:
builder.new_transition_to_new(self.contents[0])
class SeqContentModel(ContentModel):
def add_contents(self,builder,loop=0):
if loop:
builder.remember_state()
for cp in self.contents:
cp.add_states(builder)
if loop:
builder.new_transition_cur2rem("")
builder.forget_state()
class ChoiceContentModel(ContentModel):
def add_contents(self,builder,loop=0):
builder.remember_state()
end_states=[]
for cp in self.contents:
builder.new_state()
builder.new_transition_rem2cur("")
cp.add_states(builder)
end_states.append(builder.get_current_state())
builder.new_state()
for state in end_states:
builder.new_transition_2cur(state,"")
if loop:
builder.new_transition_cur2rem("")
builder.forget_state()
def hash(included):
no=0
exp=1L
for state in included:
if state:
no=no+exp
exp=exp*2L
return no
def fnda2fda(transitions,final_state,parser):
transitions.append([])
new_states={}
closure_hash={}
start_state=[0]*len(transitions)
compute_closure(0,start_state,transitions)
state_key=hash(start_state)
closure_hash[0]=state_key
add_transitions(0,transitions,new_states,start_state,state_key,parser,
closure_hash)
states=new_states.keys()
states.sort()
for state in states:
if state % 2==1:
new_states["start"]=state
break
new_states["final"]=pow(2L,final_state)
return new_states
def add_transitions(ix,transitions,new_states,cur_state_list,state_key,parser,
closure_hash):
new_states[state_key]={}
new_trans={}
no=0
for old_state in cur_state_list:
if old_state:
for (to,what) in transitions[no]:
if what!="":
if new_trans.has_key(what):
new_trans[what].append(to)
else:
new_trans[what]=[to]
no=no+1
for (over,destlist) in new_trans.items():
if len(destlist)==1 and closure_hash.has_key(destlist[0]):
new_state=closure_hash[destlist[0]]
else:
new_inc=[0]*len(transitions)
for to in destlist:
compute_closure(to,new_inc,transitions)
new_state=hash(new_inc)
if len(destlist)==1:
closure_hash[destlist[0]]=new_state
new_states[state_key][over]=new_state
if not new_states.has_key(new_state):
add_transitions(to,transitions,new_states,new_inc, new_state,parser,closure_hash)
def compute_closure(ix,included,transitions):
included[ix]=1
for (to,what) in transitions[ix]:
if what=="" and not included[to]:
compute_closure(to,included,transitions)
def print_trans(model):
ix=0
for transitions in model:
print "STATE: %d" % ix
for step in transitions:
print " TO %d OVER %s" % step
ix=ix+1
raw_input()
def print_states(states,stop=0):
assert not (states.has_key("start") or states.has_key("final"))
for trans_key in states.keys():
trans=states[trans_key]
print "State: "+`trans_key`
for (to,what) in trans:
try:
print " To: "+`to`+" over: "+what
except TypeError:
print "ERROR: "+`what`
if stop>1:
raw_input()
if stop:
raw_input()
def make_empty_model():
return { 1:{}, "final":1, "start":1 }
|
MIT License
|
shylent/python-tx-tftp
|
tftp/backend.py
|
IWriter.finish
|
python
|
def finish():
|
Tell this writer that there will be no more data and that the transfer
was successfully completed.
|
https://github.com/shylent/python-tx-tftp/blob/bcebd3be7521f726bc4769e351e9fbf203576c5e/tftp/backend.py#L106-L110
|
from os import fstat
from tftp.errors import Unsupported, FileExists, AccessViolation, FileNotFound
from tftp.util import deferred
from twisted.python.filepath import FilePath, InsecurePath
import shutil
import tempfile
from zope import interface
class IBackend(interface.Interface):
def get_reader(file_name):
def get_writer(file_name):
class IReader(interface.Interface):
size = interface.Attribute(
"The size of the file to be read, or C{None} if it's not known.")
def read(size):
def finish():
class IWriter(interface.Interface):
def write(data):
|
MIT License
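Since IWriter only declares the write/finish contract, a backend supplies a concrete class. A hedged sketch of a minimal in-memory implementer (this class is illustrative and not part of tftp.backend):

from zope import interface
from tftp.backend import IWriter

@interface.implementer(IWriter)
class MemoryWriter(object):
    """Collects written chunks in memory; finish() marks the transfer complete."""

    def __init__(self):
        self.chunks = []
        self.finished = False

    def write(self, data):
        self.chunks.append(data)

    def finish(self):
        # No more data will arrive and the transfer completed successfully.
        self.finished = True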
|
google/dspl
|
tools/dspltools/packages/dspllib/data_sources/data_source.py
|
DataSource.__init__
|
python
|
def __init__(self, data_source_identifier, verbose=True):
pass
|
Create a new DataSource object using the argument identifier.
Args:
data_source_identifier: An object used to identify the data source (e.g.,
string path to CSV file, URL, etc.)
verbose: Print out status messages to stdout
|
https://github.com/google/dspl/blob/db79dad685276dbf98ca44b875d1481bc240c5c1/tools/dspltools/packages/dspllib/data_sources/data_source.py#L295-L303
|
__author__ = 'Benjamin Yolken <yolken@google.com>'
import re
class DataSourceError(Exception):
pass
class DataSourceWarning(Warning):
pass
def GuessDataType(value, column_id=None):
stripped_value = value.strip().replace('"', '')
if re.search('^-?[0-9]+$', stripped_value):
if column_id == 'year':
return 'date'
else:
return 'integer'
elif re.search('^-?[0-9]+\.[0-9]+$', stripped_value):
return 'float'
elif re.search('^[0-9]+(/|-)[0-9]+((/|-)[0-9]+){0,1}$', stripped_value):
return 'date'
else:
return 'string'
def GuessDateFormat(value):
stripped_value = value.strip().replace('"', '')
year_match = re.search('^[0-9]{4}$', stripped_value)
if year_match:
return 'yyyy'
month_year_match = re.search(
'^[0-9]{1,2}(?P<separator>/|-)[0-9]{4}$', stripped_value)
if month_year_match:
return 'MM%syyyy' % month_year_match.groupdict()['separator']
year_month_match = re.search(
'^[0-9]{4}(?P<separator>/|-)[0-9]{1,2}$', stripped_value)
if year_month_match:
return 'yyyy%sMM' % year_month_match.groupdict()['separator']
month_day_year_match = re.search(
'^[0-9]{1,2}(?P<separator>/|-)'
'[0-9]{1,2}(?P=separator)[0-9]{4}$', stripped_value)
if month_day_year_match:
return 'MM%sdd%syyyy' % (
month_day_year_match.groupdict()['separator'],
month_day_year_match.groupdict()['separator'])
year_month_day_match = re.search(
'^[0-9]{4}(?P<separator>/|-)'
'[0-9]{1,2}(?P=separator)[0-9]{1,2}$', stripped_value)
if year_month_day_match:
return 'yyyy%sMM%sdd' % (
year_month_day_match.groupdict()['separator'],
year_month_day_match.groupdict()['separator'])
raise DataSourceError(
'Can\'t figure out date format for value: %s' % stripped_value)
def GuessDateConcept(data_format):
stripped_format = data_format.strip()
if re.search('^y+$', stripped_format):
return 'time:year'
elif re.search('^([yM]|[^a-zA-Z0-9])+$', stripped_format):
return 'time:month'
elif re.search('^([yMd]|[^a-zA-Z0-9])+$', stripped_format):
return 'time:day'
else:
raise DataSourceError(
'Can\'t figure out time concept for format: %s' % data_format)
class DataSourceColumnBundle(object):
def __init__(self, columns=()):
self.columns = list(columns)
self.column_dict = {}
for column in self.columns:
self.column_dict[column.column_id] = column
def AddColumn(self, column):
self.columns.append(column)
self.column_dict[column.column_id] = column
def GetColumnByID(self, column_id):
return self.column_dict[column_id]
def GetColumnByOrder(self, column_order):
return self.columns[column_order]
def GetColumnIterator(self):
return self.columns.__iter__()
def GetNumColumns(self):
return len(self.columns)
class DataSourceColumn(object):
def __init__(
self, column_id, data_type='', data_format='', concept_ref='',
concept_extension='', parent_ref='', slice_role='', rollup=False,
total_val='', internal_parameters=None):
self.column_id = column_id
self.data_type = data_type
self.data_format = data_format
self.concept_ref = concept_ref
self.concept_extension = concept_extension
self.parent_ref = parent_ref
self.slice_role = slice_role
self.rollup = rollup
self.total_val = total_val
self.internal_parameters = internal_parameters
class QueryParameters(object):
CONCEPT_QUERY = 0
SLICE_QUERY = 1
def __init__(self, query_type, column_ids=()):
self.query_type = query_type
self.column_ids = tuple(column_ids)
class TableData(object):
def __init__(self, rows=()):
self.rows = list(rows)
def MergeValues(self, join_source, num_columns=1):
assert len(self.rows) == len(join_source.rows)
for r, row in enumerate(self.rows):
self.rows[r] = row + join_source.rows[r][0:num_columns]
return self
def MergeConstant(self, constant_value):
for r, row in enumerate(self.rows):
self.rows[r] = row + [constant_value]
return self
class DataSource(object):
|
BSD 3-Clause New or Revised License
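DataSource.__init__ above is effectively abstract, so concrete sources subclass it and interpret the identifier themselves. A hypothetical sketch (the subclass name, behaviour, and import path are illustrative):

from dspllib.data_sources.data_source import DataSource  # assumed import path

class CsvPathDataSource(DataSource):
    """Illustrative subclass that treats the identifier as a CSV file path."""

    def __init__(self, data_source_identifier, verbose=True):
        super(CsvPathDataSource, self).__init__(data_source_identifier, verbose)
        self.csv_path = data_source_identifier
        if verbose:
            print('Reading data from %s' % self.csv_path)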
|
virustotal/vt-py
|
vt/client.py
|
Client.get_json
|
python
|
def get_json(self, path, *path_args, params=None):
return make_sync(self.get_json_async(path, *path_args, params=params))
|
Sends a GET request to a given API endpoint and parses the response.
Most VirusTotal API responses are JSON-encoded. This function parses the
JSON, checks for errors, and returns the server response as a dictionary.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
A dictionary with the backend's response.
|
https://github.com/virustotal/vt-py/blob/4912a1175082e66ccba90191018c425260a28788/vt/client.py#L402-L417
|
import aiohttp
import asyncio
import base64
import json
import io
from .error import APIError
from .feed import Feed
from .iterator import Iterator
from .object import Object
from .utils import make_sync
from .version import __version__
__all__ = [
'Client',
'ClientResponse',
'url_id']
_API_HOST = 'https://www.virustotal.com'
_ENDPOINT_PREFIX = '/api/v3'
_USER_AGENT_FMT = '{agent}; vtpy {version}; gzip'
def url_id(url):
return base64.urlsafe_b64encode(url.encode()).decode().strip("=")
class ClientResponse:
def __init__(self, aiohttp_resp):
self._aiohttp_resp = aiohttp_resp
def __getattr__(self, attr):
return getattr(self._aiohttp_resp, attr)
@property
def content(self):
return StreamReader(self._aiohttp_resp.content)
async def _get_chunked_response(self):
buffer = b""
async for data, _ in self.content.iter_chunks():
buffer += data
return buffer
async def read_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
return await self._get_chunked_response()
else:
return await self._aiohttp_resp.read()
def read(self):
return make_sync(self.read_async())
async def json_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
response_content = await self._get_chunked_response()
return json.loads(response_content)
else:
return await self._aiohttp_resp.json()
def json(self):
return make_sync(self.json_async())
async def text_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
response_content = await self._get_chunked_response()
return response_content.decode(self._aiohttp_resp.get_encoding())
else:
return await self._aiohttp_resp.text()
def text(self):
return make_sync(self.text_async())
class StreamReader:
def __init__(self, aiohttp_stream_reader):
self._aiohttp_stream_reader = aiohttp_stream_reader
def __getattr__(self, attr):
return getattr(self._aiohttp_stream_reader, attr)
async def read_async(self, n=-1):
return await self._aiohttp_stream_reader.read(n)
def read(self, n=-1):
return make_sync(self.read_async(n))
async def readany_async(self):
return await self._aiohttp_stream_reader.readany()
def readany(self):
return make_sync(self.readany_async())
async def readexactly_async(self, n):
return await self._aiohttp_stream_reader.readexactly(n)
def readexactly(self, n):
return make_sync(self.readexactly_async(n))
async def readline_async(self):
return await self._aiohttp_stream_reader.readline()
def readline(self):
return make_sync(self.readline_async())
async def readchunk_async(self):
return await self._aiohttp_stream_reader.readchunk()
def readchunk(self):
return make_sync(self.readchunk_async())
class Client:
def __init__(self, apikey, agent="unknown", host=None, trust_env=False,
timeout=300):
if not isinstance(apikey, str):
raise ValueError('API key must be a string')
if not apikey:
raise ValueError('API key can not be an empty string')
self._host = host or _API_HOST
self._apikey = apikey
self._agent = agent
self._session = None
self._trust_env = trust_env
self._timeout = timeout
def _full_url(self, path, *args):
try:
path = path.format(*args)
except IndexError:
raise ValueError('Not enough arguments to fill all placeholders in path')
if path.startswith('http'):
return path
return self._host + _ENDPOINT_PREFIX + path
def _get_session(self):
if not self._session:
self._session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False),
headers={
'X-Apikey': self._apikey,
'Accept-Encoding': 'gzip',
'User-Agent': _USER_AGENT_FMT.format_map({
'agent': self._agent, 'version': __version__})},
trust_env=self._trust_env,
timeout=aiohttp.ClientTimeout(total=self._timeout))
return self._session
async def __aenter__(self):
return self
async def __aexit__(self, type, value, traceback):
await self.close_async()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _extract_data_from_json(self, json_response):
if not 'data' in json_response:
raise ValueError('response does not returns a data field')
return json_response['data']
async def _response_to_json(self, response):
error = await self.get_error_async(response)
if error:
raise error
return await response.json_async()
async def _response_to_object(self, response):
json_response = await self._response_to_json(response)
try:
return Object.from_dict(self._extract_data_from_json(json_response))
except ValueError as err:
raise ValueError(f'response is not an object: {err}')
async def close_async(self):
if self._session:
await self._session.close()
self._session = None
def close(self):
return make_sync(self.close_async())
def delete(self, path, *path_args):
return make_sync(self.delete_async(path, *path_args))
async def delete_async(self, path, *path_args):
return ClientResponse(
await self._get_session().delete(self._full_url(path, *path_args)))
def download_file(self, hash, file):
return make_sync(self.download_file_async(hash, file))
async def download_file_async(self, hash, file):
response = await self.get_async(f'/files/{hash}/download')
error = await self.get_error_async(response)
if error:
raise error
while True:
chunk = await response.content.read_async(1024*1024)
if not chunk:
break
file.write(chunk)
def feed(self, feed_type, cursor=None):
return Feed(self, feed_type, cursor=cursor)
def get(self, path, *path_args, params=None):
return make_sync(self.get_async(path, *path_args, params=params))
async def get_async(self, path, *path_args, params=None):
return ClientResponse(
await self._get_session().get(
self._full_url(path, *path_args),
params=params))
def get_data(self, path, *path_args, params=None):
return make_sync(self.get_data_async(path, *path_args, params=params))
async def get_data_async(self, path, *path_args, params=None):
json_response = await self.get_json_async(path, *path_args, params=params)
return self._extract_data_from_json(json_response)
async def get_error_async(self, response):
if response.status == 200:
return None
if response.status >= 400 and response.status <= 499:
if response.content_type == 'application/json':
json_response = await response.json_async()
error = json_response.get('error')
if error:
return APIError.from_dict(error)
return APIError('ClientError', await response.text_async())
return APIError('ServerError', await response.text_async())
|
Apache License 2.0
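A minimal synchronous usage sketch for get_json above; the API key and file hash are placeholders, and the printed fields assume the usual JSON:API shape of a VirusTotal v3 response:

import vt

with vt.Client("<apikey>") as client:
    # Placeholders in the path are filled from *path_args by _full_url().
    info = client.get_json("/files/{}", "44d88612fea8a8f36de82e1278abb02f")
    print(info["data"]["type"], info["data"]["id"])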
|
danielholmstrom/flask-alchemyview
|
flask_alchemyview.py
|
AlchemyView.get
|
python
|
def get(self, id):
return self._response(self._get_item(id).
asdict(**(getattr(self, 'asdict_params',
self.dict_params or None)
or {})), 'get')
|
Handles GET requests
|
https://github.com/danielholmstrom/flask-alchemyview/blob/495c070ec3a53e8e9a2dd68ba1ed889ddd31833c/flask_alchemyview.py#L443-L448
|
from __future__ import absolute_import, division
import re
import os
import json
import datetime
import decimal
import logging
import traceback
import colander
from sqlalchemy.exc import IntegrityError
from flask import (Response,
url_for,
abort,
request,
redirect,
render_template,
current_app,
)
from flask.ext.classy import FlaskView
from werkzeug.exceptions import HTTPException
from jinja2.exceptions import TemplateNotFound
def _gettext(msg, *args, **kwargs):
return re.sub(r'%\(([a-z0-9_]+)\)', r'{\1}', msg).format(*args,
**kwargs)
try:
from flask.ext.babel import gettext
_ = gettext
except ImportError:
_ = _gettext
_logger = logging.getLogger('flask.ext.alchemyview')
def _remove_colander_null(result):
if isinstance(result, dict):
rc = {}
for (k, v) in result.iteritems():
if isinstance(v, dict) or isinstance(v, list):
rc[k] = _remove_colander_null(v)
else:
if v is not colander.null:
rc[k] = v
return rc
elif isinstance(result, list):
return [v for v in result if v is not colander.null]
else:
raise Exception("Argument 'result' is not dict or list(%r)" %
type(result))
def _exception_to_dict(error):
if isinstance(error, IntegrityError):
m = re.search(r'(Key) \((\w+)\)=\(([^)]*)\) already exists',
str(error.orig))
if m:
return {u'message': _(u"'%(key)' already exists", key=m.group(2)),
'errors': {m.group(2): _(u'Already exists')}}
elif isinstance(error, colander.Invalid):
return {u'errors': error.asdict(),
u"message": _("Invalid Data")}
_logger.debug('ecom.utils._exception_to_dict:'
'Got unhandled error: %r:%s\nTraceback: %s' %
(error, str(error),
traceback.format_exc()))
return {u'message': _(u'Unknown error'), u'errors': {}}
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
elif isinstance(obj, (decimal.Decimal)):
return unicode(obj)
elif hasattr(obj, 'asdict') and callable(getattr(obj, 'asdict')):
return obj.asdict()
else:
return json.JSONEncoder.default(self, obj)
class BadRequest(HTTPException):
def __init__(self, code, data):
if isinstance(data, Exception):
self.data = _exception_to_dict(data)
else:
self.data = (data[u'message']
if u'message' in data
else _(u'Unknown error'))
self.data = data
self.code = code
super(BadRequest, self).__init__(self.data[u'message'])
class AlchemyView(FlaskView):
JSONEncoder = _JSONEncoder
session = None
model = None
schema = None
update_schema = None
create_schema = None
dict_params = None
asdict_params = None
fromdict_params = None
max_page_limit = 50
page_limit = 10
sortby = None
sort_direction = 'asc'
sortby_map = None
template_suffixes = {'text/html': 'jinja2'}
def _json_dumps(self, obj, ensure_ascii=False, **kwargs):
kwargs['ensure_ascii'] = ensure_ascii
kwargs['cls'] = self.JSONEncoder
return json.dumps(obj, **kwargs)
def _json_loads(self, string, **kwargs):
return json.loads(string, **kwargs)
def _json_response(self, obj, status=200):
if isinstance(obj, Exception):
if status < 400:
status = 400
obj = _exception_to_dict(obj)
return Response(self._json_dumps(obj),
status=status,
mimetype='application/json')
def _base_query(self):
return self._get_session().query(self.model)
def _item_url(self, item):
if len(self.model.__table__.primary_key) != 1:
raise Exception("AlchemyView doesn't handle models with "
"composite primary key")
primary_key = [(column.name, column.type.python_type)
for column in self.model.__table__.primary_key]
primary_key_name = primary_key[0][0]
return url_for(self.build_route_name('get'),
id=getattr(item, primary_key_name))
def _get_item(self, id):
primary_key = [(column.name, column.type.python_type)
for column in self.model.__table__.primary_key]
if len(primary_key) != 1:
raise Exception("AlchemyView doesn't handle models with "
"composite primary key")
primary_key_type = primary_key[0][1]
primary_key_name = primary_key[0][0]
if primary_key_type not in (int, str, unicode):
raise Exception("AlchemyView can only handle int and string "
"primary keys not %r" % primary_key_type)
try:
if type(id) != primary_key_type:
id = primary_key_type(id)
except:
abort(404)
item = self._base_query().filter(
getattr(self.model,
primary_key_name) == id).limit(1).first()
if not item:
abort(404)
return item
def _get_session(self):
return self.session or current_app.extensions['sqlalchemy'].db.session
def _get_schema(self, data):
return self.schema()
def _get_create_schema(self, data):
if getattr(self, 'create_schema', None):
return self.create_schema()
else:
return self._get_schema(data)
def _get_update_schema(self, data):
if getattr(self, 'update_schema', None):
return self.update_schema()
else:
return self._get_schema(data)
def _get_response_mimetype(self):
best = request.accept_mimetypes.best_match(['application/json',
'text/html'])
if best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']:
return 'application/json'
else:
return 'text/html'
def _get_template_name(self, name, mimetype):
return os.path.join(self.get_route_base(),
'%s.%s' % (name,
self.template_suffixes[mimetype]))
def _response(self, data, template, status=200):
mimetype = self._get_response_mimetype()
if mimetype == 'application/json':
return self._json_response(data, status)
else:
if isinstance(data, Exception):
if status < 400:
status = 400
if status >= 400:
raise BadRequest(status, data)
else:
fn_name = 'before_%s_render' % template
if hasattr(self, fn_name) and callable(getattr(self, fn_name)):
kwargs = getattr(self, fn_name)(data) or {}
else:
kwargs = {}
try:
return render_template(self._get_template_name(template,
mimetype),
data=data,
**kwargs)
except TemplateNotFound:
raise BadRequest(406, {'message':
_('Not a valid Accept-Header')})
|
MIT License
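A hedged sketch of wiring an AlchemyView subclass so that its get() handler above serves GET /item/<id>; the model, schema, and app setup are illustrative and assume the Flask-SQLAlchemy and colander versions this module was written against:

import colander
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask_alchemyview import AlchemyView

app = Flask(__name__)
db = SQLAlchemy(app)

class Item(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))

class ItemSchema(colander.MappingSchema):
    name = colander.SchemaNode(colander.String())

class ItemView(AlchemyView):
    session = db.session
    model = Item
    schema = ItemSchema

ItemView.register(app)  # FlaskView.register maps GET /item/<id> to ItemView.get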
|
yhhhli/brecq
|
models/regnet.py
|
regnety_4000m
|
python
|
def regnety_4000m(**kwargs):
model = RegNet(regnetY_4000M_config, **kwargs)
return model
|
Constructs a RegNet-Y model under 4000M FLOPs.
|
https://github.com/yhhhli/brecq/blob/e455d62e93c70351961f8991c913b59435bd165f/models/regnet.py#L442-L447
|
import numpy as np
import torch.nn as nn
import math
regnetX_200M_config = {'WA': 36.44, 'W0': 24, 'WM': 2.49, 'DEPTH': 13, 'GROUP_W': 8, 'SE_ON': False}
regnetX_400M_config = {'WA': 24.48, 'W0': 24, 'WM': 2.54, 'DEPTH': 22, 'GROUP_W': 16, 'SE_ON': False}
regnetX_600M_config = {'WA': 36.97, 'W0': 48, 'WM': 2.24, 'DEPTH': 16, 'GROUP_W': 24, 'SE_ON': False}
regnetX_800M_config = {'WA': 35.73, 'W0': 56, 'WM': 2.28, 'DEPTH': 16, 'GROUP_W': 16, 'SE_ON': False}
regnetX_1600M_config = {'WA': 34.01, 'W0': 80, 'WM': 2.25, 'DEPTH': 18, 'GROUP_W': 24, 'SE_ON': False}
regnetX_3200M_config = {'WA': 26.31, 'W0': 88, 'WM': 2.25, 'DEPTH': 25, 'GROUP_W': 48, 'SE_ON': False}
regnetX_4000M_config = {'WA': 38.65, 'W0': 96, 'WM': 2.43, 'DEPTH': 23, 'GROUP_W': 40, 'SE_ON': False}
regnetX_6400M_config = {'WA': 60.83, 'W0': 184, 'WM': 2.07, 'DEPTH': 17, 'GROUP_W': 56, 'SE_ON': False}
regnetY_200M_config = {'WA': 36.44, 'W0': 24, 'WM': 2.49, 'DEPTH': 13, 'GROUP_W': 8, 'SE_ON': True}
regnetY_400M_config = {'WA': 27.89, 'W0': 48, 'WM': 2.09, 'DEPTH': 16, 'GROUP_W': 8, 'SE_ON': True}
regnetY_600M_config = {'WA': 32.54, 'W0': 48, 'WM': 2.32, 'DEPTH': 15, 'GROUP_W': 16, 'SE_ON': True}
regnetY_800M_config = {'WA': 38.84, 'W0': 56, 'WM': 2.4, 'DEPTH': 14, 'GROUP_W': 16, 'SE_ON': True}
regnetY_1600M_config = {'WA': 20.71, 'W0': 48, 'WM': 2.65, 'DEPTH': 27, 'GROUP_W': 24, 'SE_ON': True}
regnetY_3200M_config = {'WA': 42.63, 'W0': 80, 'WM': 2.66, 'DEPTH': 21, 'GROUP_W': 24, 'SE_ON': True}
regnetY_4000M_config = {'WA': 31.41, 'W0': 96, 'WM': 2.24, 'DEPTH': 22, 'GROUP_W': 64, 'SE_ON': True}
regnetY_6400M_config = {'WA': 33.22, 'W0': 112, 'WM': 2.27, 'DEPTH': 25, 'GROUP_W': 72, 'SE_ON': True}
BN = nn.BatchNorm2d
__all__ = ['regnetx_200m', 'regnetx_400m', 'regnetx_600m', 'regnetx_800m',
'regnetx_1600m', 'regnetx_3200m', 'regnetx_4000m', 'regnetx_6400m',
'regnety_200m', 'regnety_400m', 'regnety_600m', 'regnety_800m',
'regnety_1600m', 'regnety_3200m', 'regnety_4000m', 'regnety_6400m']
class SimpleStemIN(nn.Module):
def __init__(self, in_w, out_w):
super(SimpleStemIN, self).__init__()
self._construct(in_w, out_w)
def _construct(self, in_w, out_w):
self.conv = nn.Conv2d(
in_w, out_w, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn = BN(out_w)
self.relu = nn.ReLU(True)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SE(nn.Module):
def __init__(self, w_in, w_se):
super(SE, self).__init__()
self._construct(w_in, w_se)
def _construct(self, w_in, w_se):
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.f_ex = nn.Sequential(
nn.Conv2d(w_in, w_se, kernel_size=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(w_se, w_in, kernel_size=1, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class BottleneckTransform(nn.Module):
def __init__(self, w_in, w_out, stride, bm, gw, se_r):
super(BottleneckTransform, self).__init__()
self._construct(w_in, w_out, stride, bm, gw, se_r)
def _construct(self, w_in, w_out, stride, bm, gw, se_r):
w_b = int(round(w_out * bm))
num_gs = w_b // gw
self.a = nn.Conv2d(w_in, w_b, kernel_size=1, stride=1, padding=0, bias=False)
self.a_bn = BN(w_b)
self.a_relu = nn.ReLU(True)
self.b = nn.Conv2d(
w_b, w_b, kernel_size=3, stride=stride, padding=1, groups=num_gs, bias=False
)
self.b_bn = BN(w_b)
self.b_relu = nn.ReLU(True)
if se_r:
w_se = int(round(w_in * se_r))
self.se = SE(w_b, w_se)
self.c = nn.Conv2d(w_b, w_out, kernel_size=1, stride=1, padding=0, bias=False)
self.c_bn = BN(w_out)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBottleneckBlock(nn.Module):
def __init__(self, w_in, w_out, stride, bm=1.0, gw=1, se_r=None):
super(ResBottleneckBlock, self).__init__()
self._construct(w_in, w_out, stride, bm, gw, se_r)
def _add_skip_proj(self, w_in, w_out, stride):
self.proj = nn.Conv2d(
w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False
)
self.bn = BN(w_out)
def _construct(self, w_in, w_out, stride, bm, gw, se_r):
self.proj_block = (w_in != w_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(w_in, w_out, stride)
self.f = BottleneckTransform(w_in, w_out, stride, bm, gw, se_r)
self.relu = nn.ReLU(True)
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
x = self.relu(x)
return x
class AnyHead(nn.Module):
def __init__(self, w_in, nc):
super(AnyHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(w_in, nc, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class AnyStage(nn.Module):
def __init__(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
super(AnyStage, self).__init__()
self._construct(w_in, w_out, stride, d, block_fun, bm, gw, se_r)
def _construct(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
for i in range(d):
b_stride = stride if i == 0 else 1
b_w_in = w_in if i == 0 else w_out
self.add_module(
"b{}".format(i + 1), block_fun(b_w_in, w_out, b_stride, bm, gw, se_r)
)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class AnyNet(nn.Module):
def __init__(self, **kwargs):
super(AnyNet, self).__init__()
if kwargs:
self._construct(
stem_w=kwargs["stem_w"],
ds=kwargs["ds"],
ws=kwargs["ws"],
ss=kwargs["ss"],
bms=kwargs["bms"],
gws=kwargs["gws"],
se_r=kwargs["se_r"],
nc=kwargs["nc"],
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 1.0 / float(n))
m.bias.data.zero_()
def _construct(self, stem_w, ds, ws, ss, bms, gws, se_r, nc):
bms = bms if bms else [1.0 for _d in ds]
gws = gws if gws else [1 for _d in ds]
stage_params = list(zip(ds, ws, ss, bms, gws))
self.stem = SimpleStemIN(3, stem_w)
block_fun = ResBottleneckBlock
prev_w = stem_w
for i, (d, w, s, bm, gw) in enumerate(stage_params):
self.add_module(
"s{}".format(i + 1), AnyStage(prev_w, w, s, d, block_fun, bm, gw, se_r)
)
prev_w = w
self.head = AnyHead(w_in=prev_w, nc=nc)
def forward(self, x):
for module in self.children():
x = module(x)
return x
def quantize_float(f, q):
return int(round(f / q) * q)
def adjust_ws_gs_comp(ws, bms, gs):
ws_bot = [int(w * b) for w, b in zip(ws, bms)]
gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)]
ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)]
ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)]
return ws, gs
def get_stages_from_blocks(ws, rs):
ts_temp = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)
ts = [w != wp or r != rp for w, wp, r, rp in ts_temp]
s_ws = [w for w, t in zip(ws, ts[:-1]) if t]
s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist()
return s_ws, s_ds
def generate_regnet(w_a, w_0, w_m, d, q=8):
assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
ws_cont = np.arange(d) * w_a + w_0
ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))
ws = w_0 * np.power(w_m, ks)
ws = np.round(np.divide(ws, q)) * q
num_stages, max_stage = len(np.unique(ws)), ks.max() + 1
ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist()
return ws, num_stages, max_stage, ws_cont
class RegNet(AnyNet):
def __init__(self, cfg, bn=None):
b_ws, num_s, _, _ = generate_regnet(
cfg['WA'], cfg['W0'], cfg['WM'], cfg['DEPTH']
)
ws, ds = get_stages_from_blocks(b_ws, b_ws)
gws = [cfg['GROUP_W'] for _ in range(num_s)]
bms = [1 for _ in range(num_s)]
ws, gws = adjust_ws_gs_comp(ws, bms, gws)
ss = [2 for _ in range(num_s)]
se_r = 0.25 if cfg['SE_ON'] else None
STEM_W = 32
global BN
kwargs = {
"stem_w": STEM_W,
"ss": ss,
"ds": ds,
"ws": ws,
"bms": bms,
"gws": gws,
"se_r": se_r,
"nc": 1000,
}
super(RegNet, self).__init__(**kwargs)
def regnetx_200m(**kwargs):
model = RegNet(regnetX_200M_config, **kwargs)
return model
def regnetx_400m(**kwargs):
model = RegNet(regnetX_400M_config, **kwargs)
return model
def regnetx_600m(**kwargs):
model = RegNet(regnetX_600M_config, **kwargs)
return model
def regnetx_800m(**kwargs):
model = RegNet(regnetX_800M_config, **kwargs)
return model
def regnetx_1600m(**kwargs):
model = RegNet(regnetX_1600M_config, **kwargs)
return model
def regnetx_3200m(**kwargs):
model = RegNet(regnetX_3200M_config, **kwargs)
return model
def regnetx_4000m(**kwargs):
model = RegNet(regnetX_4000M_config, **kwargs)
return model
def regnetx_6400m(**kwargs):
model = RegNet(regnetX_6400M_config, **kwargs)
return model
def regnety_200m(**kwargs):
model = RegNet(regnetY_200M_config, **kwargs)
return model
def regnety_400m(**kwargs):
model = RegNet(regnetY_400M_config, **kwargs)
return model
def regnety_600m(**kwargs):
model = RegNet(regnetY_600M_config, **kwargs)
return model
def regnety_800m(**kwargs):
model = RegNet(regnetY_800M_config, **kwargs)
return model
def regnety_1600m(**kwargs):
model = RegNet(regnetY_1600M_config, **kwargs)
return model
def regnety_3200m(**kwargs):
model = RegNet(regnetY_3200M_config, **kwargs)
return model
|
MIT License
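A short sketch of instantiating the 4000M RegNet-Y above and running a dummy forward pass, assuming the module is importable as models.regnet and that torch is available:

import torch
from models.regnet import regnety_4000m  # assumed import path

model = regnety_4000m()
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # ImageNet-sized input
print(logits.shape)  # expected: torch.Size([1, 1000]) from the 1000-way head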
|
cc-hpc-itwm/tensorquant
|
Quantize/utils.py
|
quantizer_map
|
python
|
def quantizer_map(qmap):
if qmap is None:
return None
elif type(qmap) == str:
if qmap is '':
return None
try:
with open(qmap,'r') as hfile:
qmap = json.load(hfile)
except IOError:
qmap={"":qmap}
for key in qmap:
if type(qmap[key]) is str:
quantizer=get_quantizer(qmap[key])
qmap[key]=quantizer
return qmap
|
Creates a Quantizer map. All specified layers share the same quantizer type.
Args:
qmap: Location of the .json file, which specifies the mapping, or a dictionary with the same content.
Returns:
A dictionary containing the mapping from layers to quantizers.
|
https://github.com/cc-hpc-itwm/tensorquant/blob/bb14aacb489d8b5838c141c82b5b5d8c605202ba/Quantize/utils.py#L92-L121
|
import json
import tensorflow as tf
from TensorQuant.Quantize import Quantizers
def quantizer_selector(selector_str, arg_list):
if selector_str=="none":
quantizer = Quantizers.NoQuantizer()
elif selector_str=="zero":
quantizer = Quantizers.FixedPointQuantizer_zero(
int(arg_list[0]), int(arg_list[1]) )
elif selector_str=="down":
quantizer = Quantizers.FixedPointQuantizer_down(
int(arg_list[0]), int(arg_list[1]) )
elif selector_str=="nearest":
quantizer = Quantizers.FixedPointQuantizer_nearest(
int(arg_list[0]), int(arg_list[1]) )
elif selector_str=="stochastic":
quantizer = Quantizers.FixedPointQuantizer_stochastic(
int(arg_list[0]), int(arg_list[1]) )
elif selector_str=="sparse":
quantizer = Quantizers.SparseQuantizer(
float(arg_list[0]) )
elif selector_str=="logarithmic":
quantizer = Quantizers.LogarithmicQuantizer()
elif selector_str=="fp16":
quantizer = Quantizers.HalffpQuantizer()
elif selector_str=="binary":
if len(arg_list)==0:
quantizer = Quantizers.BinaryQuantizer( 1 )
if len(arg_list)==1:
quantizer = Quantizers.BinaryQuantizer( float(arg_list[0]) )
elif selector_str=="ternary":
if len(arg_list)==0:
quantizer = Quantizers.TernaryQuantizer( 1 )
if len(arg_list)==1:
quantizer = Quantizers.TernaryQuantizer( float(arg_list[0]) )
elif len(arg_list)==2:
quantizer = Quantizers.TernaryQuantizer( float(arg_list[0]), False, float(arg_list[1]))
else:
raise ValueError('Quantizer %s not recognized!'%(selector_str))
return quantizer
def split_quantizer_str(quantizer_str):
quantizer_type=''
args=[]
tokens = quantizer_str.split(',')
if len(tokens) > 0:
quantizer_type=tokens[0]
if len(tokens) > 1:
args=tokens[1:]
return (quantizer_type, args)
def get_quantizer(q_str):
if q_str == "":
q_str=None
if q_str is None:
return None
qtype, qargs= split_quantizer_str(q_str)
quantizer = quantizer_selector(qtype, qargs)
return quantizer
|
Apache License 2.0
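A hedged usage sketch for quantizer_map above with an inline dictionary instead of a .json path; the layer names and quantizer strings are illustrative, and the import path assumes the package is installed as TensorQuant:

from TensorQuant.Quantize.utils import quantizer_map  # assumed import path

# Each value is a quantizer description string parsed by get_quantizer:
# "nearest,16,8" builds a fixed-point nearest-rounding quantizer from the two
# integer arguments, and "fp16" selects the half-precision quantizer.
qmap = quantizer_map({"conv1": "nearest,16,8", "logits": "fp16"})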
|