repository_name: string, lengths 7–107
function_path: string, lengths 4–190
function_identifier: string, lengths 1–236
language: string, 1 class
function: string, lengths 9–647k
docstring: string, lengths 5–488k
function_url: string, lengths 71–285
context: string, lengths 0–2.51M
license: string, 5 classes
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_1/work_item_tracking_process/work_item_tracking_process_client.py
WorkItemTrackingProcessClient.remove_work_item_type_field
python
def remove_work_item_type_field(self, process_id, wit_ref_name, field_ref_name):
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    if wit_ref_name is not None:
        route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
    if field_ref_name is not None:
        route_values['fieldRefName'] = self._serialize.url('field_ref_name', field_ref_name, 'str')
    self._send(http_method='DELETE',
               location_id='bc0ad8dc-e3f3-46b0-b06c-5bf861793196',
               version='5.1-preview.2',
               route_values=route_values)
RemoveWorkItemTypeField.

[Preview API] Removes a field from a work item type. Does not permanently delete the field.

:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:param str field_ref_name: The reference name of the field.
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_1/work_item_tracking_process/work_item_tracking_process_client.py#L291-L308
 from msrest import Serializer, Deserializer from ...client import Client from . import models class WorkItemTrackingProcessClient(Client): def __init__(self, base_url=None, creds=None): super(WorkItemTrackingProcessClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '5264459e-e5e0-4bd8-b118-0985e68a4ec5' def create_process_behavior(self, behavior, process_id): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') content = self._serialize.body(behavior, 'ProcessBehaviorCreateRequest') response = self._send(http_method='POST', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.1-preview.2', route_values=route_values, content=content) return self._deserialize('ProcessBehavior', response) def delete_process_behavior(self, process_id, behavior_ref_name): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if behavior_ref_name is not None: route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str') self._send(http_method='DELETE', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.1-preview.2', route_values=route_values) def get_process_behavior(self, process_id, behavior_ref_name, expand=None): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if behavior_ref_name is not None: route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str') query_parameters = {} if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ProcessBehavior', response) def get_process_behaviors(self, process_id, expand=None): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') query_parameters = {} if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ProcessBehavior]', self._unwrap_collection(response)) def update_process_behavior(self, behavior_data, process_id, behavior_ref_name): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if behavior_ref_name is not None: route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str') content = self._serialize.body(behavior_data, 'ProcessBehaviorUpdateRequest') response = self._send(http_method='PUT', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.1-preview.2', route_values=route_values, content=content) return self._deserialize('ProcessBehavior', response) def create_control_in_group(self, control, process_id, wit_ref_name, group_id): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is 
not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') content = self._serialize.body(control, 'Control') response = self._send(http_method='POST', location_id='1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Control', response) def move_control_to_group(self, control, process_id, wit_ref_name, group_id, control_id, remove_from_group_id=None): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') if control_id is not None: route_values['controlId'] = self._serialize.url('control_id', control_id, 'str') query_parameters = {} if remove_from_group_id is not None: query_parameters['removeFromGroupId'] = self._serialize.query('remove_from_group_id', remove_from_group_id, 'str') content = self._serialize.body(control, 'Control') response = self._send(http_method='PUT', location_id='1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('Control', response) def remove_control_from_group(self, process_id, wit_ref_name, group_id, control_id): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') if control_id is not None: route_values['controlId'] = self._serialize.url('control_id', control_id, 'str') self._send(http_method='DELETE', location_id='1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58', version='5.1-preview.1', route_values=route_values) def update_control(self, control, process_id, wit_ref_name, group_id, control_id): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') if control_id is not None: route_values['controlId'] = self._serialize.url('control_id', control_id, 'str') content = self._serialize.body(control, 'Control') response = self._send(http_method='PATCH', location_id='1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Control', response) def add_field_to_work_item_type(self, field, process_id, wit_ref_name): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') content = self._serialize.body(field, 'AddProcessWorkItemTypeFieldRequest') response = self._send(http_method='POST', location_id='bc0ad8dc-e3f3-46b0-b06c-5bf861793196', version='5.1-preview.2', route_values=route_values, content=content) return 
self._deserialize('ProcessWorkItemTypeField', response) def get_all_work_item_type_fields(self, process_id, wit_ref_name): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') response = self._send(http_method='GET', location_id='bc0ad8dc-e3f3-46b0-b06c-5bf861793196', version='5.1-preview.2', route_values=route_values) return self._deserialize('[ProcessWorkItemTypeField]', self._unwrap_collection(response)) def get_work_item_type_field(self, process_id, wit_ref_name, field_ref_name): route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if field_ref_name is not None: route_values['fieldRefName'] = self._serialize.url('field_ref_name', field_ref_name, 'str') response = self._send(http_method='GET', location_id='bc0ad8dc-e3f3-46b0-b06c-5bf861793196', version='5.1-preview.2', route_values=route_values) return self._deserialize('ProcessWorkItemTypeField', response)
MIT License
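A minimal usage sketch for the remove_work_item_type_field method above; the organization URL, personal access token, process ID, and reference names are hypothetical placeholders, and msrest's BasicAuthentication is assumed for credentials (the client constructor signature comes from the surrounding context).

from msrest.authentication import BasicAuthentication

# Hypothetical connection details.
credentials = BasicAuthentication('', 'personal-access-token')
client = WorkItemTrackingProcessClient(
    base_url='https://dev.azure.com/your-organization',
    creds=credentials)

# Detach the custom field from the work item type; the field itself is not deleted.
client.remove_work_item_type_field(
    process_id='00000000-0000-0000-0000-000000000000',
    wit_ref_name='MyAgile.Bug',
    field_ref_name='Custom.RiskScore')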
opensourcesec/cirtkit
modules/reversing/viper/peepdf/PDFCore.py
PDFObject.encodeChars
python
def encodeChars(self):
    return (0, '')
Encode the content of the object if possible (only for PDFName, PDFString, PDFArray and PDFStreams)

@return: A tuple (status, statusContent), where statusContent is empty in case status = 0 or an error message in case status = -1
https://github.com/opensourcesec/cirtkit/blob/58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37/modules/reversing/viper/peepdf/PDFCore.py#L146-L152
import sys,os,re,hashlib,struct,aes as AES from PDFUtils import * from PDFCrypto import * from JSAnalysis import * from PDFFilters import decodeStream,encodeStream MAL_ALL = 1 MAL_HEAD = 2 MAL_EOBJ = 3 MAL_ESTREAM = 4 MAL_XREF = 5 MAL_BAD_HEAD = 6 pdfFile = None newLine = os.linesep isForceMode = False isManualAnalysis = False spacesChars = ['\x00','\x09','\x0a','\x0c','\x0d','\x20'] delimiterChars = ['<<','(','<','[','{','/','%'] monitorizedEvents = ['/OpenAction ','/AA ','/Names ','/AcroForm ', '/XFA '] monitorizedActions = ['/JS ','/JavaScript','/Launch','/SubmitForm','/ImportData'] monitorizedElements = ['/EmbeddedFiles ', '/EmbeddedFile', '/JBIG2Decode', 'getPageNthWord', 'arguments.callee', '/U3D', '/PRC', '/RichMedia', '.rawValue', 'keep.previous'] jsVulns = ['mailto', 'Collab.collectEmailInfo', 'util.printf', 'getAnnots', 'getIcon', 'spell.customDictionaryOpen', 'media.newPlayer', 'doc.printSeps', 'app.removeToolButton'] singUniqueName = 'CoolType.SING.uniqueName' bmpVuln = 'BMP/RLE heap corruption' vulnsDict = {'mailto':('mailto',['CVE-2007-5020']), 'Collab.collectEmailInfo':('Collab.collectEmailInfo',['CVE-2007-5659']), 'util.printf':('util.printf',['CVE-2008-2992']), '/JBIG2Decode':('Adobe JBIG2Decode Heap Corruption',['CVE-2009-0658']), 'getIcon':('getIcon',['CVE-2009-0927']), 'getAnnots':('getAnnots',['CVE-2009-1492']), 'spell.customDictionaryOpen':('spell.customDictionaryOpen',['CVE-2009-1493']), 'media.newPlayer':('media.newPlayer',['CVE-2009-4324']), '.rawValue':('Adobe Acrobat Bundled LibTIFF Integer Overflow',['CVE-2010-0188']), singUniqueName:(singUniqueName,['CVE-2010-2883']), 'doc.printSeps':('doc.printSeps',['CVE-2010-4091']), '/U3D':('/U3D',['CVE-2009-3953','CVE-2009-3959','CVE-2011-2462']), '/PRC':('/PRC',['CVE-2011-4369']), 'keep.previous':('Adobe Reader XFA oneOfChild Un-initialized memory vulnerability',['CVE-2013-0640']), bmpVuln:(bmpVuln,['CVE-2013-2729']), 'app.removeToolButton':('app.removeToolButton',['CVE-2013-3346'])} jsContexts = {'global':None} class PDFObject : def __init__(self, raw = None): self.references = [] self.type = '' self.value = '' self.rawValue = raw self.JSCode = [] self.updateNeeded = False self.containsJScode = False self.encryptedValue = raw self.encryptionKey = '' self.encrypted = False self.errors = [] self.referencesInElements = {} self.compressedIn = None def addError(self, errorMessage): if errorMessage not in self.errors: self.errors.append(errorMessage) def contains(self, string): value = str(self.value) rawValue = str(self.rawValue) encValue = str(self.encryptedValue) if re.findall(string,value,re.IGNORECASE) != [] or re.findall(string,rawValue,re.IGNORECASE) != [] or re.findall(string,encValue,re.IGNORECASE) != []: return True if self.containsJS(): for js in self.JSCode: if re.findall(string,js,re.IGNORECASE) != []: return True return False def containsJS(self): return self.containsJScode
MIT License
bb-ricardo/netbox-sync
module/sources/vmware/connection.py
VMWareHandler.create_session
python
def create_session(self):
    if self.session is not None:
        return True

    log.debug(f"Starting vCenter connection to '{self.host_fqdn}'")

    try:
        instance = SmartConnectNoSSL(
            host=self.host_fqdn,
            port=self.port,
            user=self.username,
            pwd=self.password
        )
        atexit.register(Disconnect, instance)
        self.session = instance.RetrieveContent()
    except (gaierror, OSError) as e:
        log.error(
            f"Unable to connect to vCenter instance '{self.host_fqdn}' on port {self.port}. "
            f"Reason: {e}"
        )
        return False
    except vim.fault.InvalidLogin as e:
        log.error(f"Unable to connect to vCenter instance '{self.host_fqdn}' on port {self.port}. {e.msg}")
        return False

    log.info(f"Successfully connected to vCenter '{self.host_fqdn}'")
    return True
Initialize session with vCenter

Returns
-------
bool: if initialization was successful or not
https://github.com/bb-ricardo/netbox-sync/blob/6268a84cbd9b82525700293ddd558d16eb010a7d/module/sources/vmware/connection.py#L259-L295
import atexit import pprint import re from ipaddress import ip_address, ip_network, ip_interface, IPv4Address, IPv6Address from socket import gaierror from pyVim.connect import SmartConnectNoSSL, Disconnect from pyVmomi import vim from module.common.logging import get_logger, DEBUG3 from module.common.misc import grab, dump, get_string_or_none, plural from module.common.support import normalize_mac_address, ip_valid_to_add_to_netbox from module.netbox.object_classes import * log = get_logger() class VMWareHandler: dependent_netbox_objects = [ NBTag, NBManufacturer, NBDeviceType, NBPlatform, NBClusterType, NBClusterGroup, NBDeviceRole, NBSite, NBCluster, NBDevice, NBVM, NBVMInterface, NBInterface, NBIPAddress, NBPrefix, NBTenant, NBVRF, NBVLAN ] settings = { "enabled": True, "host_fqdn": None, "port": 443, "username": None, "password": None, "cluster_exclude_filter": None, "cluster_include_filter": None, "host_exclude_filter": None, "host_include_filter": None, "vm_exclude_filter": None, "vm_include_filter": None, "netbox_host_device_role": "Server", "netbox_vm_device_role": "Server", "permitted_subnets": None, "collect_hardware_asset_tag": True, "match_host_by_serial": True, "cluster_site_relation": None, "host_site_relation": None, "vm_tenant_relation": None, "host_tenant_relation": None, "vm_platform_relation": None, "host_role_relation": None, "vm_role_relation":None, "dns_name_lookup": False, "custom_dns_servers": None, "set_primary_ip": "when-undefined", "skip_vm_comments": False, "skip_vm_templates": True, "strip_host_domain_name": False, "strip_vm_domain_name": False } deprecated_settings = { "netbox_host_device_role": "host_role_relation", "netbox_vm_device_role": "vm_role_relation" } init_successful = False inventory = None name = None source_tag = None source_type = "vmware" session = None site_name = None network_data = { "vswitch": dict(), "pswitch": dict(), "host_pgroup": dict(), "dpgroup": dict(), "dpgroup_ports": dict() } permitted_clusters = dict() processed_host_names = dict() processed_vm_names = dict() processed_vm_uuid = list() parsing_vms_the_first_time = True def __init__(self, name=None, settings=None, inventory=None): if name is None: raise ValueError(f"Invalid value for attribute 'name': '{name}'.") self.inventory = inventory self.name = name self.parse_config_settings(settings) self.source_tag = f"Source: {name}" self.site_name = f"vCenter: {name}" if self.enabled is False: log.info(f"Source '{name}' is currently disabled. Skipping") return self.create_session() if self.session is None: log.info(f"Source '{name}' is currently unavailable. Skipping") return self.init_successful = True def parse_config_settings(self, config_settings): validation_failed = False for deprecated_setting, alternative_setting in self.deprecated_settings.items(): if config_settings.get(deprecated_setting) != self.settings.get(deprecated_setting): log.warning(f"Setting '{deprecated_setting}' is deprecated and will be removed soon. " f"Consider changing your config to use the '{alternative_setting}' setting.") for setting in ["host_fqdn", "port", "username", "password"]: if config_settings.get(setting) is None: log.error(f"Config option '{setting}' in 'source/{self.name}' can't be empty/undefined") validation_failed = True if config_settings.get("permitted_subnets") is None: log.info(f"Config option 'permitted_subnets' in 'source/{self.name}' is undefined. 
" f"No IP addresses will be populated to Netbox!") else: config_settings["permitted_subnets"] = [x.strip() for x in config_settings.get("permitted_subnets").split(",") if x.strip() != ""] permitted_subnets = list() for permitted_subnet in config_settings["permitted_subnets"]: try: permitted_subnets.append(ip_network(permitted_subnet)) except Exception as e: log.error(f"Problem parsing permitted subnet: {e}") validation_failed = True config_settings["permitted_subnets"] = permitted_subnets for setting in [x for x in config_settings.keys() if "filter" in x]: if config_settings.get(setting) is None or config_settings.get(setting).strip() == "": continue re_compiled = None try: re_compiled = re.compile(config_settings.get(setting)) except Exception as e: log.error(f"Problem parsing regular expression for '{setting}': {e}") validation_failed = True config_settings[setting] = re_compiled for relation_option in ["cluster_site_relation", "host_site_relation", "host_tenant_relation", "vm_tenant_relation", "vm_platform_relation", "host_role_relation", "vm_role_relation"]: if config_settings.get(relation_option) is None: continue relation_data = list() relation_type = relation_option.split("_")[1] for relation in re.split(r",(?=(?:[^\"']*[\"'][^\"']*[\"'])*[^\"']*$)", config_settings.get(relation_option)): object_name = relation.split("=")[0].strip(' "') relation_name = relation.split("=")[1].strip(' "') if len(object_name) == 0 or len(relation_name) == 0: log.error(f"Config option '{relation}' malformed got '{object_name}' for " f"object name and '{relation_name}' for {relation_type} name.") validation_failed = True try: re_compiled = re.compile(object_name) except Exception as e: log.error(f"Problem parsing regular expression '{object_name}' for '{relation}': {e}") validation_failed = True continue relation_data.append({ "object_regex": re_compiled, f"{relation_type}_name": relation_name }) config_settings[relation_option] = relation_data if config_settings.get("dns_name_lookup") is True and config_settings.get("custom_dns_servers") is not None: custom_dns_servers = [x.strip() for x in config_settings.get("custom_dns_servers").split(",") if x.strip() != ""] tested_custom_dns_servers = list() for custom_dns_server in custom_dns_servers: try: tested_custom_dns_servers.append(str(ip_address(custom_dns_server))) except ValueError: log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' " f"does not appear to be an IP address.") validation_failed = True config_settings["custom_dns_servers"] = tested_custom_dns_servers if validation_failed is True: log.error("Config validation failed. Exit!") exit(1) for setting in self.settings.keys(): setattr(self, setting, config_settings.get(setting))
MIT License
quattor/aquilon
lib/aquilon/worker/commands/search_dns.py
update_classes
python
def update_classes(current_set, allowed_set):
    if not current_set:
        current_set |= allowed_set
    else:
        current_set &= allowed_set

    if not current_set:
        raise ArgumentError("Conflicting search criteria has been specified.")
Small helper for filtering options. For the first option, we want the set of possible classes initialized; for any further options, we want the existing set to be restricted. If the set becomes empty, then we have conflicting options.
https://github.com/quattor/aquilon/blob/6562ea0f224cda33b72a6f7664f48d65f96bd41a/lib/aquilon/worker/commands/search_dns.py#L56-L70
from aquilon.exceptions_ import ArgumentError from aquilon.aqdb.model import ( AddressAlias, AddressAssignment, Alias, ARecord, DnsDomain, DnsEnvironment, DnsRecord, DynamicStub, Fqdn, Network, ReservedName, ServiceAddress, SrvRecord, ) from aquilon.aqdb.model.dns_domain import parse_fqdn from aquilon.aqdb.model.network_environment import get_net_dns_envs from aquilon.worker.broker import BrokerCommand from aquilon.worker.formats.list import StringAttributeList from aquilon.worker.formats.dns_record import DnsDump from sqlalchemy.orm import (contains_eager, undefer, subqueryload, lazyload, aliased) from sqlalchemy.sql import or_, and_, null DNS_RRTYPE_MAP = {'a': ARecord, 'cname': Alias, 'srv': SrvRecord} _target_set = frozenset([Alias, SrvRecord, AddressAlias]) _ip_set = frozenset([ARecord, DynamicStub]) _primary_name_set = frozenset([ARecord, ReservedName])
Apache License 2.0
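To make the behavior of update_classes above concrete, here is an illustrative sketch using record classes imported in the surrounding context; the option-to-class mapping is hypothetical.

# The first option initializes the set; later options intersect with it.
allowed_for_fqdn = {ARecord, Alias, ReservedName}
allowed_for_ip = {ARecord, DynamicStub}

current = set()
update_classes(current, allowed_for_fqdn)   # current == {ARecord, Alias, ReservedName}
update_classes(current, allowed_for_ip)     # current == {ARecord}
# A further option allowing only {SrvRecord} would empty the set and
# raise ArgumentError("Conflicting search criteria has been specified.")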
tensorflow/graphics
tensorflow_graphics/projects/gan/losses.py
wasserstein_generator_loss
python
def wasserstein_generator_loss(
        discriminator_output_generated_data: tf.Tensor,
        name: str = 'wasserstein_generator_loss') -> tf.Tensor:
    with tf.name_scope(name=name):
        return -discriminator_output_generated_data
Generator loss for Wasserstein GAN.

This loss function is generally used together with a regularization of the discriminator such as weight clipping (https://arxiv.org/abs/1701.07875), gradient penalty (https://arxiv.org/abs/1704.00028) or spectral normalization (https://arxiv.org/abs/1802.05957).

Args:
  discriminator_output_generated_data: Output of the discriminator for generated data.
  name: The name of the name_scope that is placed around the loss.

Returns:
  The loss for the generator.
https://github.com/tensorflow/graphics/blob/d0817aec7dee35635814e925a59d83955459d93c/tensorflow_graphics/projects/gan/losses.py#L171-L190
import collections from typing import Sequence, Union import tensorflow as tf def gradient_penalty_loss(real_data: Union[tf.Tensor, Sequence[tf.Tensor]], generated_data: Union[tf.Tensor, Sequence[tf.Tensor]], discriminator: tf.keras.Model, weight: float = 10.0, eps: float = 1e-8, name_scope: str = 'gradient_penalty') -> tf.Tensor: with tf.name_scope(name=name_scope): with tf.GradientTape() as tape: if (isinstance(real_data, tf.Tensor) and isinstance(generated_data, tf.Tensor)): epsilon = tf.random.uniform( [tf.shape(real_data)[0]] + [1] * (real_data.shape.ndims - 1), minval=0.0, maxval=1.0, dtype=real_data.dtype) interpolated_data = epsilon * real_data + (1.0 - epsilon) * generated_data elif (isinstance(real_data, collections.Sequence) and isinstance(generated_data, collections.Sequence)): if len(real_data) != len(generated_data): raise ValueError( 'The number of elements in real_data and generated_data are ' 'expected to be equal but got: %d and %d' % (len(real_data), len(generated_data))) epsilon = tf.random.uniform( [tf.shape(real_data[0])[0]] + [1] * (real_data[0].shape.ndims - 1), minval=0.0, maxval=1.0, dtype=real_data[0].dtype) interpolated_data = [ epsilon * real_level + (1.0 - epsilon) * generated_level for real_level, generated_level in zip(real_data, generated_data) ] else: raise TypeError( 'real_data and generated data should either both be a tf.Tensor ' 'or both a sequence of tf.Tensor but got: %s and %s' % (type(real_data), type(generated_data))) tape.watch(interpolated_data) interpolated_labels = discriminator(interpolated_data) with tf.name_scope(name='gradients'): gradients = tape.gradient( target=interpolated_labels, sources=interpolated_data) if isinstance(real_data, tf.Tensor): gradient_squares = tf.reduce_sum( input_tensor=tf.square(gradients), axis=tuple(range(1, gradients.shape.ndims))) gradient_norms = tf.sqrt(gradient_squares + eps) penalties_squared = tf.square(gradient_norms - 1.0) return weight * penalties_squared else: all_penalties_squared = [] for gradients_level in gradients: gradient_squares_level = tf.reduce_sum( input_tensor=tf.square(gradients_level), axis=tuple(range(1, gradients_level.shape.ndims))) gradient_norms_level = tf.sqrt(gradient_squares_level + eps) all_penalties_squared.append(tf.square(gradient_norms_level - 1.0)) return weight * tf.add_n(all_penalties_squared) * 1.0 / len(real_data) def _sum_of_squares(input_tensor): return tf.reduce_sum( input_tensor=tf.square(input_tensor), axis=tuple(range(1, input_tensor.shape.ndims))) def r1_regularization(real_data: Union[tf.Tensor, Sequence[tf.Tensor]], discriminator: tf.keras.Model, weight: float = 10.0, name='r1_regularization') -> tf.Tensor: with tf.name_scope(name): with tf.GradientTape() as tape: tape.watch(real_data) discriminator_output = discriminator(real_data) with tf.name_scope(name='gradients'): gradients = tape.gradient( target=discriminator_output, sources=real_data) if isinstance(real_data, tf.Tensor): gradient_squares = _sum_of_squares(gradients) return weight * 0.5 * gradient_squares else: gradient_squares_level = [ _sum_of_squares(gradients_level) for gradients_level in gradients ] return weight * 0.5 * tf.add_n(gradient_squares_level) * 1.0 / len( real_data)
Apache License 2.0
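A brief sketch of calling wasserstein_generator_loss above on a batch of discriminator scores; the score values are made up for illustration.

import tensorflow as tf

# Discriminator outputs for three generated samples (hypothetical values).
discriminator_scores = tf.constant([0.3, -1.2, 0.8])
loss = wasserstein_generator_loss(discriminator_scores)
# loss == [-0.3, 1.2, -0.8]; minimizing it drives the discriminator's
# scores for generated data upward.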
mosen/salt-osx
_modules/plist_serialization.py
read_keys
python
def read_keys(path, keys):
    dataObject = _read_plist(path)
    collector = {}
    _objects_for_dict(dataObject, keys, collector)
    return collector
Read values of keys described by a dict. Each dict entry is traversed until it has no child dict.

path
    An absolute path to a property list (.plist) file, including the extension

keys
    A dict describing a key or nested keys, with any leaf values used to look up the corresponding plist value.
https://github.com/mosen/salt-osx/blob/818d4ae89bb2853b28999a8ddb883c0fe1b1a657/_modules/plist_serialization.py#L536-L553
import logging import salt.exceptions log = logging.getLogger(__name__) HAS_LIBS = False try: import os from Foundation import NSData, NSPropertyListSerialization, NSPropertyListMutableContainers, NSPropertyListXMLFormat_v1_0, NSPropertyListBinaryFormat_v1_0, NSNumber, NSString, NSMutableDictionary, NSMutableArray, NSMutableData HAS_LIBS = True except ImportError: log.debug('Error importing dependencies for plist execution module.') __virtualname__ = 'plist' def __virtual__(): if __grains__.get('kernel') != 'Darwin': return False if not HAS_LIBS: return False return __virtualname__ def _read_plist(filepath): if not os.path.isfile(filepath): log.debug('Tried to read non-existent property list at path: {0}'.format(filepath)) return None plistData = NSData.dataWithContentsOfFile_(filepath) dataObject, plistFormat, error = NSPropertyListSerialization.propertyListFromData_mutabilityOption_format_errorDescription_( plistData, NSPropertyListMutableContainers, None, None) if error: error = error.encode('ascii', 'ignore') raise salt.exceptions.SaltInvocationError( 'Error decoding Property List : {}'.format(error) ) else: return dataObject def _write_plist(dataObject, filepath, format=NSPropertyListXMLFormat_v1_0): plistData, error = NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_( dataObject, format, None) if error: error = error.encode('ascii', 'ignore') raise salt.exceptions.SaltInvocationError( 'Error encoding Property List: {}'.format(error) ) else: if plistData.writeToFile_atomically_(filepath, True): return else: raise salt.exceptions.SaltInvocationError( 'Failed to write Property List at path: {}'.format(filepath) ) def _generate_plist_string(rootObject, format=NSPropertyListXMLFormat_v1_0): plistData, error = NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_( rootObject, format, None) if error: error = error.encode('ascii', 'ignore') raise salt.exceptions.SaltInvocationError( 'Error encoding Property List: {}'.format(error) ) else: return str(plistData) def _value_to_nsobject(value, nstype): return { 'string': lambda v: NSString.stringWithUTF8String_(v), 'int': lambda v: NSNumber.numberWithInt_(v), 'float': lambda v: NSNumber.numberWithFloat_(v), 'bool': lambda v: True if v == 'true' else False, 'data': lambda v: NSMutableData.dataWithLength_(len(value)).initWithBase64EncodedString_options_(value) }[nstype](value) def _objects_for_dict(dict, keys, collector): if dict is None: collector = None return for key, value in keys.items(): if type(value) is dict: collector[key] = {} plist_value = dict.objectForKey_(key) _objects_for_dict(plist_value, value, collector[key]) else: collector[key] = dict.objectForKey_(key) def _set_objects_for_keys(dict, keys, changed=None): if changed is None: changed = dict() for key, value in keys.items(): existing_value = dict.objectForKey_(key) if type(value) is dict: if existing_value is None: child = NSMutableDictionary() dict.setObject_forKey_(child, key) changed[key] = {} _set_objects_for_keys(child, value, changed[key]) else: if existing_value != value: dict.setObject_forKey_(value, key) changed[key] = value def _remove_objects_for_keys(dict, keys, changed=None): if changed is None: changed = dict() for key, value in keys.items(): existing_value = dict.objectForKey_(key) if not existing_value is None: if type(value) is dict: changed[key] = {} _remove_objects_for_keys(existing_value, value, changed[key]) else: dict.removeObjectForKey_(key) changed[key] = value def _object_for_key_list(dict, keys, create=False): 
key = keys.pop(0) if len(key) == 0: return dict if len(keys) > 0: value = dict.objectForKey_(key) if value is None: if create: created = NSMutableDictionary() dict.setObject_forKey_(created, key) return _object_for_key_list(created, keys, create) else: log.debug('No key found in Property List: {0}'.format(key)) return None else: return _object_for_key_list(dict.objectForKey_(key), keys, create) else: return dict.objectForKey_(key) def _set_object_for_key_list(dict, keys, value, create=True, createNSType=NSMutableDictionary): key = keys.pop(0) if len(key) == 0: return dict if len(keys) > 0: return _object_for_key_list(dict.objectForKey_(key), keys, create) else: dict.setObject_forKey_(value, key) def _addObjectForKeyList(dict, keys, value, create=True): key = keys.pop(0) if len(key) == 0: return dict if len(keys) > 0: return _object_for_key_list(dict.objectForKey_(key), keys, create) else: dict.setObject_forKey_(value, key) def _remove_object_for_key_list(dict, keys): key = keys.pop(0) if len(key) == 0: return dict if len(keys) > 0: return _object_for_key_list(dict.objectForKey_(key), keys) else: return dict.removeObjectForKey_(key) def gen_string(data, format='xml'): serialization = NSPropertyListXMLFormat_v1_0 if format == 'xml' else NSPropertyListBinaryFormat_v1_0 plistData, error = NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_( data, serialization, None) if error: error = error.encode('ascii', 'ignore') log.debug('Error writing plist') log.debug(error) raise NSPropertyListSerializationException(error) else: return plistData def parse_string(data): plistData = buffer(data) dataObject, plistFormat, error = NSPropertyListSerialization.propertyListFromData_mutabilityOption_format_errorDescription_( plistData, NSPropertyListMutableContainers, None, None) if error: error = error.encode('ascii', 'ignore') import traceback log.debug('Error parsing plist from string') log.debug(error) raise NSPropertyListSerializationException(error) else: return dataObject def read_key(path, key=''): dataObject = _read_plist(path) if dataObject is None: return None keys = key.split(':') if type(keys) is str: keys = list(keys) value = _object_for_key_list(dataObject, keys) return value def write_key(path, key, nstype, value): log.debug('Reading original plist for modification at path: %s' % path) dataObject = _read_plist(path) log.debug('Deriving key hierarchy from colon separated string') keys = key.split(':') if type(keys) is str: keys = list(keys) if dataObject is None: dataObject = NSMutableDictionary() log.debug('Performing string to NSObject conversion') nsval = _value_to_nsobject(value, nstype) log.debug('Setting object value in hierarchy') _set_object_for_key_list(dataObject, keys, nsval) log.debug('Writing out plist to original path') _write_plist(dataObject, path) def delete_key(path, key): dataObject = _read_plist(path) if dataObject is None: return None keys = key.split(':') if type(keys) is str: keys = list(keys) _remove_object_for_key_list(dataObject, keys) _write_plist(dataObject, path) def append_key(path, key, nstype, value): log.debug('Reading original plist for modification at path: %s' % path) root = _read_plist(path) log.debug('Deriving key hierarchy from colon separated string') keys = key.split(':') if type(keys) is str: keys = list(keys) if root is None: raise salt.exceptions.SaltInvocationError('Tried to append to non existing file, not currently supported.') log.debug('Performing string to NSObject conversion') nsval = _value_to_nsobject(value, nstype) 
log.debug('Setting object value in hierarchy') if len(keys) > 1: parent = _object_for_key_list(root, keys[:-1]) else: parent = root log.debug('Updating or creating object at key: {}'.format(keys[-1])) collection = parent.objectForKey_(keys[-1]) if collection is None or type(collection) is not NSMutableArray: collection = NSMutableArray() parent.setObject_forKey_(collection, keys[-1]) collection.addObject_(nsval) log.debug('Writing out plist to original path') from pprint import pprint pprint(root) xml_plist = _generate_plist_string(root, NSPropertyListXMLFormat_v1_0) log.debug(xml_plist) _write_plist(root, path) def read(path): dataObject = _read_plist(path) return dataObject def write(path, contents_dict): _write_plist(path, contents_dict)
MIT License
anatolikalysch/vmattack
static/static_deobfuscate.py
get_distorm_info
python
def get_distorm_info(inst_addr):
    size = ItemSize(inst_addr)
    inst_bytes = GetManyBytes(inst_addr, size)
    inst = distorm3.Decompose(inst_addr, inst_bytes, distorm3.Decode64Bits, 0)
    print inst[0]
    i = inst[0]
    print 'InstBytes ', i.instructionBytes
    print 'Opcode ', i.opcode
    for o in i.operands:
        print 'operand ', o
        print 'operand type', o.type
    for f in i.flags:
        print 'flag ', f
    print 'raw_flags ', i.rawFlags
    print 'inst_class ', i.instructionClass
    print 'flow_control ', i.flowControl
    print 'address ', i.address
    print 'size ', i.size
    print 'dt ', i.dt
    print 'valid ', i.valid
    print 'segment ', i.segment
    print 'unused_Prefixes ', i.unusedPrefixesMask
    print 'mnemonic ', i.mnemonic
    print 'inst_class ', i.instructionClass
@brief Prints whole distorm3 info of the given instruction
@param inst_addr Address of instruction
https://github.com/anatolikalysch/vmattack/blob/67dcce6087163d85bbe7780e3f6e6e9e72e2212a/static/static_deobfuscate.py#L612-L640
from idaapi import * from idautils import * from idc import * from lib.Instruction import Instruction from lib.Optimize import * from lib.Register import (get_reg_by_size, get_reg_class) from lib.VmInstruction import VmInstruction from lib.VmInstruction import (add_ret_pop, to_vpush) import distorm3 import lib.PseudoInstruction as PI import lib.StartVal as SV from ui.BBGraphViewer import show_graph from lib.VMRepresentation import VMContext, get_vmr bb_colors = [0xddddff, 0xffdddd, 0xddffdd, 0xffddff, 0xffffdd, 0xddffff] def calc_code_addr(instr, base): if SV.dissassm_type == SV.ASSEMBLER_32: return Dword((instr * 4) + base) else: return Qword((instr * 8) + base) def get_instruction_list(vc, base): inst_addr = calc_code_addr(vc, base) if not isCode(GetFlags(inst_addr)): MakeUnknown(inst_addr, 1, DOUNK_SIMPLE) MakeCode(inst_addr) inst_lst = [] end_of_instruction_block = False while not end_of_instruction_block: size = ItemSize(inst_addr) inst_bytes = GetManyBytes(inst_addr, size) inst = Instruction(inst_addr, inst_bytes) if inst.is_uncnd_jmp(): end_of_instruction_block = True elif inst.is_ret(): inst_lst.append(inst) end_of_instruction_block = True else: inst_lst.append(inst) inst_addr = NextHead(inst_addr) return inst_lst def clear_comments(ea, endaddr): while ea <= endaddr: MakeComm(ea, "") ea = ea + 1 def get_start_push(vm_addr): inst_addr = vm_addr ret = [] end_of_instruction_block = False while not end_of_instruction_block: size = ItemSize(inst_addr) inst_bytes = GetManyBytes(inst_addr, size) inst = Instruction(inst_addr, inst_bytes) if inst.is_mov_basep_stackp(): end_of_instruction_block = True else: inst_addr = NextHead(inst_addr) ret.append(inst) return ret jump_dict = {} def get_catch_reg(reg, length): reg_class = get_reg_class(reg) if reg_class == None: return '' catch_reg = get_reg_by_size(reg_class, length * 8) if catch_reg == None: catch_reg = '' return catch_reg def first_deobfuscate(ea, base, endaddr): curraddr = ea instr_lst = [] vminst_lst = [] catch_value = None while curraddr <= endaddr: inst_addr = curraddr vc = Byte(curraddr) instr_lst = get_instruction_list(vc, base) if len(instr_lst) < 1: print 'error occured' curraddr += 1 continue has_catch = False catch_instr = None for pos, inst in enumerate(instr_lst): if inst.is_catch_instr(): catch_instr = inst has_catch = True break if has_catch: if catch_instr.is_byte_mov(): catch_value = Byte(curraddr + 1) length = 2 elif catch_instr.is_word_mov(): catch_value = Word(curraddr + 1) length = 3 elif catch_instr.is_double_mov(): catch_value = Dword(curraddr + 1) length = 5 elif catch_instr.is_quad_mov(): catch_value = Qword(curraddr + 1) length = 9 else: length = 1 curraddr += length MakeUnknown(inst_addr, length, DOUNK_SIMPLE) if has_catch: catch_reg = get_catch_reg(catch_instr.get_op_str(1), length - 1) else: catch_reg = '' vm_inst = VmInstruction(instr_lst, catch_value, catch_reg, inst_addr) vminst_lst.append(vm_inst) if (vm_inst.Pseudocode == None): continue if (vm_inst.Pseudocode.inst_type == PI.JMP_T or vm_inst.Pseudocode.inst_type == PI.RET_T): if isCode(GetFlags(curraddr)): if curraddr in jump_dict: curraddr = jump_dict[curraddr] continue Jump(curraddr) answer = AskYN(0, ('Should this regular x86 at address ' + '{0:#x} instructions be deobfuscated?'.format(curraddr))) if answer == 0 or answer == -1: old_addr = curraddr curraddr = AskAddr(curraddr, 'Insert Address where deobfuscation will be continued!') jump_dict[old_addr] = curraddr return vminst_lst def deobfuscate_all(base): catch_byte = 0x00 vm_inst_lst = [] while 
catch_byte <= 0xff: inst_lst = get_instruction_list(catch_byte, base) vm_inst = VmInstruction(inst_lst, 0x0, '', (SV.dissassm_type / 8 * catch_byte) + base) vm_inst.get_pseudo_code() vm_inst_lst.append(vm_inst) catch_byte += 1 return vm_inst_lst def display_ps_inst(ps_inst_lst): length = len(ps_inst_lst) comm = '' for pos, item in enumerate(ps_inst_lst): if pos < length - 1: addr = item.addr next_addr = ps_inst_lst[pos + 1].addr else: addr = item.addr next_addr = item.addr + 1 if addr == next_addr: comm += str(item)[:len(str(item)) - 1] + '\t\t' + item.comment + '\n' else: comm += str(item)[:len(str(item)) - 1] + '\t\t' + item.comment + '\n' MakeComm(addr, comm) comm = '' def display_vm_inst(vm_inst_lst): length = len(vm_inst_lst) comm = '' for pos, item in enumerate(vm_inst_lst): if pos < length - 1: addr = item.addr next_addr = vm_inst_lst[pos + 1].addr else: addr = item.addr next_addr = item.addr + 1 if addr == next_addr: comm += str(item)[:len(str(item)) - 1] + '\n' else: comm += str(item)[:len(str(item)) - 1] + '\n' MakeComm(addr, comm) comm = '' def read_in_comments(start, end): ret = [] addr = start while addr <= end: comment = CommentEx(addr, 0) r_comment = CommentEx(addr, 1) if comment == None and r_comment == None: addr += 1 elif r_comment == None and comment != None: ret.append((comment, addr)) addr += 1 elif r_comment != None and comment == None: print 'r_comment' ret.append((r_comment, addr)) addr += 1 else: erg_comm = r_comment + '\n' + comment ret.append((erg_comm, addr)) addr += 1 return ret def find_start(start, end): addr = start erg = 0 counter = 0 while addr <= end: a = DfirstB(addr) if (a != BADADDR): counter += 1 erg = addr addr += 1 if counter != 1: print 'could not resolve start_addr' return BADADDR else: return erg def set_dissassembly_type(): if BADADDR == 0xffffffffffffffff: SV.dissassm_type = SV.ASSEMBLER_64 else: SV.dissassm_type = SV.ASSEMBLER_32 def get_jaddr_from_comments(pp_lst, comment_lst): ret = [] for comment, caddr in comment_lst: if 'jumps to: ' in comment: index = comment.find('jumps to: 0x') if index == -1: continue jmps = comment[index:len(comment)] index = jmps.find('0x') if index == -1: continue jmps = jmps[index:len(jmps)] str_lst = jmps.split(', ') for sub_str in str_lst: ret.append((long(sub_str, 16), caddr)) else: continue return ret def get_jmp_input_found(cjmp_addrs, jmp_addrs): ejmp_addrs = [] ejmp_addrs += cjmp_addrs for (jaddr, inst_addr) in jmp_addrs: found = False for _, cinst_addr in cjmp_addrs: if cinst_addr == inst_addr: found = True if not found: ejmp_addrs.append((jaddr, inst_addr)) return ejmp_addrs def change_comments(pp_lst, cjmp_addrs): for item in pp_lst: if item.inst_type == PI.JMP_T: found_vals = [] for jaddr, inst_addr in cjmp_addrs: if inst_addr == item.addr: found_vals.append(jaddr) if len(found_vals) == 0: continue comment = 'jumps to: ' found_addr = False for addr in found_vals: comment += '{0:#x}, '.format(addr) comment = comment[:len(comment) - 2] item.comment = comment def get_jmp_addr(bb): for inst in bb: if inst.inst_type == PI.JMP_T: return inst.addr return None def has_ret(bb): return (lambda bb: True if 'ret_T' in map(lambda inst: inst.inst_type, bb) else False)(bb) def get_jmp_loc(jmp_addr, jmp_addrs): return [jmp_to for jmp_to, j_addr in jmp_addrs if j_addr == jmp_addr] def deobfuscate(code_saddr, base_addr, code_eaddr, vm_addr, display=4, real_start=0): set_dissassembly_type() comment_list = read_in_comments(code_saddr, code_eaddr) if real_start == 0: start_addr = find_start(code_saddr, code_eaddr) else: 
start_addr = real_start if start_addr == BADADDR: start_addr = code_saddr f_start_lst = [] if vm_addr != 0: f_start_lst = get_start_push(vm_addr) f_start_lst = to_vpush(f_start_lst, start_addr) vm_inst_lst = first_deobfuscate(code_saddr, base_addr, code_eaddr) vm_inst_lst1 = deobfuscate_all(base_addr) display_vm_inst(vm_inst_lst1) pseudo_lst = add_ret_pop(vm_inst_lst) push_pop_lst = [] lst = [] for inst in pseudo_lst: if inst.addr == start_addr: lst = f_start_lst lst += inst.make_pop_push_rep() for rep in lst: push_pop_lst.append(rep) lst = [] cjmp_addrs = get_jaddr_from_comments(push_pop_lst, comment_list) jmp_addrs = get_jmp_addresses(push_pop_lst, code_eaddr) jmp_addrs = get_jmp_input_found(cjmp_addrs, jmp_addrs) change_comments(push_pop_lst, cjmp_addrs) basic_blocks = find_basic_blocks(push_pop_lst, start_addr, jmp_addrs) if basic_blocks == None: basic_blocks = [(code_saddr, code_eaddr)] color_basic_blocks(basic_blocks) basic_lst = make_bb_lists(push_pop_lst, basic_blocks) has_loc = has_locals(basic_lst) clear_comments(code_saddr, code_eaddr) if display == 0: vm_list = f_start_lst + pseudo_lst display_vm_inst(vm_list) elif display == 1: display_ps_inst(push_pop_lst) else: opt_basic = [] display_lst = [] nodes = [] edges = [] for lst in basic_lst: opt_lst = optimize(lst, has_loc) display_lst += opt_lst opt_basic.append(opt_lst) display_ps_inst(display_lst) for node, bb in enumerate(opt_basic): if bb == []: continue nodes.append(('bb%d' % (node))) if has_ret(bb): continue jmp_addr = get_jmp_addr(bb) if jmp_addr == None: edges.append(('bb%d' % (node), 'bb%d' % (node + 1))) else: jmp_locs = get_jmp_loc(jmp_addr, jmp_addrs) for loc in jmp_locs: for pos, (saddr, eaddr) in enumerate(basic_blocks): if loc >= saddr and loc < eaddr: edges.append(('bb%d' % (node), 'bb%d' % (pos))) try: g = show_graph(nodes, edges, opt_basic, jmp_addrs, basic_blocks, real_start) except Exception, e: print e.message if jmp_addrs != []: min_jmp = min(jmp_addrs)[0] else: min_jmp = BADADDR return min_jmp def start(code_saddr, base_addr, code_eaddr, vm_addr, display=4, real_start=0): old_min = BADADDR n_min = code_saddr start = real_start while old_min > n_min: old_min = n_min n_min = deobfuscate(old_min, base_addr, code_eaddr, vm_addr, display, start) if start == 0: start = code_saddr def color_basic_blocks(basic_lst): color = 0 for start, end in basic_lst: if (start + 1 == end): continue pos = start while pos < end: SetColor(pos, CIC_ITEM, bb_colors[color % len(bb_colors)]) pos += 1 color += 1 def make_bb_lists(pp_lst, basic_lst): bb_lists = [] for (s_addr, e_addr) in basic_lst: bb_lst = [] for inst in pp_lst: if inst.addr >= s_addr and inst.addr < e_addr: bb_lst.append(inst) bb_lists.append(bb_lst) return bb_lists def has_locals(bb_lsts): has_ebp_mov = False for bb in bb_lsts: has_ebp_mov = False for inst in bb: if inst.inst_type == PI.MOV_EBP_T: has_ebp_mov = True if inst.inst_type == PI.RET_T and has_ebp_mov: return True return False def print_bb(bb_lsts): block_count = 1 for lst in bb_lsts: print 'Start BB', block_count for inst in lst: print str(inst)[:len(str(inst)) - 1] print 'End BB', block_count block_count += 1
MIT License
wechaty/python-wechaty
src/wechaty/plugin.py
WechatyPlugin.get_dependency_plugins
python
def get_dependency_plugins() -> List[str]:
    return []
get dependency plugins
https://github.com/wechaty/python-wechaty/blob/f7406b0c20e5749da2cee7cf1a4666742d219aae/src/wechaty/plugin.py#L119-L123
from __future__ import annotations import logging import re from abc import ABCMeta from collections import defaultdict, OrderedDict from copy import deepcopy from dataclasses import dataclass from datetime import datetime from enum import Enum from typing import ( TYPE_CHECKING, List, Optional, Dict, Union, Any, cast) from wechaty_puppet import ( get_logger, EventErrorPayload, EventHeartbeatPayload, EventReadyPayload, ScanStatus ) from .exceptions import ( WechatyPluginError, ) if TYPE_CHECKING: from .wechaty import ( Wechaty ) from .user import ( Room, RoomInvitation, Friendship, Contact, Message, ) log: logging.Logger = get_logger(__name__) @dataclass class WechatyPluginOptions: name: Optional[str] = None metadata: Optional[dict] = None class PluginStatus(Enum): Running = 0 Stopped = 1 class WechatyPlugin(metaclass=ABCMeta): def __init__(self, options: Optional[WechatyPluginOptions] = None): self.output: Dict[str, Any] = {} self.bot: Optional[Wechaty] = None if options is None: options = WechatyPluginOptions() self.options = options def set_bot(self, bot: Wechaty) -> None: self.bot = bot async def init_plugin(self, wechaty: Wechaty) -> None: @property def name(self) -> str: if not self.options.name: self.options.name = self.__class__.__name__ return self.options.name @staticmethod
Apache License 2.0
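get_dependency_plugins is a static hook on WechatyPlugin, presumably intended for subclasses to override; a hypothetical plugin might declare its dependencies like this (the plugin and dependency names are made up).

class InviterPlugin(WechatyPlugin):

    @staticmethod
    def get_dependency_plugins() -> List[str]:
        # Names of plugins this plugin expects to be registered first (hypothetical).
        return ['room-connector', 'message-logger']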
tomerfi/my_home_assistant_configuration
custom_components/broadlink_s1c/sensor.py
WatchSensors.__init__
python
def __init__(self, hass: HomeAssistant, conn_obj: S1C) -> None:
    threading.Thread.__init__(self)
    self._hass = hass
    self._ok_to_run = False
    self._conn_obj = conn_obj
    self._last_exception_dt = None
    self._exception_count = 0
    if self._conn_obj._authorized:
        self._ok_to_run = True
        self._hub = self._conn_obj.get_hub_connection()
Initialize the watcher object and create the thread.
https://github.com/tomerfi/my_home_assistant_configuration/blob/a6b7abc6a01126ad1bfa65eb7008f7b6b037347c/custom_components/broadlink_s1c/sensor.py#L290-L300
import asyncio import binascii import datetime import logging import socket import threading import traceback from typing import Callable, Dict, Generator, Optional import homeassistant.helpers.config_validation as cv import voluptuous as vol from broadlink import S1C from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (CONF_IP_ADDRESS, CONF_MAC, CONF_TIMEOUT, EVENT_HOMEASSISTANT_STOP, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_CLOSED, STATE_OPEN, STATE_UNKNOWN) from homeassistant.core import Event, HomeAssistant from homeassistant.helpers.entity import Entity from homeassistant.util.dt import now _LOGGER = logging.getLogger(__name__) DOMAIN = "sensor" ENTITY_ID_FORMAT = DOMAIN + ".broadlink_s1c_{}" DEFAULT_TIMEOUT = 10 STATE_NO_MOTION = "no_motion" STATE_MOTION_DETECTED = "motion_detected" STATE_TAMPERED = "tampered" STATE_ALARM_SOS = "sos" UPDATE_EVENT = "BROADLINK_S1C_SENSOR_UPDATE" EVENT_PROPERTY_NAME = "name" EVENT_PROPERTY_STATE = "state" SENSOR_TYPE_DOOR_SENSOR = "Door Sensor" SENSOR_TYPE_DOOR_SENSOR_ICON = "mdi:door" SENSOR_TYPE_MOTION_SENSOR = "Motion Sensor" SENSOR_TYPE_MOTION_SENSOR_ICON = "mdi:walk" SENSOR_TYPE_KEY_FOB = "Key Fob" SENSOR_TYPE_KEY_FOB_ICON = "mdi:remote" SENSOR_DEFAULT_ICON = "mdi:security-home" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_IP_ADDRESS): cv.string, vol.Required(CONF_MAC): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, } ) @asyncio.coroutine def async_setup_platform( hass: HomeAssistant, config: Dict, async_add_devices: Callable, discovery_info: Optional[Dict] = None, ) -> bool: ip_address = config[CONF_IP_ADDRESS] mac = config[CONF_MAC].encode().replace(b":", b"") mac_addr = binascii.unhexlify(mac) timeout = config[CONF_TIMEOUT] conn_obj = HubConnection(ip_address, mac_addr, timeout) raw_data = conn_obj.get_initial_data() sensors = [] for i, sensor in enumerate(raw_data["sensors"]): sensors.append( S1C_SENSOR( hass, sensor["name"], sensor["type"], conn_obj.parse_status(sensor["type"], str(sensor["status"])), now(), ) ) if sensors: async_add_devices(sensors, True) WatchSensors(hass, conn_obj).start() return True class S1C_SENSOR(Entity): def __init__( self, hass: HomeAssistant, name: str, sensor_type: str, status: str, last_changed: datetime.datetime, ) -> None: self.entity_id = ENTITY_ID_FORMAT.format( name.replace(" ", "_").replace("-", "_").lower() ) self._hass = hass self._name = name self._sensor_type = sensor_type self._state = status self._last_changed = last_changed hass.bus.async_listen(UPDATE_EVENT, self.async_event_listener) @property def name(self) -> str: return self._name @property def should_poll(self) -> bool: return False @property def state(self) -> str: return self._state @property def icon(self) -> str: if self._sensor_type == SENSOR_TYPE_DOOR_SENSOR: return SENSOR_TYPE_DOOR_SENSOR_ICON elif self._sensor_type == SENSOR_TYPE_KEY_FOB: return SENSOR_TYPE_KEY_FOB_ICON elif self._sensor_type == SENSOR_TYPE_MOTION_SENSOR: return SENSOR_TYPE_MOTION_SENSOR_ICON else: return SENSOR_DEFAULT_ICON @property def device_state_attributes(self) -> Dict: return { "sensor_type": self._sensor_type, "last_changed": self._last_changed, } @asyncio.coroutine def async_event_listener(self, event: Event) -> Generator: if event.data.get(EVENT_PROPERTY_NAME) == self._name: self._state = event.data.get(EVENT_PROPERTY_STATE) self._last_changed = event.time_fired yield from self.async_update_ha_state() class HubConnection(object): def 
__init__(self, ip_addr: str, mac_addr: bytes, timeout: int) -> None: import broadlink self._hub = broadlink.S1C((ip_addr, 80), mac_addr, None) self._hub.timeout = timeout self._authorized = self.authorize() if self._authorized: _LOGGER.info("succesfully connected to s1c hub") self._initial_data = self._hub.get_sensors_status() else: _LOGGER.error( "failed to connect s1c hub, not authorized." "Fix the problem and restart the system." ) self._initial_data = None def authorize(self, retry: int = 3) -> bool: try: auth = self._hub.auth() except socket.timeout: auth = False if not auth and retry > 0: return self.authorize(retry - 1) return auth def get_initial_data(self) -> Dict: return self._initial_data def get_hub_connection(self) -> S1C: return self._hub def parse_status(self, sensor_type: str, sensor_status: str) -> str: if sensor_type == SENSOR_TYPE_DOOR_SENSOR and sensor_status in ( "0", "128", ): return STATE_CLOSED elif sensor_type == SENSOR_TYPE_DOOR_SENSOR and sensor_status in ( "16", "144", ): return STATE_OPEN elif sensor_type == SENSOR_TYPE_DOOR_SENSOR and sensor_status == "48": return STATE_TAMPERED elif sensor_type == SENSOR_TYPE_MOTION_SENSOR and sensor_status in ( "0", "128", ): return STATE_NO_MOTION elif ( sensor_type == SENSOR_TYPE_MOTION_SENSOR and sensor_status == "16" ): return STATE_MOTION_DETECTED elif ( sensor_type == SENSOR_TYPE_MOTION_SENSOR and sensor_status == "32" ): return STATE_TAMPERED elif sensor_type == SENSOR_TYPE_KEY_FOB and sensor_status == "16": return STATE_ALARM_DISARMED elif sensor_type == SENSOR_TYPE_KEY_FOB and sensor_status == "32": return STATE_ALARM_ARMED_AWAY elif sensor_type == SENSOR_TYPE_KEY_FOB and sensor_status == "64": return STATE_ALARM_ARMED_HOME elif sensor_type == SENSOR_TYPE_KEY_FOB and sensor_status in ( "0", "128", ): return STATE_ALARM_SOS else: _LOGGER.warning( "Unknown status %s for type %s.", sensor_status, sensor_type ) return STATE_UNKNOWN class WatchSensors(threading.Thread):
MIT License
homebysix/recipe-robot
scripts/test/test_functional.py
verify_processor_args
python
def verify_processor_args(processor_name, recipe, expected_args):
    assert_in(
        processor_name,
        [processor["Processor"] for processor in recipe["Process"]]
    )
    actual_args = dict(
        [
            processor
            for processor in recipe["Process"]
            if processor["Processor"] == processor_name
        ][0]["Arguments"]
    )
    assert_dict_equal(expected_args, actual_args)
Verify processor arguments against known dict.
https://github.com/homebysix/recipe-robot/blob/fc51b3134b6db7cd86641785d75a0b994ae88154/scripts/test/test_functional.py#L78-L90
from __future__ import absolute_import, print_function import os import plistlib import shutil import subprocess from random import shuffle import yaml from nose.tools import * from recipe_robot_lib.tools import strip_dev_suffix RECIPE_TYPES = ("download", "pkg", "munki", "install", "jss") def robot_runner(input_path): proc = subprocess.run( ["./recipe-robot", "--ignore-existing", "--verbose", input_path], check=False ) assert_equal( proc.returncode, 0, "{}: Recipe Robot returned nonzero return code.".format(input_path), ) def autopkg_runner(recipe_path): proc = subprocess.run( ["/usr/local/bin/autopkg", "run", recipe_path, "--quiet"], check=False ) assert_equal( proc.returncode, 0, "{}: AutoPkg returned nonzero return code.".format(recipe_path), )
Apache License 2.0
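For reference, a call to verify_processor_args above might look like the following; the recipe dict and processor arguments are hypothetical.

# Hypothetical recipe fragment with a single processor step.
recipe = {
    "Process": [
        {
            "Processor": "URLDownloader",
            "Arguments": {"url": "https://example.com/app.dmg", "filename": "app.dmg"},
        }
    ]
}

# Passes silently if URLDownloader is present with exactly these arguments,
# otherwise raises an AssertionError via nose.tools.
verify_processor_args(
    "URLDownloader",
    recipe,
    {"url": "https://example.com/app.dmg", "filename": "app.dmg"},
)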
google/transitfeed
transitfeed/schedule.py
Schedule.GenerateDateTripsDeparturesList
python
def GenerateDateTripsDeparturesList(self, date_start, date_end):
    service_id_to_trips = defaultdict(lambda: 0)
    service_id_to_departures = defaultdict(lambda: 0)
    for trip in self.GetTripList():
        headway_start_times = trip.GetFrequencyStartTimes()
        if headway_start_times:
            trip_runs = len(headway_start_times)
        else:
            trip_runs = 1

        service_id_to_trips[trip.service_id] += trip_runs
        service_id_to_departures[trip.service_id] += (
            (trip.GetCountStopTimes() - 1) * trip_runs)

    date_services = self.GetServicePeriodsActiveEachDate(date_start, date_end)
    date_trips = []
    for date, services in date_services:
        day_trips = sum(service_id_to_trips[s.service_id] for s in services)
        day_departures = sum(
            service_id_to_departures[s.service_id] for s in services)
        date_trips.append((date, day_trips, day_departures))
    return date_trips
Return a list of (date object, number of trips, number of departures).

The list is generated for dates in the range [date_start, date_end).

Args:
  date_start: The first date in the list, a date object
  date_end: The first date after the list, a date object

Returns:
  a list of (date object, number of trips, number of departures) tuples
https://github.com/google/transitfeed/blob/d727e97cb66ac2ca2d699a382ea1d449ee26c2a1/transitfeed/schedule.py#L770-L804
from __future__ import absolute_import import bisect import datetime import itertools import os try: import sqlite3 as sqlite native_sqlite = True except ImportError: try: from pysqlite2 import dbapi2 as sqlite native_sqlite = True except ImportError: from com.ziclix.python.sql import zxJDBC as sqlite native_sqlite = False import tempfile import time import warnings import weakref import zipfile from . import gtfsfactoryuser from . import problems as problems_module from .util import defaultdict from . import util from .compat import StringIO class Schedule(object): def __init__(self, problem_reporter=None, memory_db=True, check_duplicate_trips=False, gtfs_factory=None): if gtfs_factory is None: gtfs_factory = gtfsfactoryuser.GtfsFactoryUser().GetGtfsFactory() self._gtfs_factory = gtfs_factory self._table_columns = {} self._agencies = {} self.stops = {} self.routes = {} self.trips = {} self.service_periods = {} self.fares = {} self.fare_zones = {} self.feed_info = None self._shapes = {} self._transfers = defaultdict(lambda: []) self._default_service_period = None self._default_agency = None if problem_reporter is None: self.problem_reporter = problems_module.default_problem_reporter else: self.problem_reporter = problem_reporter self._check_duplicate_trips = check_duplicate_trips self.ConnectDb(memory_db) def AddTableColumn(self, table, column): if column not in self._table_columns[table]: self._table_columns[table].append(column) def AddTableColumns(self, table, columns): table_columns = self._table_columns.setdefault(table, []) for attr in columns: if attr not in table_columns: table_columns.append(attr) def GetTableColumns(self, table): return self._table_columns[table] def __del__(self): self._connection.cursor().close() self._connection.close() if hasattr(self, '_temp_db_filename'): os.remove(self._temp_db_filename) def ConnectDb(self, memory_db): def connector(db_file): if native_sqlite: return sqlite.connect(db_file) else: return sqlite.connect("jdbc:sqlite:%s" % db_file, "", "", "org.sqlite.JDBC") if memory_db: self._connection = connector(":memory:") else: try: self._temp_db_file = tempfile.NamedTemporaryFile() self._connection = connector(self._temp_db_file.name) except sqlite.OperationalError: self._temp_db_file = None (fd, self._temp_db_filename) = tempfile.mkstemp(".db") os.close(fd) self._connection = connector(self._temp_db_filename) cursor = self._connection.cursor() cursor.execute("""CREATE TABLE stop_times ( trip_id CHAR(50), arrival_secs INTEGER, departure_secs INTEGER, stop_id CHAR(50), stop_sequence INTEGER, stop_headsign VAR CHAR(100), pickup_type INTEGER, drop_off_type INTEGER, shape_dist_traveled FLOAT, timepoint INTEGER);""") cursor.execute("""CREATE INDEX trip_index ON stop_times (trip_id);""") cursor.execute("""CREATE INDEX stop_index ON stop_times (stop_id);""") def GetStopBoundingBox(self): return (min(s.stop_lat for s in self.stops.values()), min(s.stop_lon for s in self.stops.values()), max(s.stop_lat for s in self.stops.values()), max(s.stop_lon for s in self.stops.values()), ) def AddAgency(self, name, url, timezone, agency_id=None): agency = self._gtfs_factory.Agency(name, url, timezone, agency_id) self.AddAgencyObject(agency) return agency def AddAgencyObject(self, agency, problem_reporter=None, validate=False): assert agency._schedule is None if not problem_reporter: problem_reporter = self.problem_reporter if agency.agency_id in self._agencies: problem_reporter.DuplicateID('agency_id', agency.agency_id) return self.AddTableColumns('agency', 
agency._ColumnNames()) agency._schedule = weakref.proxy(self) if validate: agency.Validate(problem_reporter) self._agencies[agency.agency_id] = agency def GetAgency(self, agency_id): return self._agencies[agency_id] def GetDefaultAgency(self): if not self._default_agency: if len(self._agencies) == 0: self.NewDefaultAgency() elif len(self._agencies) == 1: self._default_agency = self._agencies.values()[0] return self._default_agency def NewDefaultAgency(self, **kwargs): agency = self._gtfs_factory.Agency(**kwargs) if not agency.agency_id: agency.agency_id = util.FindUniqueId(self._agencies) self._default_agency = agency self.SetDefaultAgency(agency, validate=False) return agency def SetDefaultAgency(self, agency, validate=True): assert isinstance(agency, self._gtfs_factory.Agency) self._default_agency = agency if agency.agency_id not in self._agencies: self.AddAgencyObject(agency, validate=validate) def GetAgencyList(self): return self._agencies.values() def GetServicePeriod(self, service_id): return self.service_periods[service_id] def GetDefaultServicePeriod(self): if not self._default_service_period: if len(self.service_periods) == 0: self.NewDefaultServicePeriod() elif len(self.service_periods) == 1: self._default_service_period = self.service_periods.values()[0] return self._default_service_period def NewDefaultServicePeriod(self): service_period = self._gtfs_factory.ServicePeriod() service_period.service_id = util.FindUniqueId(self.service_periods) self.SetDefaultServicePeriod(service_period, validate=False) return service_period def SetDefaultServicePeriod(self, service_period, validate=True): assert isinstance(service_period, self._gtfs_factory.ServicePeriod) self._default_service_period = service_period if service_period.service_id not in self.service_periods: self.AddServicePeriodObject(service_period, validate=validate) def AddServicePeriodObject(self, service_period, problem_reporter=None, validate=True): if not problem_reporter: problem_reporter = self.problem_reporter if service_period.service_id in self.service_periods: problem_reporter.DuplicateID('service_id', service_period.service_id) return if validate: service_period.Validate(problem_reporter) self.service_periods[service_period.service_id] = service_period def GetServicePeriodList(self): return self.service_periods.values() def GetDateRange(self): (minvalue, maxvalue, minorigin, maxorigin) = self.GetDateRangeWithOrigins() return (minvalue, maxvalue) def GetDateRangeWithOrigins(self): period_list = self.GetServicePeriodList() ranges = [period.GetDateRange() for period in period_list] starts = filter(lambda x: x, [item[0] for item in ranges]) ends = filter(lambda x: x, [item[1] for item in ranges]) if not starts or not ends: return (None, None, None, None) minvalue, minindex = min(itertools.izip(starts, itertools.count())) maxvalue, maxindex = max(itertools.izip(ends, itertools.count())) minreason = (period_list[minindex].HasDateExceptionOn(minvalue) and "earliest service exception date in calendar_dates.txt" or "earliest service date in calendar.txt") maxreason = (period_list[maxindex].HasDateExceptionOn(maxvalue) and "last service exception date in calendar_dates.txt" or "last service date in calendar.txt") if self.feed_info and self.feed_info.feed_start_date: minvalue = self.feed_info.feed_start_date minreason = "feed_start_date in feed_info.txt" if self.feed_info and self.feed_info.feed_end_date: maxvalue = self.feed_info.feed_end_date maxreason = "feed_end_date in feed_info.txt" return (minvalue, maxvalue, minreason, 
maxreason) def GetServicePeriodsActiveEachDate(self, date_start, date_end): date_it = date_start one_day = datetime.timedelta(days=1) date_service_period_list = [] while date_it < date_end: periods_today = [] date_it_string = date_it.strftime("%Y%m%d") for service in self.GetServicePeriodList(): if service.IsActiveOn(date_it_string, date_it): periods_today.append(service) date_service_period_list.append((date_it, periods_today)) date_it += one_day return date_service_period_list def AddStop(self, lat, lng, name, stop_id=None): if stop_id is None: stop_id = util.FindUniqueId(self.stops) stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng, name=name) self.AddStopObject(stop) return stop def AddStopObject(self, stop, problem_reporter=None): assert stop._schedule is None if not problem_reporter: problem_reporter = self.problem_reporter if not stop.stop_id: return if stop.stop_id in self.stops: problem_reporter.DuplicateID('stop_id', stop.stop_id) return stop._schedule = weakref.proxy(self) self.AddTableColumns('stops', stop._ColumnNames()) self.stops[stop.stop_id] = stop if hasattr(stop, 'zone_id') and stop.zone_id: self.fare_zones[stop.zone_id] = True def GetStopList(self): return self.stops.values() def AddRoute(self, short_name, long_name, route_type, route_id=None): if route_id is None: route_id = util.FindUniqueId(self.routes) route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name, route_type=route_type, route_id=route_id) route.agency_id = self.GetDefaultAgency().agency_id self.AddRouteObject(route) return route def AddRouteObject(self, route, problem_reporter=None): if not problem_reporter: problem_reporter = self.problem_reporter if route.route_id in self.routes: problem_reporter.DuplicateID('route_id', route.route_id) return if route.agency_id not in self._agencies: if not route.agency_id and len(self._agencies) == 1: pass else: problem_reporter.InvalidValue('agency_id', route.agency_id, 'Route uses an unknown agency_id.') return self.AddTableColumns('routes', route._ColumnNames()) route._schedule = weakref.proxy(self) self.routes[route.route_id] = route def GetRouteList(self): return self.routes.values() def GetRoute(self, route_id): return self.routes[route_id] def AddShapeObject(self, shape, problem_reporter=None): if not problem_reporter: problem_reporter = self.problem_reporter shape.Validate(problem_reporter) if shape.shape_id in self._shapes: problem_reporter.DuplicateID('shape_id', shape.shape_id) return self._shapes[shape.shape_id] = shape def GetShapeList(self): return self._shapes.values() def GetShape(self, shape_id): return self._shapes[shape_id] def AddTripObject(self, trip, problem_reporter=None, validate=False): if not problem_reporter: problem_reporter = self.problem_reporter if trip.trip_id in self.trips: problem_reporter.DuplicateID('trip_id', trip.trip_id) return self.AddTableColumns('trips', trip._ColumnNames()) trip._schedule = weakref.proxy(self) self.trips[trip.trip_id] = trip if validate: if not problem_reporter: problem_reporter = self.problem_reporter trip.Validate(problem_reporter, validate_children=False) try: self.routes[trip.route_id]._AddTripObject(trip) except KeyError: pass def GetTripList(self): return self.trips.values() def GetTrip(self, trip_id): return self.trips[trip_id] def AddFareObject(self, fare, problem_reporter=None): warnings.warn("No longer supported. 
The Fare class was renamed to " "FareAttribute, and all related functions were renamed " "accordingly.", DeprecationWarning) self.AddFareAttributeObject(fare, problem_reporter) def AddFareAttributeObject(self, fare, problem_reporter=None): if not problem_reporter: problem_reporter = self.problem_reporter fare.Validate(problem_reporter) if fare.fare_id in self.fares: problem_reporter.DuplicateID('fare_id', fare.fare_id) return self.fares[fare.fare_id] = fare def GetFareList(self): warnings.warn("No longer supported. The Fare class was renamed to " "FareAttribute, and all related functions were renamed " "accordingly.", DeprecationWarning) return self.GetFareAttributeList() def GetFareAttributeList(self): return self.fares.values() def GetFare(self, fare_id): warnings.warn("No longer supported. The Fare class was renamed to " "FareAttribute, and all related functions were renamed " "accordingly.", DeprecationWarning) return self.GetFareAttribute(fare_id) def GetFareAttribute(self, fare_id): return self.fares[fare_id] def AddFareRuleObject(self, rule, problem_reporter=None): if not problem_reporter: problem_reporter = self.problem_reporter if util.IsEmpty(rule.fare_id): problem_reporter.MissingValue('fare_id') return if rule.route_id and rule.route_id not in self.routes: problem_reporter.InvalidValue('route_id', rule.route_id) if rule.origin_id and rule.origin_id not in self.fare_zones: problem_reporter.InvalidValue('origin_id', rule.origin_id) if rule.destination_id and rule.destination_id not in self.fare_zones: problem_reporter.InvalidValue('destination_id', rule.destination_id) if rule.contains_id and rule.contains_id not in self.fare_zones: problem_reporter.InvalidValue('contains_id', rule.contains_id) if rule.fare_id in self.fares: self.GetFareAttribute(rule.fare_id).rules.append(rule) else: problem_reporter.InvalidValue('fare_id', rule.fare_id, '(This fare_id doesn\'t correspond to any ' 'of the IDs defined in the ' 'fare attributes.)') def AddFeedInfoObject(self, feed_info, problem_reporter=None, validate=False): assert feed_info._schedule is None if not problem_reporter: problem_reporter = self.problem_reporter feed_info._schedule = weakref.proxy(self) if validate: feed_info.Validate(problem_reporter) self.AddTableColumns('feed_info', feed_info._ColumnNames()) self.feed_info = feed_info def AddTransferObject(self, transfer, problem_reporter=None): assert transfer._schedule is None, "only add Transfer to a schedule once" if not problem_reporter: problem_reporter = self.problem_reporter transfer_id = transfer._ID() if transfer_id in self._transfers: self.problem_reporter.DuplicateID(self._gtfs_factory.Transfer._ID_COLUMNS, transfer_id, type=problems_module.TYPE_WARNING) transfer._schedule = weakref.proxy(self) self.AddTableColumns('transfers', transfer._ColumnNames()) self._transfers[transfer_id].append(transfer) def GetTransferIter(self): return itertools.chain(*self._transfers.values()) def GetTransferList(self): return list(self.GetTransferIter()) def GetStop(self, id): return self.stops[id] def GetFareZones(self): return self.fare_zones.keys() def GetNearestStops(self, lat, lon, n=1): dist_stop_list = [] for s in self.stops.values(): dist = (s.stop_lat - lat)**2 + (s.stop_lon - lon)**2 if len(dist_stop_list) < n: bisect.insort(dist_stop_list, (dist, s)) elif dist < dist_stop_list[-1][0]: bisect.insort(dist_stop_list, (dist, s)) dist_stop_list.pop() return [stop for dist, stop in dist_stop_list] def GetStopsInBoundingBox(self, north, east, south, west, n): stop_list = [] for s in 
self.stops.values(): if (s.stop_lat <= north and s.stop_lat >= south and s.stop_lon <= east and s.stop_lon >= west): stop_list.append(s) if len(stop_list) == n: break return stop_list def Load(self, feed_path, extra_validation=False): loader = self._gtfs_factory.Loader(feed_path, self, problems=self.problem_reporter, extra_validation=extra_validation) loader.Load() def _WriteArchiveString(self, archive, filename, stringio): zi = zipfile.ZipInfo(filename) zi.external_attr = 0o666 << 16 zi.compress_type = zipfile.ZIP_DEFLATED archive.writestr(zi, stringio.getvalue()) def WriteGoogleTransitFeed(self, file): archive = zipfile.ZipFile(file, 'w') if 'agency' in self._table_columns: agency_string = StringIO() writer = util.CsvUnicodeWriter(agency_string) columns = self.GetTableColumns('agency') writer.writerow(columns) for a in self._agencies.values(): writer.writerow([util.EncodeUnicode(a[c]) for c in columns]) self._WriteArchiveString(archive, 'agency.txt', agency_string) if 'feed_info' in self._table_columns: feed_info_string = StringIO() writer = util.CsvUnicodeWriter(feed_info_string) columns = self.GetTableColumns('feed_info') writer.writerow(columns) writer.writerow([util.EncodeUnicode(self.feed_info[c]) for c in columns]) self._WriteArchiveString(archive, 'feed_info.txt', feed_info_string) calendar_dates_string = StringIO() writer = util.CsvUnicodeWriter(calendar_dates_string) writer.writerow( self._gtfs_factory.ServicePeriod._FIELD_NAMES_CALENDAR_DATES) has_data = False for period in self.service_periods.values(): for row in period.GenerateCalendarDatesFieldValuesTuples(): has_data = True writer.writerow(row) wrote_calendar_dates = False if has_data: wrote_calendar_dates = True self._WriteArchiveString(archive, 'calendar_dates.txt', calendar_dates_string) calendar_string = StringIO() writer = util.CsvUnicodeWriter(calendar_string) writer.writerow(self._gtfs_factory.ServicePeriod._FIELD_NAMES) has_data = False for s in self.service_periods.values(): row = s.GetCalendarFieldValuesTuple() if row: has_data = True writer.writerow(row) if has_data or not wrote_calendar_dates: self._WriteArchiveString(archive, 'calendar.txt', calendar_string) if 'stops' in self._table_columns: stop_string = StringIO() writer = util.CsvUnicodeWriter(stop_string) columns = self.GetTableColumns('stops') writer.writerow(columns) for s in self.stops.values(): writer.writerow([util.EncodeUnicode(s[c]) for c in columns]) self._WriteArchiveString(archive, 'stops.txt', stop_string) if 'routes' in self._table_columns: route_string = StringIO() writer = util.CsvUnicodeWriter(route_string) columns = self.GetTableColumns('routes') writer.writerow(columns) for r in self.routes.values(): writer.writerow([util.EncodeUnicode(r[c]) for c in columns]) self._WriteArchiveString(archive, 'routes.txt', route_string) if 'trips' in self._table_columns: trips_string = StringIO() writer = util.CsvUnicodeWriter(trips_string) columns = self.GetTableColumns('trips') writer.writerow(columns) for t in self.trips.values(): writer.writerow([util.EncodeUnicode(t[c]) for c in columns]) self._WriteArchiveString(archive, 'trips.txt', trips_string) headway_rows = [] for trip in self.GetTripList(): headway_rows += trip.GetFrequencyOutputTuples() if headway_rows: headway_string = StringIO() writer = util.CsvUnicodeWriter(headway_string) writer.writerow(self._gtfs_factory.Frequency._FIELD_NAMES) writer.writerows(headway_rows) self._WriteArchiveString(archive, 'frequencies.txt', headway_string) if self.GetFareAttributeList(): fare_string = StringIO() 
writer = util.CsvUnicodeWriter(fare_string) writer.writerow(self._gtfs_factory.FareAttribute._FIELD_NAMES) writer.writerows( f.GetFieldValuesTuple() for f in self.GetFareAttributeList()) self._WriteArchiveString(archive, 'fare_attributes.txt', fare_string) rule_rows = [] for fare in self.GetFareAttributeList(): for rule in fare.GetFareRuleList(): rule_rows.append(rule.GetFieldValuesTuple()) if rule_rows: rule_string = StringIO() writer = util.CsvUnicodeWriter(rule_string) writer.writerow(self._gtfs_factory.FareRule._FIELD_NAMES) writer.writerows(rule_rows) self._WriteArchiveString(archive, 'fare_rules.txt', rule_string) stop_times_string = StringIO() writer = util.CsvUnicodeWriter(stop_times_string) writer.writerow(self._gtfs_factory.StopTime._FIELD_NAMES) for t in self.trips.values(): writer.writerows(t._GenerateStopTimesTuples()) self._WriteArchiveString(archive, 'stop_times.txt', stop_times_string) shape_rows = [] for shape in self.GetShapeList(): seq = 1 for (lat, lon, dist) in shape.points: shape_rows.append((shape.shape_id, lat, lon, seq, dist)) seq += 1 if shape_rows: shape_string = StringIO() writer = util.CsvUnicodeWriter(shape_string) writer.writerow(self._gtfs_factory.Shape._FIELD_NAMES) writer.writerows(shape_rows) self._WriteArchiveString(archive, 'shapes.txt', shape_string) if 'transfers' in self._table_columns: transfer_string = StringIO() writer = util.CsvUnicodeWriter(transfer_string) columns = self.GetTableColumns('transfers') writer.writerow(columns) for t in self.GetTransferIter(): writer.writerow([util.EncodeUnicode(t[c]) for c in columns]) self._WriteArchiveString(archive, 'transfers.txt', transfer_string) archive.close()
Apache License 2.0
docusign/docusign-python-client
docusign_esign/models/page_request.py
PageRequest.rotate
python
def rotate(self):
        return self._rotate
Gets the rotate of this PageRequest.  # noqa: E501
        Sets the direction the page image is rotated. The possible settings are: left or right  # noqa: E501
        :return: The rotate of this PageRequest.  # noqa: E501
        :rtype: str
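A brief usage sketch for the getter above (not part of the source record). It assumes PageRequest is importable from the module path listed in this record and that the value is supplied through the constructor's keyword arguments, as the context below shows; the "right" value is only an illustration.

# Hedged example: constructor kwargs and module path are taken from the record;
# the chosen rotation value is illustrative.
from docusign_esign.models.page_request import PageRequest

page = PageRequest(rotate="right")  # "left" or "right" per the docstring
assert page.rotate == "right"       # read back through the getter shown above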
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/page_request.py#L82-L90
import pprint import re import six from docusign_esign.client.configuration import Configuration class PageRequest(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'password': 'str', 'rotate': 'str' } attribute_map = { 'password': 'password', 'rotate': 'rotate' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._password = None self._rotate = None self.discriminator = None setattr(self, "_{}".format('password'), kwargs.get('password', None)) setattr(self, "_{}".format('rotate'), kwargs.get('rotate', None)) @property def password(self): return self._password @password.setter def password(self, password): self._password = password @property
MIT License
google/citest
citest/gcp_testing/gcp_agent.py
GcpAgent.default_variables
python
def default_variables(self):
        return self.__default_variables
Default variables for method invocations.
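A hedged sketch of how the property above is usually reached. It assumes working Google application-default credentials and treats the 'compute'/'v1' discovery name and the variables dict as illustrative values, not data from this record.

# Illustrative only: api/version and the variables dict are assumptions, and
# make_agent needs Google application-default credentials to build the service.
from citest.gcp_testing.gcp_agent import GcpAgent

agent = GcpAgent.make_agent(api="compute", version="v1",
                            default_variables={"project": "my-project"})
print(agent.default_variables)  # -> {"project": "my-project"}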
https://github.com/google/citest/blob/eda9171eed35b82ce6f048229bebd898edc25369/citest/gcp_testing/gcp_agent.py#L177-L179
import json import logging import apiclient import httplib2 from oauth2client.client import GoogleCredentials from oauth2client.service_account import ServiceAccountCredentials from citest.base import JournalLogger from citest.service_testing import BaseAgent import citest logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR) PLATFORM_READ_ONLY_SCOPE = ( 'https://www.googleapis.com/auth/cloud-platform.read-only' ) PLATFORM_FULL_SCOPE = 'https://www.googleapis.com/auth/cloud-platform' class GcpAgent(BaseAgent): @classmethod def default_discovery_name_and_version(cls): raise NotImplementedError() @classmethod def download_discovery_document(cls, api=None, version=None): if api is None: default_api, default_version = cls.default_discovery_name_and_version() api = default_api version = version or default_version if version is None: version = GcpAgent.determine_current_version(api) http = httplib2.Http() http = apiclient.http.set_user_agent( http, 'citest/{version}'.format(version=citest.__version__)) service = apiclient.discovery.build('discovery', 'v1', http=http) return service.apis().getRest(api=api, version=version).execute() @staticmethod def determine_current_version(api): discovery = GcpAgent.make_service('discovery', 'v1') response = discovery.apis().list(name=api, preferred=True).execute() if not response.get('items'): raise ValueError('Unknown API "{0}".'.format(api)) return response['items'][0]['version'] @classmethod def make_service(cls, api=None, version=None, scopes=None, credentials_path=None, logger=None): credentials_path = credentials_path or None logger = logger or logging.getLogger(__name__) if api is None: default_api, default_version = cls.default_discovery_name_and_version() api = default_api version = version or default_version if version is None: version = GcpAgent.determine_current_version(api) http = httplib2.Http() http = apiclient.http.set_user_agent( http, 'citest/{version}'.format(version=citest.__version__)) credentials = None if credentials_path is not None: logger.info('Authenticating %s %s', api, version) credentials = ServiceAccountCredentials.from_json_keyfile_name( credentials_path, scopes=scopes) else: credentials = GoogleCredentials.get_application_default() if scopes and credentials.create_scoped_required(): credentials = credentials.create_scoped(scopes) http = credentials.authorize(http) logger.info('Constructing %s service...', api) return apiclient.discovery.build(api, version, http=http) @classmethod def make_agent(cls, api=None, version=None, scopes=None, credentials_path=None, default_variables=None, **kwargs): if version is None and api is not None: version = GcpAgent.determine_current_version(api) service = cls.make_service(api, version, scopes, credentials_path) discovery_doc = cls.download_discovery_document(api=api, version=version) return cls(service, discovery_doc, default_variables, **kwargs) @property def service(self): return self.__service @property def discovery_document(self): return self.__discovery_doc @property
Apache License 2.0
sebr/bhyve-home-assistant
custom_components/bhyve/pybhyve/client.py
Client.stop
python
async def stop(self):
        if self._websocket is not None:
            await self._websocket.stop()
Stop the websocket.
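A minimal async sketch showing where stop() fits relative to login(), both visible in this record's context. The credentials and callback are placeholders, and the aiohttp session is an assumption about the session type the client expects.

# Sketch only: username/password/callback are placeholders; login() opens the
# websocket and stop() closes it, as shown in the record above.
import asyncio
import aiohttp
from custom_components.bhyve.pybhyve.client import Client

async def main():
    async def on_event(data):
        print(data)

    async with aiohttp.ClientSession() as session:
        client = Client("user@example.com", "password",
                        asyncio.get_running_loop(), session, on_event)
        await client.login()   # authenticates and starts the websocket
        await client.stop()    # shuts the websocket down again

asyncio.run(main())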
https://github.com/sebr/bhyve-home-assistant/blob/8324c30ad8a6ccd00fe4bf1113dd2fdeeb4a9c09/custom_components/bhyve/pybhyve/client.py#L155-L158
import logging import re import time from asyncio import ensure_future from .const import ( API_HOST, API_POLL_PERIOD, DEVICES_PATH, DEVICE_HISTORY_PATH, TIMER_PROGRAMS_PATH, LOGIN_PATH, WS_HOST, ) from .errors import RequestError from .websocket import OrbitWebsocket _LOGGER = logging.getLogger(__name__) class Client: def __init__( self, username: str, password: str, loop, session, async_callback ) -> None: self._username: str = username self._password: int = password self._ws_url: str = WS_HOST self._token: str = None self._websocket = None self._loop = loop self._session = session self._async_callback = async_callback self._devices = [] self._last_poll_devices = 0 self._timer_programs = [] self._last_poll_programs = 0 self._device_histories = dict() self._last_poll_device_histories = 0 async def _request( self, method: str, endpoint: str, params: dict = None, json: dict = None ) -> list: url: str = f"{API_HOST}{endpoint}" if not params: params = {} headers = { "Accept": "application/json, text/plain, */*", "Host": re.sub("https?://", "", API_HOST), "Content-Type": "application/json; charset=utf-8;", "Referer": API_HOST, "Orbit-Session-Token": self._token or "", } headers["User-Agent"] = ( "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/72.0.3626.81 Safari/537.36" ) async with self._session.request( method, url, params=params, headers=headers, json=json ) as resp: try: resp.raise_for_status() return await resp.json(content_type=None) except Exception as err: raise RequestError(f"Error requesting data from {url}: {err}") async def _refresh_devices(self, force_update=False): now = time.time() if force_update: _LOGGER.info("Forcing device refresh") elif now - self._last_poll_devices < API_POLL_PERIOD: return self._devices = await self._request( "get", DEVICES_PATH, params={"t": str(time.time())} ) self._last_poll_devices = now async def _refresh_timer_programs(self, force_update=False): now = time.time() if force_update: _LOGGER.debug("Forcing device refresh") elif now - self._last_poll_programs < API_POLL_PERIOD: return self._timer_programs = await self._request( "get", TIMER_PROGRAMS_PATH, params={"t": str(time.time())} ) self._last_poll_programs = now async def _refresh_device_history(self, device_id, force_update=False): now = time.time() if force_update: _LOGGER.info("Forcing refresh of device history %s", device_id) elif now - self._last_poll_device_histories < API_POLL_PERIOD: return device_history = await self._request( "get", DEVICE_HISTORY_PATH.format(device_id), params={"t": str(time.time()), "page": str(1), "per-page": str(10),}, ) self._device_histories.update({device_id: device_history}) self._last_poll_device_histories = now async def _async_ws_handler(self, data): if self._async_callback: ensure_future(self._async_callback(data)) async def login(self) -> bool: url: str = f"{API_HOST}{LOGIN_PATH}" json = {"session": {"email": self._username, "password": self._password}} async with self._session.request("post", url, json=json) as resp: try: resp.raise_for_status() response = await resp.json(content_type=None) _LOGGER.debug("Logged in") self._token = response["orbit_session_token"] except Exception as err: raise RequestError(f"Error requesting data from {url}: {err}") if self._token is None: return False self._websocket = OrbitWebsocket( token=self._token, loop=self._loop, session=self._session, url=self._ws_url, async_callback=self._async_ws_handler, ) self._websocket.start() return True
MIT License
line/line-bot-sdk-python
linebot/models/messages.py
LocationMessage.__init__
python
def __init__(self, id=None, title=None, address=None, latitude=None,
                 longitude=None, **kwargs):
        super(LocationMessage, self).__init__(id=id, **kwargs)
        self.type = 'location'
        self.title = title
        self.address = address
        self.latitude = latitude
        self.longitude = longitude
__init__ method.
        :param str id: Message ID
        :param str title: Title
        :param str address: Address
        :param float latitude: Latitude
        :param float longitude: Longitude
        :param kwargs:
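A short construction sketch matching the parameters documented above; the id, strings and coordinates are placeholder values.

# Placeholder values; the keyword names mirror the __init__ parameters above.
from linebot.models.messages import LocationMessage

message = LocationMessage(
    id="325708",
    title="Office",
    address="1-6-1 Yotsuya, Shinjuku-ku, Tokyo",
    latitude=35.687574,
    longitude=139.72922,
)
print(message.type)  # -> "location"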
https://github.com/line/line-bot-sdk-python/blob/914a2d5520ffb68a2f0cc6894006902a42f66c71/linebot/models/messages.py#L186-L203
from abc import ABCMeta from future.utils import with_metaclass from linebot.models.emojis import Emojis from .mention import Mention from .mentionee import Mentionee from .base import Base class Message(with_metaclass(ABCMeta, Base)): def __init__(self, id=None, **kwargs): super(Message, self).__init__(**kwargs) self.type = None self.id = id class TextMessage(Message): def __init__(self, id=None, text=None, emojis=None, mention=None, **kwargs): super(TextMessage, self).__init__(id=id, **kwargs) self.type = 'text' self.text = text if emojis: new_emojis = [] for emoji in emojis: emoji_object = self.get_or_new_from_json_dict( emoji, Emojis ) if emoji_object: new_emojis.append(emoji_object) self.emojis = new_emojis else: self.emojis = emojis if mention: mention_object = self.get_or_new_from_json_dict( mention, Mention ) mentionees = [] for mentionee in mention_object.mentionees: mentionee_object = self.get_or_new_from_json_dict( mentionee, Mentionee ) if mentionee_object: mentionees.append(mentionee_object) self.mention = Mention(mentionees) else: self.mention = mention class ImageMessage(Message): def __init__(self, id=None, content_provider=None, image_set=None, **kwargs): super(ImageMessage, self).__init__(id=id, **kwargs) self.type = 'image' self.content_provider = self.get_or_new_from_json_dict( content_provider, ContentProvider ) self.image_set = self.get_or_new_from_json_dict( image_set, ImageSet ) class VideoMessage(Message): def __init__(self, id=None, duration=None, content_provider=None, **kwargs): super(VideoMessage, self).__init__(id=id, **kwargs) self.type = 'video' self.duration = duration self.content_provider = self.get_or_new_from_json_dict( content_provider, ContentProvider ) class AudioMessage(Message): def __init__(self, id=None, duration=None, content_provider=None, **kwargs): super(AudioMessage, self).__init__(id=id, **kwargs) self.type = 'audio' self.duration = duration self.content_provider = self.get_or_new_from_json_dict( content_provider, ContentProvider ) class LocationMessage(Message):
Apache License 2.0
dials/dials
algorithms/integration/processor.py
_Manager.initialize
python
def initialize(self):
        start_time = time()
        assert "bbox" in self.reflections, "Reflections have no bbox"
        if self.params.mp.nproc is libtbx.Auto:
            self.params.mp.nproc = available_cores()
            logger.info(f"Setting nproc={self.params.mp.nproc}")
        self.compute_jobs()
        self.split_reflections()
        self.compute_processors()
        self.manager = ReflectionManager(self.jobs, self.reflections)
        self.time.initialize = time() - start_time
Initialise the processing
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/integration/processor.py#L613-L636
import itertools import logging import math from time import time import psutil import boost_adaptbx.boost.python import libtbx import dials.algorithms.integration import dials.util import dials.util.log from dials.array_family import flex from dials.model.data import make_image from dials.util import tabulate from dials.util.log import rehandle_cached_records from dials.util.mp import available_cores, multi_node_parallel_map from dials_algorithms_integration_integrator_ext import ( Executor, Group, GroupList, Job, JobList, ReflectionManager, ReflectionManagerPerImage, ShoeboxProcessor, ) try: import resource except ImportError: resource = None __all__ = [ "Block", "build_processor", "Debug", "Executor", "Group", "GroupList", "Job", "job", "JobList", "Lookup", "MultiProcessing", "NullTask", "Parameters", "Processor2D", "Processor3D", "ProcessorFlat3D", "ProcessorSingle2D", "ProcessorStills", "ReflectionManager", "ReflectionManagerPerImage", "Shoebox", "ShoeboxProcessor", "Task", ] logger = logging.getLogger(__name__) def assess_available_memory(params): available_memory = psutil.virtual_memory().available available_swap = psutil.swap_memory().free available_incl_swap = available_memory + available_swap available_limit = available_incl_swap * params.block.max_memory_usage available_immediate_limit = available_memory * params.block.max_memory_usage report = [ "Memory situation report:", ] def _report(description, numbytes): report.append(f" {description:<50}:{numbytes/1e9:5.1f} GB") _report("Available system memory (excluding swap)", available_memory) _report("Available swap memory", available_swap) _report("Available system memory (including swap)", available_incl_swap) _report("Maximum memory for processing (including swap)", available_limit) _report( "Maximum memory for processing (excluding swap)", available_immediate_limit, ) rlimit = getattr(resource, "RLIMIT_VMEM", getattr(resource, "RLIMIT_AS", None)) if rlimit: try: ulimit = resource.getrlimit(rlimit)[0] if ulimit <= 0 or ulimit > (2 ** 62): report.append(" no memory ulimit set") else: ulimit_used = psutil.Process().memory_info().rss _report("Memory ulimit detected", ulimit) _report("Memory ulimit in use", ulimit_used) available_memory = max(0, min(available_memory, ulimit - ulimit_used)) available_incl_swap = max( 0, min(available_incl_swap, ulimit - ulimit_used) ) available_immediate_limit = ( available_memory * params.block.max_memory_usage ) _report("Available system memory (limited)", available_memory) _report( "Available system memory (incl. swap; limited)", available_incl_swap, ) _report( "Maximum memory for processing (exc. 
swap; limited)", available_immediate_limit, ) except Exception as e: logger.debug( "Could not obtain ulimit values due to %s", str(e), exc_info=True ) return available_immediate_limit, available_incl_swap, report def _average_bbox_size(reflections): bbox = reflections["bbox"] sel = flex.random_selection(len(bbox), min(len(bbox), 1000)) subset_bbox = bbox.select(sel) xmin, xmax, ymin, ymax, zmin, zmax = subset_bbox.parts() xsize = flex.mean((xmax - xmin).as_double()) ysize = flex.mean((ymax - ymin).as_double()) zsize = flex.mean((zmax - zmin).as_double()) return xsize, ysize, zsize @boost_adaptbx.boost.python.inject_into(Executor) class _: @staticmethod def __getinitargs__(): return () class _Job: def __init__(self): self.index = 0 self.nthreads = 1 job = _Job() class MultiProcessing: def __init__(self): self.method = "multiprocessing" self.nproc = 1 self.njobs = 1 self.nthreads = 1 self.n_subset_split = None def update(self, other): self.method = other.method self.nproc = other.nproc self.njobs = other.njobs self.nthreads = other.nthreads self.n_subset_split = other.n_subset_split class Lookup: def __init__(self): self.mask = None def update(self, other): self.mask = other.mask class Block: def __init__(self): self.size = libtbx.Auto self.units = "degrees" self.threshold = 0.99 self.force = False self.max_memory_usage = 0.90 def update(self, other): self.size = other.size self.units = other.units self.threshold = other.threshold self.force = other.force self.max_memory_usage = other.max_memory_usage class Shoebox: def __init__(self): self.flatten = False self.partials = False def update(self, other): self.flatten = other.flatten self.partials = other.partials class Debug: def __init__(self): self.output = False self.select = None self.split_experiments = True self.separate_files = True def update(self, other): self.output = other.output self.select = other.select self.split_experiments = other.split_experiments self.separate_files = other.separate_files class Parameters: def __init__(self): self.mp = MultiProcessing() self.lookup = Lookup() self.block = Block() self.shoebox = Shoebox() self.debug = Debug() def update(self, other): self.mp.update(other.mp) self.lookup.update(other.lookup) self.block.update(other.block) self.shoebox.update(other.shoebox) self.debug.update(other.debug) def execute_parallel_task(task): dials.util.log.config_simple_cached() result = task() handlers = logging.getLogger("dials").handlers assert len(handlers) == 1, "Invalid number of logging handlers" return result, handlers[0].records class _Processor: def __init__(self, manager): self.manager = manager @property def executor(self): return self.manager.executor @executor.setter def executor(self, function): self.manager.executor = function def process(self): start_time = time() self.manager.initialize() mp_method = self.manager.params.mp.method mp_njobs = self.manager.params.mp.njobs mp_nproc = self.manager.params.mp.nproc assert mp_nproc > 0, "Invalid number of processors" if mp_nproc * mp_njobs > len(self.manager): mp_nproc = min(mp_nproc, len(self.manager)) mp_njobs = int(math.ceil(len(self.manager) / mp_nproc)) logger.info(self.manager.summary()) if mp_njobs > 1: assert mp_method != "none" and mp_method is not None logger.info( " Using %s with %d parallel job(s) and %d processes per node\n", mp_method, mp_njobs, mp_nproc, ) else: logger.info(" Using multiprocessing with %d parallel job(s)\n", mp_nproc) if mp_njobs * mp_nproc > 1: def process_output(result): rehandle_cached_records(result[1]) 
self.manager.accumulate(result[0]) multi_node_parallel_map( func=execute_parallel_task, iterable=list(self.manager.tasks()), njobs=mp_njobs, nproc=mp_nproc, callback=process_output, cluster_method=mp_method, preserve_order=True, ) else: for task in self.manager.tasks(): self.manager.accumulate(task()) self.manager.finalize() end_time = time() self.manager.time.user_time = end_time - start_time result1, result2 = self.manager.result() return result1, result2, self.manager.time class _ProcessorRot(_Processor): def __init__(self, experiments, manager): if not experiments.all_sequences(): raise RuntimeError( """ An inappropriate processing algorithm may have been selected! Trying to perform rotation processing when not all experiments are indicated as rotation experiments. """ ) super().__init__(manager) class NullTask: def __init__(self, index, reflections): self.index = index self.reflections = reflections def __call__(self): return dials.algorithms.integration.Result( index=self.index, reflections=self.reflections, data=None, read_time=0, extract_time=0, process_time=0, total_time=0, ) class Task: def __init__(self, index, job, experiments, reflections, params, executor=None): assert executor is not None, "No executor given" assert len(reflections) > 0, "Zero reflections given" self.index = index self.job = job self.experiments = experiments self.reflections = reflections self.params = params self.executor = executor def __call__(self): start_time = time() job.index = self.index exp_id = list(set(self.reflections["id"])) imageset = self.experiments[exp_id[0]].imageset for i in exp_id[1:]: assert ( self.experiments[i].imageset == imageset ), "Task can only handle 1 imageset" frame0, frame1 = self.job try: allowed_range = imageset.get_array_range() except Exception: allowed_range = 0, len(imageset) try: assert frame0 < frame1 assert allowed_range[1] > allowed_range[0] assert frame0 >= allowed_range[0] assert frame1 <= allowed_range[1] assert (frame1 - frame0) <= len(imageset) if len(imageset) > 1: imageset = imageset[frame0:frame1] except Exception as e: raise RuntimeError(f"Programmer Error: bad array range: {e}") try: frame0, frame1 = imageset.get_array_range() except Exception: frame0, frame1 = (0, len(imageset)) self.executor.initialize(frame0, frame1, self.reflections) self.reflections["shoebox"] = flex.shoebox( self.reflections["panel"], self.reflections["bbox"], allocate=False, flatten=self.params.shoebox.flatten, ) processor = ShoeboxProcessor( self.reflections, len(imageset.get_detector()), frame0, frame1, self.params.debug.output, ) read_time = 0.0 for i in range(len(imageset)): st = time() image = imageset.get_corrected_data(i) if imageset.is_marked_for_rejection(i): mask = tuple(flex.bool(im.accessor(), False) for im in image) else: mask = imageset.get_mask(i) if self.params.lookup.mask is not None: assert len(mask) == len( self.params.lookup.mask ), "Mask/Image are incorrect size %d %d" % ( len(mask), len(self.params.lookup.mask), ) mask = tuple( m1 & m2 for m1, m2 in zip(self.params.lookup.mask, mask) ) read_time += time() - st processor.next(make_image(image, mask), self.executor) del image del mask assert processor.finished(), "Data processor is not finished" if self.params.debug.output and self.params.debug.separate_files: output = self.reflections if self.params.debug.select is not None: output = output.select(self.params.debug.select(output)) if self.params.debug.split_experiments: output = output.split_by_experiment_id() for table in output: i = table["id"][0] 
table.as_file("shoeboxes_%d_%d.refl" % (self.index, i)) else: output.as_file("shoeboxes_%d.refl" % self.index) if self.params.debug.separate_files or not self.params.debug.output: del self.reflections["shoebox"] self.executor.finalize() return dials.algorithms.integration.Result( index=self.index, reflections=self.reflections, data=self.executor.data(), read_time=read_time, extract_time=processor.extract_time(), process_time=processor.process_time(), total_time=time() - start_time, ) class _Manager: def __init__(self, experiments, reflections, params): self.executor = None self.experiments = experiments self.reflections = reflections self.data = {} self.params = params self.finalized = False self.time = dials.algorithms.integration.TimingInfo()
BSD 3-Clause New or Revised License
ggallohernandez/pymatriz
pymatriz/rest_client.py
RestClient.api_post
python
def api_post(self, path, data, retry=True):
        headers = {'authorization': f'Bearer {self.settings["token"]}',
                   'Origin': self.config["url"],
                   'content-type': 'application/json',
                   'x-csrf-token': self.settings["csrfToken"]}
        response = self.session.post(self._url(path), data, headers=headers,
                                     verify=self.config["ssl"] if "ssl" in self.config else True,
                                     proxies=self.config["proxies"] if "proxies" in self.config else None)
        if response.status_code == 401:
            if retry:
                self.update_token()
                self.api_post(path, data, False)
            else:
                raise ApiException("Authentication Fails.")
        elif response.status_code < 200 or response.status_code > 299:
            raise ApiException(f"Failure requesting {path}. Response ({response.status_code}): {response.text}")
        return response
Make a POST request to the API.
        :param path: path to the API resource.
        :type path: str
        :param retry: (optional) True: update the token and resend the request if the response code is 401.
            False: raise an exception if the response code is 401.
        :type retry: str
        :return: response of the API.
        :rtype: Response
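A hedged sketch of the retry behaviour the docstring describes: after update_token(), a POST that comes back 401 is retried once with a refreshed token. The config keys, endpoint path and payload are assumptions based on the context below, not values from this record.

# Sketch only: credentials, the endpoint and the payload are placeholders;
# api_post itself performs the single 401 retry described in the docstring.
import json
from pymatriz.rest_client import RestClient

config = {"username": "user", "password": "secret",
          "url": "https://mtzdma.primary.ventures"}    # assumed config keys
client = RestClient(config)
client.update_token()                                   # obtain session token first

payload = json.dumps({"example": True})                 # hypothetical body
response = client.api_post("some/endpoint", payload)    # retried once on 401
print(response.status_code)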
https://github.com/ggallohernandez/pymatriz/blob/b76e45fa288efd60cf27b8d30706b03bc1b9689f/pymatriz/rest_client.py#L233-L260
import csv from collections import defaultdict from datetime import datetime, timedelta import pandas as pd from io import StringIO import requests import logging from http.client import HTTPConnection from lxml import html import re import base64 import json from pymatriz import urls, messages from pymatriz.client_interface import ApiClient from pymatriz.enums import Market, FieldType, HistoryFieldType from pymatriz.exceptions import ApiException class RestClient(ApiClient): def __init__(self, config): super().__init__(config) self.session = requests.Session() def update_token(self): username = self.config["username"] password = self.config["password"] client_id = "LlRkDft0IVkhGLXoknIaOtvlcc7eVAfX" auth0_api_id = "matriz.auth0.com" auth0_client = "eyJuYW1lIjoibG9jay5qcyIsInZlcnNpb24iOiIxMS41LjIiLCJsaWJfdmVyc2lvbiI6IjkuNC4yIn0=" realm = "dma3-prod-db" credential_type = "http://auth0.com/oauth/grant-type/password-realm" matriz_url = "https://mtzdma.primary.ventures" redirect_uri = "https://mtzdma.primary.ventures/auth/auth0/callback" payload = json.dumps({ "client_id": client_id, "username": username, "password": password, "realm": realm, "credential_type": credential_type }) headers = { 'authority': auth0_api_id, 'auth0-client': auth0_client, 'origin': matriz_url, 'referer': matriz_url, 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'cross-site', 'Content-Type': 'application/json', } response = self.session.request("POST", urls.authenticate, headers=headers, data=payload) url = urls.authorize.format(client_id=client_id, response_type="token", response_mode="form_post", redirect_uri=redirect_uri, connection=realm, realm=realm, scope="openid profile email", login_ticket=response.json()["login_ticket"], auth0_client=auth0_client) payload = {} headers = { 'authority': auth0_api_id, 'upgrade-insecure-requests': '1', 'sec-fetch-site': 'cross-site', 'sec-fetch-mode': 'navigate', 'sec-fetch-user': '?1', 'sec-fetch-dest': 'document', 'referer': matriz_url, } response = self.session.request("GET", url, headers=headers, data=payload) root = html.fromstring(response.text.encode('utf8')) payload = dict(map(lambda i: (i.name, i.value), root.xpath("//form/input"))) response = self.session.post(root.xpath("//form")[0].action, data=payload) settings = json.loads(base64.b64decode(re.search("var settings = '(.*)'", response.text).groups()[0])) self.settings = settings return settings def get_market_data(self): response = self.api_request(urls.market_data) df = pd.DataFrame(self.get_messages(response.text)) df = df.drop([FieldType.Type.value], axis=1) return df def get_intraday_history(self, tickers, **kwargs): return self._get_history_call(urls.historical_series_intraday, tickers, **kwargs) def get_daily_history(self, tickers, **kwargs): return self._get_history_call(urls.historical_series_daily, tickers, **kwargs) def _get_history_call(self, url_template, tickers, **kwargs): responses = [] start_date = datetime.fromordinal(kwargs["start_date"].toordinal()).isoformat("T") + "Z" if "start_date" in kwargs else (datetime.utcnow() - timedelta(days=1)).isoformat("T") + "Z" end_date = datetime.fromordinal(kwargs["end_date"].toordinal()).isoformat("T") + "Z" if "end_date" in kwargs else (datetime.utcnow() + timedelta(days=1)).isoformat("T") + "Z" instruments = self.build_instruments(tickers, **kwargs) for instrument in instruments: url = url_template.format(instrument=instrument.strip('"'), start_date=start_date, end_date=end_date) response = self.api_request(url) content = 
response.content.decode('utf-8') r = pd.read_csv(StringIO(content), sep=",") r.insert(0, HistoryFieldType.SymbolId.value, instrument.strip('"')) responses.extend(r.to_dict(orient='records')) df = pd.DataFrame(responses) if len(df) > 0: index = [] if HistoryFieldType.SymbolId.value in df: index.append(HistoryFieldType.SymbolId.value) if HistoryFieldType.Time.value in df: index.append(HistoryFieldType.Time.value) df.set_index(index, inplace=True) return df def get_all_instruments(self): response = self.api_request(urls.instruments) return pd.DataFrame(json.loads(response.text)) def market_data_subscription(self, tickers, **kwargs): instruments = self.build_instruments(tickers, **kwargs) instruments_string = ",".join(instruments) message = messages.MARKET_DATA_SUBSCRIPTION.format(instruments=instruments_string) response = self.api_post(urls.ws_subscribe.format(token=self.settings["token"], connection_id=self.settings["connId"]), message) def api_request(self, path, retry=True): headers = {'authorization': f'Bearer {self.settings["token"]}'} response = self.session.get(self._url(path), headers=headers, verify=self.config["ssl"] if "ssl" in self.config else True, proxies=self.config["proxies"] if "proxies" in self.config else None) if response.status_code == 401: if retry: self.update_token() self.api_request(path, False) else: raise ApiException("Authentication Fails.") elif response.status_code < 200 or response.status_code > 299: raise ApiException(f"Failure requesting {path}. Response ({response.status_code}): {response.text}") return response
MIT License
hacf-fr/renault-api
src/renault_api/credential_store.py
CredentialStore.get_value
python
def get_value(self, name: str) -> Optional[str]:
        if name in list(self._store.keys()):
            cred = self._store[name]
            if not cred.has_expired():
                return cred.value
        return None
Get a credential value from the credential store.
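A small sketch of the lookup behaviour: get_value returns None both for unknown names and for expired credentials, as the body above shows. How entries are added is not visible in this record, so only the miss case is illustrated; the key name is made up.

# Only the miss path is exercised here; populating the store is outside this record.
from renault_api.credential_store import CredentialStore

store = CredentialStore()
print(store.get_value("some-credential"))  # -> None (illustrative key name)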
https://github.com/hacf-fr/renault-api/blob/27b510275d19d2d81af420c029db37ab6ca0f203/src/renault_api/credential_store.py#L38-L44
import json import os from typing import Dict from typing import List from typing import Optional import jwt from renault_api.const import PERMANENT_KEYS from renault_api.credential import Credential from renault_api.credential import JWTCredential class CredentialStore: def __init__(self) -> None: self._store: Dict[str, Credential] = {} def __getitem__(self, name: str) -> Credential: if name in list(self._store.keys()): cred = self._store[name] if not cred.has_expired(): return cred raise KeyError(name) def get(self, name: str) -> Optional[Credential]: if name in list(self._store.keys()): cred = self._store[name] if not cred.has_expired(): return cred return None
MIT License
roclark/sportsipy
sportsipy/mlb/teams.py
Team.bases_on_balls
python
def bases_on_balls(self):
        return self._bases_on_balls
Returns an ``int`` of the number of bases on walks.
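A hedged usage sketch: the constructor in the context below accepts a team_name, so a valid team abbreviation is assumed, and constructing Team triggers a live web request to the stats pages.

# "HOU" is only an example abbreviation; Team scrapes the stats site on construction.
from sportsipy.mlb.teams import Team

team = Team("HOU")
print(team.bases_on_balls)  # -> int, walks drawn by the team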
https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/mlb/teams.py#L888-L892
import pandas as pd import re from .constants import (ELEMENT_INDEX, PARSING_SCHEME, TEAM_ELEMENT, TEAM_STATS_URL) from functools import wraps from .. import utils from ..decorators import float_property_decorator, int_property_decorator from .mlb_utils import _retrieve_all_teams from .roster import Roster from .schedule import Schedule def mlb_int_property_decorator(func): @property @wraps(func) def wrapper(*args): value = func(*args) field = func.__name__ try: record = value.split('-') except AttributeError: return None try: return int(record[TEAM_ELEMENT[field]]) except (TypeError, ValueError, IndexError): return None return wrapper class Team: def __init__(self, team_name=None, team_data=None, rank=None, year=None, standings_file=None, teams_file=None): self._year = year self._rank = rank self._abbreviation = None self._name = None self._league = None self._games = None self._wins = None self._losses = None self._win_percentage = None self._streak = None self._runs = None self._runs_against = None self._run_difference = None self._strength_of_schedule = None self._simple_rating_system = None self._pythagorean_win_loss = None self._luck = None self._interleague_record = None self._home_record = None self._away_record = None self._extra_inning_record = None self._single_run_record = None self._record_vs_right_handed_pitchers = None self._record_vs_left_handed_pitchers = None self._record_vs_teams_over_500 = None self._record_vs_teams_under_500 = None self._last_ten_games_record = None self._last_twenty_games_record = None self._last_thirty_games_record = None self._number_players_used = None self._average_batter_age = None self._plate_appearances = None self._at_bats = None self._total_runs = None self._hits = None self._doubles = None self._triples = None self._home_runs = None self._runs_batted_in = None self._stolen_bases = None self._times_caught_stealing = None self._bases_on_balls = None self._times_struck_out = None self._batting_average = None self._on_base_percentage = None self._slugging_percentage = None self._on_base_plus_slugging_percentage = None self._on_base_plus_slugging_percentage_plus = None self._total_bases = None self._grounded_into_double_plays = None self._times_hit_by_pitch = None self._sacrifice_hits = None self._sacrifice_flies = None self._intentional_bases_on_balls = None self._runners_left_on_base = None self._number_of_pitchers = None self._average_pitcher_age = None self._runs_allowed_per_game = None self._earned_runs_against = None self._games_finished = None self._complete_games = None self._shutouts = None self._complete_game_shutouts = None self._saves = None self._innings_pitched = None self._hits_allowed = None self._home_runs_against = None self._bases_on_walks_given = None self._strikeouts = None self._hit_pitcher = None self._balks = None self._wild_pitches = None self._batters_faced = None self._earned_runs_against_plus = None self._fielding_independent_pitching = None self._whip = None self._hits_per_nine_innings = None self._home_runs_per_nine_innings = None self._bases_on_walks_given_per_nine_innings = None self._strikeouts_per_nine_innings = None self._strikeouts_per_base_on_balls = None self._opposing_runners_left_on_base = None if team_name: team_data = self._retrieve_team_data(year, team_name, standings_file, teams_file) self._parse_team_data(team_data) def __str__(self): return f'{self.name} ({self.abbreviation}) - {self._year}' def __repr__(self): return self.__str__() def _retrieve_team_data(self, year, team_name, standings_file=None, 
teams_file=None): team_data_dict, year = _retrieve_all_teams(year, standings_file, teams_file) self._year = year team_data = team_data_dict[team_name]['data'] self._rank = team_data_dict[team_name]['rank'] return team_data def _parse_name(self, team_data): name = team_data('a')[0].text name = re.sub(r'.*title="', '', str(name)) name = re.sub(r'".*', '', name) setattr(self, '_name', name) def _parse_team_data(self, team_data): for field in self.__dict__: short_field = str(field)[1:] if field == '_rank' or field == '_year': continue elif field == '_name': self._parse_name(team_data) continue index = 0 if short_field in ELEMENT_INDEX.keys(): index = ELEMENT_INDEX[short_field] value = utils._parse_field(PARSING_SCHEME, team_data, short_field, index) setattr(self, field, value) @property def dataframe(self): fields_to_include = { 'abbreviation': self.abbreviation, 'at_bats': self.at_bats, 'average_batter_age': self.average_batter_age, 'average_pitcher_age': self.average_pitcher_age, 'away_losses': self.away_losses, 'away_record': self.away_record, 'away_wins': self.away_wins, 'balks': self.balks, 'bases_on_balls': self.bases_on_balls, 'bases_on_walks_given': self.bases_on_walks_given, 'bases_on_walks_given_per_nine_innings': self.bases_on_walks_given_per_nine_innings, 'batters_faced': self.batters_faced, 'batting_average': self.batting_average, 'complete_game_shutouts': self.complete_game_shutouts, 'complete_games': self.complete_games, 'doubles': self.doubles, 'earned_runs_against': self.earned_runs_against, 'earned_runs_against_plus': self.earned_runs_against_plus, 'extra_inning_losses': self.extra_inning_losses, 'extra_inning_record': self.extra_inning_record, 'extra_inning_wins': self.extra_inning_wins, 'fielding_independent_pitching': self.fielding_independent_pitching, 'games': self.games, 'games_finished': self.games_finished, 'grounded_into_double_plays': self.grounded_into_double_plays, 'hit_pitcher': self.hit_pitcher, 'hits': self.hits, 'hits_allowed': self.hits_allowed, 'hits_per_nine_innings': self.hits_per_nine_innings, 'home_losses': self.home_losses, 'home_record': self.home_record, 'home_runs': self.home_runs, 'home_runs_against': self.home_runs_against, 'home_runs_per_nine_innings': self.home_runs_per_nine_innings, 'home_wins': self.home_wins, 'innings_pitched': self.innings_pitched, 'intentional_bases_on_balls': self.intentional_bases_on_balls, 'interleague_record': self.interleague_record, 'last_ten_games_record': self.last_ten_games_record, 'last_thirty_games_record': self.last_thirty_games_record, 'last_twenty_games_record': self.last_twenty_games_record, 'league': self.league, 'losses': self.losses, 'losses_last_ten_games': self.losses_last_ten_games, 'losses_last_thirty_games': self.losses_last_thirty_games, 'losses_last_twenty_games': self.losses_last_twenty_games, 'losses_vs_left_handed_pitchers': self.losses_vs_left_handed_pitchers, 'losses_vs_right_handed_pitchers': self.losses_vs_right_handed_pitchers, 'losses_vs_teams_over_500': self.losses_vs_teams_over_500, 'losses_vs_teams_under_500': self.losses_vs_teams_under_500, 'luck': self.luck, 'name': self.name, 'number_of_pitchers': self.number_of_pitchers, 'number_players_used': self.number_players_used, 'on_base_percentage': self.on_base_percentage, 'on_base_plus_slugging_percentage': self.on_base_plus_slugging_percentage, 'on_base_plus_slugging_percentage_plus': self.on_base_plus_slugging_percentage_plus, 'opposing_runners_left_on_base': self.opposing_runners_left_on_base, 'plate_appearances': self.plate_appearances, 
'pythagorean_win_loss': self.pythagorean_win_loss, 'rank': self.rank, 'record_vs_left_handed_pitchers': self.record_vs_left_handed_pitchers, 'record_vs_right_handed_pitchers': self.record_vs_right_handed_pitchers, 'record_vs_teams_over_500': self.record_vs_teams_over_500, 'record_vs_teams_under_500': self.record_vs_teams_under_500, 'run_difference': self.run_difference, 'runners_left_on_base': self.runners_left_on_base, 'runs': self.runs, 'runs_against': self.runs_against, 'runs_allowed_per_game': self.runs_allowed_per_game, 'runs_batted_in': self.runs_batted_in, 'sacrifice_flies': self.sacrifice_flies, 'sacrifice_hits': self.sacrifice_hits, 'saves': self.saves, 'shutouts': self.shutouts, 'simple_rating_system': self.simple_rating_system, 'single_run_losses': self.single_run_losses, 'single_run_record': self.single_run_record, 'single_run_wins': self.single_run_wins, 'slugging_percentage': self.slugging_percentage, 'stolen_bases': self.stolen_bases, 'streak': self.streak, 'strength_of_schedule': self.strength_of_schedule, 'strikeouts': self.strikeouts, 'strikeouts_per_base_on_balls': self.strikeouts_per_base_on_balls, 'strikeouts_per_nine_innings': self.strikeouts_per_nine_innings, 'times_caught_stealing': self.times_caught_stealing, 'times_hit_by_pitch': self.times_hit_by_pitch, 'times_struck_out': self.times_struck_out, 'total_bases': self.total_bases, 'total_runs': self.total_runs, 'triples': self.triples, 'whip': self.whip, 'wild_pitches': self.wild_pitches, 'win_percentage': self.win_percentage, 'wins': self.wins, 'wins_last_ten_games': self.wins_last_ten_games, 'wins_last_thirty_games': self.wins_last_thirty_games, 'wins_last_twenty_games': self.wins_last_twenty_games, 'wins_vs_left_handed_pitchers': self.wins_vs_left_handed_pitchers, 'wins_vs_right_handed_pitchers': self.wins_vs_right_handed_pitchers, 'wins_vs_teams_over_500': self.wins_vs_teams_over_500, 'wins_vs_teams_under_500': self.wins_vs_teams_under_500 } return pd.DataFrame([fields_to_include], index=[self._abbreviation]) @int_property_decorator def rank(self): return self._rank @property def abbreviation(self): return self._abbreviation @property def schedule(self): return Schedule(self._abbreviation, self._year) @property def roster(self): return Roster(self._abbreviation, self._year) @property def name(self): return self._name @property def league(self): return self._league @int_property_decorator def games(self): return self._games @int_property_decorator def wins(self): return self._wins @int_property_decorator def losses(self): return self._losses @float_property_decorator def win_percentage(self): return self._win_percentage @property def streak(self): return self._streak @float_property_decorator def runs(self): return self._runs @float_property_decorator def runs_against(self): return self._runs_against @float_property_decorator def run_difference(self): return self._run_difference @float_property_decorator def strength_of_schedule(self): return self._strength_of_schedule @float_property_decorator def simple_rating_system(self): return self._simple_rating_system @property def pythagorean_win_loss(self): return self._pythagorean_win_loss @int_property_decorator def luck(self): return self._luck @property def interleague_record(self): return self._interleague_record @property def home_record(self): return self._home_record @mlb_int_property_decorator def home_wins(self): return self._home_record @mlb_int_property_decorator def home_losses(self): return self._home_record @property def away_record(self): return 
self._away_record @mlb_int_property_decorator def away_wins(self): return self._away_record @mlb_int_property_decorator def away_losses(self): return self._away_record @property def extra_inning_record(self): return self._extra_inning_record @mlb_int_property_decorator def extra_inning_wins(self): return self._extra_inning_record @mlb_int_property_decorator def extra_inning_losses(self): return self._extra_inning_record @property def single_run_record(self): return self._single_run_record @mlb_int_property_decorator def single_run_wins(self): return self._single_run_record @mlb_int_property_decorator def single_run_losses(self): return self._single_run_record @property def record_vs_right_handed_pitchers(self): return self._record_vs_right_handed_pitchers @mlb_int_property_decorator def wins_vs_right_handed_pitchers(self): return self._record_vs_right_handed_pitchers @mlb_int_property_decorator def losses_vs_right_handed_pitchers(self): return self._record_vs_right_handed_pitchers @property def record_vs_left_handed_pitchers(self): return self._record_vs_left_handed_pitchers @mlb_int_property_decorator def wins_vs_left_handed_pitchers(self): return self._record_vs_left_handed_pitchers @mlb_int_property_decorator def losses_vs_left_handed_pitchers(self): return self._record_vs_left_handed_pitchers @property def record_vs_teams_over_500(self): return self._record_vs_teams_over_500 @mlb_int_property_decorator def wins_vs_teams_over_500(self): return self._record_vs_teams_over_500 @mlb_int_property_decorator def losses_vs_teams_over_500(self): return self._record_vs_teams_over_500 @property def record_vs_teams_under_500(self): return self._record_vs_teams_under_500 @mlb_int_property_decorator def wins_vs_teams_under_500(self): return self._record_vs_teams_under_500 @mlb_int_property_decorator def losses_vs_teams_under_500(self): return self._record_vs_teams_under_500 @property def last_ten_games_record(self): return self._last_ten_games_record @mlb_int_property_decorator def wins_last_ten_games(self): return self._last_ten_games_record @mlb_int_property_decorator def losses_last_ten_games(self): return self._last_ten_games_record @property def last_twenty_games_record(self): return self._last_twenty_games_record @mlb_int_property_decorator def wins_last_twenty_games(self): return self._last_twenty_games_record @mlb_int_property_decorator def losses_last_twenty_games(self): return self._last_twenty_games_record @property def last_thirty_games_record(self): return self._last_thirty_games_record @mlb_int_property_decorator def wins_last_thirty_games(self): return self._last_thirty_games_record @mlb_int_property_decorator def losses_last_thirty_games(self): return self._last_thirty_games_record @int_property_decorator def number_players_used(self): return self._number_players_used @float_property_decorator def average_batter_age(self): return self._average_batter_age @int_property_decorator def plate_appearances(self): return self._plate_appearances @int_property_decorator def at_bats(self): return self._at_bats @int_property_decorator def total_runs(self): return self._total_runs @int_property_decorator def hits(self): return self._hits @int_property_decorator def doubles(self): return self._doubles @int_property_decorator def triples(self): return self._triples @int_property_decorator def home_runs(self): return self._home_runs @int_property_decorator def runs_batted_in(self): return self._runs_batted_in @int_property_decorator def stolen_bases(self): return self._stolen_bases 
@int_property_decorator def times_caught_stealing(self): return self._times_caught_stealing @int_property_decorator
MIT License
restran/hacker-scripts
misc/win_file_monitor/watchdog/observers/kqueue.py
KqueueEmitter.queue_event
python
def queue_event(self, event): EventEmitter.queue_event(self, event) if event.event_type == EVENT_TYPE_CREATED: self._register_kevent(event.src_path, event.is_directory) elif event.event_type == EVENT_TYPE_MOVED: self._unregister_kevent(event.src_path) self._register_kevent(event.dest_path, event.is_directory) elif event.event_type == EVENT_TYPE_DELETED: self._unregister_kevent(event.src_path)
Handles queueing a single event object. :param event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass.
https://github.com/restran/hacker-scripts/blob/30bbfd8bb97cda2b4762156aaf2973296f0e7cde/misc/win_file_monitor/watchdog/observers/kqueue.py#L510-L529
from __future__ import with_statement from watchdog.utils import platform import threading import errno import sys import stat import os if sys.version_info < (2, 7, 0): import select_backport as select else: import select from pathtools.path import absolute_path from watchdog.observers.api import ( BaseObserver, EventEmitter, DEFAULT_OBSERVER_TIMEOUT, DEFAULT_EMITTER_TIMEOUT ) from watchdog.utils.dirsnapshot import DirectorySnapshot from watchdog.events import ( DirMovedEvent, DirDeletedEvent, DirCreatedEvent, DirModifiedEvent, FileMovedEvent, FileDeletedEvent, FileCreatedEvent, FileModifiedEvent, EVENT_TYPE_MOVED, EVENT_TYPE_DELETED, EVENT_TYPE_CREATED ) MAX_EVENTS = 4096 O_EVTONLY = 0x8000 if platform.is_darwin(): WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY else: WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR WATCHDOG_KQ_FFLAGS = ( select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK | select.KQ_NOTE_RENAME | select.KQ_NOTE_REVOKE ) def is_deleted(kev): return kev.fflags & select.KQ_NOTE_DELETE def is_modified(kev): fflags = kev.fflags return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE) def is_attrib_modified(kev): return kev.fflags & select.KQ_NOTE_ATTRIB def is_renamed(kev): return kev.fflags & select.KQ_NOTE_RENAME class KeventDescriptorSet(object): def __init__(self): self._descriptors = set() self._descriptor_for_path = dict() self._descriptor_for_fd = dict() self._kevents = list() self._lock = threading.Lock() @property def kevents(self): with self._lock: return self._kevents @property def paths(self): with self._lock: return list(self._descriptor_for_path.keys()) def get_for_fd(self, fd): with self._lock: return self._descriptor_for_fd[fd] def get(self, path): with self._lock: path = absolute_path(path) return self._get(path) def __contains__(self, path): with self._lock: path = absolute_path(path) return self._has_path(path) def add(self, path, is_directory): with self._lock: path = absolute_path(path) if not self._has_path(path): self._add_descriptor(KeventDescriptor(path, is_directory)) def remove(self, path): with self._lock: path = absolute_path(path) if self._has_path(path): self._remove_descriptor(self._get(path)) def clear(self): with self._lock: for descriptor in self._descriptors: descriptor.close() self._descriptors.clear() self._descriptor_for_fd.clear() self._descriptor_for_path.clear() self._kevents = [] def _get(self, path): return self._descriptor_for_path[path] def _has_path(self, path): return path in self._descriptor_for_path def _add_descriptor(self, descriptor): self._descriptors.add(descriptor) self._kevents.append(descriptor.kevent) self._descriptor_for_path[descriptor.path] = descriptor self._descriptor_for_fd[descriptor.fd] = descriptor def _remove_descriptor(self, descriptor): self._descriptors.remove(descriptor) del self._descriptor_for_fd[descriptor.fd] del self._descriptor_for_path[descriptor.path] self._kevents.remove(descriptor.kevent) descriptor.close() class KeventDescriptor(object): def __init__(self, path, is_directory): self._path = absolute_path(path) self._is_directory = is_directory self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS) self._kev = select.kevent(self._fd, filter=WATCHDOG_KQ_FILTER, flags=WATCHDOG_KQ_EV_FLAGS, fflags=WATCHDOG_KQ_FFLAGS) @property def fd(self): return self._fd @property def path(self): return self._path 
@property def kevent(self): return self._kev @property def is_directory(self): return self._is_directory def close(self): try: os.close(self.fd) except OSError: pass @property def key(self): return (self.path, self.is_directory) def __eq__(self, descriptor): return self.key == descriptor.key def __ne__(self, descriptor): return self.key != descriptor.key def __hash__(self): return hash(self.key) def __repr__(self): return "<KeventDescriptor: path=%s, is_directory=%s>" % (self.path, self.is_directory) class KqueueEmitter(EventEmitter): def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._kq = select.kqueue() self._lock = threading.RLock() self._descriptors = KeventDescriptorSet() def walker_callback(path, stat_info, self=self): self._register_kevent(path, stat.S_ISDIR(stat_info.st_mode)) self._snapshot = DirectorySnapshot(watch.path, watch.is_recursive, walker_callback) def _register_kevent(self, path, is_directory): try: self._descriptors.add(path, is_directory) except OSError as e: if e.errno == errno.ENOENT: pass else: raise def _unregister_kevent(self, path): self._descriptors.remove(path)
MIT License
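The dispatch in KqueueEmitter.queue_event above keeps the kqueue registrations in sync with the filesystem: created paths are registered, moved paths are re-registered under their destination, and deleted paths are dropped. A minimal standalone sketch of that bookkeeping, with the real kevent registration replaced by a plain set; all names and paths below are illustrative, not part of the original module:

EVENT_TYPE_CREATED, EVENT_TYPE_MOVED, EVENT_TYPE_DELETED = "created", "moved", "deleted"

class ToyEmitter:
    def __init__(self):
        self.watched = set()  # stands in for the KeventDescriptorSet

    def queue_event(self, event):
        # Mirror of the registration bookkeeping, minus the real kqueue calls.
        if event["event_type"] == EVENT_TYPE_CREATED:
            self.watched.add(event["src_path"])
        elif event["event_type"] == EVENT_TYPE_MOVED:
            self.watched.discard(event["src_path"])
            self.watched.add(event["dest_path"])
        elif event["event_type"] == EVENT_TYPE_DELETED:
            self.watched.discard(event["src_path"])

emitter = ToyEmitter()
emitter.queue_event({"event_type": "created", "src_path": "/tmp/a"})
emitter.queue_event({"event_type": "moved", "src_path": "/tmp/a", "dest_path": "/tmp/b"})
print(emitter.watched)  # {'/tmp/b'}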
bolt-project/bolt
bolt/spark/utils.py
zip_with_index
python
def zip_with_index(rdd): starts = [0] if rdd.getNumPartitions() > 1: nums = rdd.mapPartitions(lambda it: [sum(1 for _ in it)]).collect() count = sum(nums) for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) else: count = rdd.count() def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return count, rdd.mapPartitionsWithIndex(func)
Alternate version of Spark's zipWithIndex that eagerly returns count.
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/utils.py#L14-L31
def get_kv_shape(shape, key_axes): func = lambda axis: shape[axis] return _get_kv_func(func, shape, key_axes) def get_kv_axes(shape, key_axes): func = lambda axis: axis return _get_kv_func(func, shape, key_axes) def _get_kv_func(func, shape, key_axes): key_res = [func(axis) for axis in key_axes] value_res = [func(axis) for axis in range(len(shape)) if axis not in key_axes] return key_res, value_res
Apache License 2.0
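A pure-Python illustration of the offset bookkeeping that zip_with_index above performs, assuming three partitions holding 3, 2 and 4 elements; no Spark context is needed for this sketch:

nums = [3, 2, 4]                   # per-partition element counts
starts = [0]
for n in nums[:-1]:
    starts.append(starts[-1] + n)  # -> [0, 3, 5]
count = sum(nums)                  # -> 9

# Element j of partition k receives the global index starts[k] + j,
# which is exactly what func() yields via enumerate(it, starts[k]).
assert [starts[k] + j for k in range(3) for j in range(nums[k])] == list(range(count))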
oddt/oddt
oddt/utils.py
is_molecule
python
def is_molecule(obj): return is_openbabel_molecule(obj) or is_rdkit_molecule(obj)
Check whether an object is an `oddt.toolkits.{rdk,ob}.Molecule` instance. .. versionadded:: 0.6
https://github.com/oddt/oddt/blob/8cf555820d97a692ade81c101ebe10e28bcb3722/oddt/utils.py#L9-L14
from itertools import islice from types import GeneratorType import numpy as np import oddt
BSD 3-Clause New or Revised License
qiskit/qiskit-aqua
qiskit/aqua/components/optimizers/gsls.py
GSLS.__init__
python
def __init__(self, maxiter: int = 10000, max_eval: int = 10000, disp: bool = False, sampling_radius: float = 1.0e-6, sample_size_factor: int = 1, initial_step_size: float = 1.0e-2, min_step_size: float = 1.0e-10, step_size_multiplier: float = 0.4, armijo_parameter: float = 1.0e-1, min_gradient_norm: float = 1e-8, max_failed_rejection_sampling: int = 50, max_iter: Optional[int] = None) -> None: super().__init__() if max_iter is not None: warnings.warn('The max_iter parameter is deprecated as of ' '0.8.0 and will be removed no sooner than 3 months after the release. ' 'You should use maxiter instead.', DeprecationWarning) maxiter = max_iter for k, v in list(locals().items()): if k in self._OPTIONS: self._options[k] = v
Args: maxiter: Maximum number of iterations. max_eval: Maximum number of evaluations. disp: Set to True to display convergence messages. sampling_radius: Sampling radius to determine gradient estimate. sample_size_factor: The size of the sample set at each iteration is this number multiplied by the dimension of the problem, rounded to the nearest integer. initial_step_size: Initial step size for the descent algorithm. min_step_size: Minimum step size for the descent algorithm. step_size_multiplier: Step size reduction after unsuccessful steps, in the interval (0, 1). armijo_parameter: Armijo parameter for sufficient decrease criterion, in the interval (0, 1). min_gradient_norm: If the gradient norm is below this threshold, the algorithm stops. max_failed_rejection_sampling: Maximum number of attempts to sample points within bounds. max_iter: Deprecated, use maxiter.
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/components/optimizers/gsls.py#L40-L81
import warnings from typing import Dict, Optional, Tuple, List, Callable import logging import numpy as np from qiskit.aqua import aqua_globals from .optimizer import Optimizer, OptimizerSupportLevel logger = logging.getLogger(__name__) class GSLS(Optimizer): _OPTIONS = ['maxiter', 'max_eval', 'disp', 'sampling_radius', 'sample_size_factor', 'initial_step_size', 'min_step_size', 'step_size_multiplier', 'armijo_parameter', 'min_gradient_norm', 'max_failed_rejection_sampling']
Apache License 2.0
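A hedged instantiation sketch for the optimizer above, assuming GSLS is exported from qiskit.aqua.components.optimizers like the other optimizers in that package; the parameter values are illustrative only:

from qiskit.aqua.components.optimizers import GSLS

# Tighten the sampling radius and enable convergence messages.
optimizer = GSLS(maxiter=2000, sampling_radius=1e-5, initial_step_size=1e-2, disp=True)

# The deprecated max_iter keyword still works but emits a DeprecationWarning
# and simply overrides maxiter, as the constructor shows.
legacy = GSLS(max_iter=500)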
cityofzion/neo-python-core
neocore/KeyPair.py
KeyPair.PrivateKeyFromNEP2
python
def PrivateKeyFromNEP2(nep2_key, passphrase): if not nep2_key or len(nep2_key) != 58: raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key))) ADDRESS_HASH_SIZE = 4 ADDRESS_HASH_OFFSET = len(NEP_FLAG) + len(NEP_HEADER) try: decoded_key = base58.b58decode_check(nep2_key) except Exception as e: raise ValueError("Invalid nep2_key") address_hash = decoded_key[ADDRESS_HASH_OFFSET:ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE] encrypted = decoded_key[-32:] pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8') derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES) derived1 = derived[:32] derived2 = derived[32:] cipher = AES.new(derived2, AES.MODE_ECB) decrypted = cipher.decrypt(encrypted) private_key = xor_bytes(decrypted, derived1) kp_new = KeyPair(priv_key=private_key) kp_new_address = kp_new.GetAddress() kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode("utf-8")).digest() kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest() kp_new_address_hash = kp_new_address_hash_tmp2[:4] if (kp_new_address_hash != address_hash): raise ValueError("Wrong passphrase") return private_key
Gets the private key from a NEP-2 encrypted private key Args: nep2_key (str): The NEP-2 encrypted private key passphrase (str): The password used to decrypt the private key, as a unicode string Returns: bytes: The private key
https://github.com/cityofzion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/KeyPair.py#L109-L157
import hashlib import unicodedata import base58 import scrypt import bitcoin from Crypto.Cipher import AES from neocore.Cryptography.ECCurve import ECDSA from neocore.Cryptography.Crypto import Crypto from neocore.Cryptography.Helper import xor_bytes SCRYPT_ITERATIONS = 16384 SCRYPT_BLOCKSIZE = 8 SCRYPT_PARALLEL_FACTOR = 8 SCRYPT_KEY_LEN_BYTES = 64 NEP_HEADER = bytearray([0x01, 0x42]) NEP_FLAG = bytearray([0xe0]) class KeyPair(object): PublicKeyHash = None PublicKey = None PrivateKey = None def setup_curve(self): bitcoin.change_curve( 115792089210356248762697446949407573530086143415290314195533631308867097853951, 115792089210356248762697446949407573529996955224135760342422259061068512044369, 115792089210356248762697446949407573530086143415290314195533631308867097853948, 41058363725152142129326129780047268409114441015993725554835256314039467401291, 48439561293906451759052585252797914202762949526041747995844080717082404635286, 36134250956749795798585127919587881956611106672985015071877198253568414405109 ) def __init__(self, priv_key): self.setup_curve() length = len(priv_key) if length != 32 and length != 96 and length != 104: raise ValueError("Invalid private key") self.PrivateKey = bytearray(priv_key[-32:]) pubkey_encoded_not_compressed = None if length == 32: try: pubkey_encoded_not_compressed = bitcoin.privkey_to_pubkey(priv_key) except Exception as e: raise Exception("Could not determine public key") elif length == 96 or length == 104: skip = length - 96 pubkey_encoded_not_compressed = bytearray(b'\x04') + bytearray(priv_key[skip:skip + 64]) if pubkey_encoded_not_compressed: pubkey_points = bitcoin.decode_pubkey(pubkey_encoded_not_compressed, 'bin') pubx = pubkey_points[0] puby = pubkey_points[1] edcsa = ECDSA.secp256r1() self.PublicKey = edcsa.Curve.point(pubx, puby) self.PublicKeyHash = Crypto.ToScriptHash(self.PublicKey.encode_point(True), unhex=True) @staticmethod def PrivateKeyFromWIF(wif): if wif is None or len(wif) is not 52: raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif))) data = base58.b58decode(wif) length = len(data) if length is not 38 or data[0] is not 0x80 or data[33] is not 0x01: raise ValueError("Invalid format!") checksum = Crypto.Hash256(data[0:34])[0:4] if checksum != data[34:]: raise ValueError("Invalid WIF Checksum!") return data[1:33] @staticmethod
MIT License
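A hedged usage sketch for PrivateKeyFromNEP2 above; the key string and passphrase are placeholders, not a valid NEP-2 test vector:

from neocore.KeyPair import KeyPair

nep2_key = "6PY" + "x" * 55          # placeholder: any 58-character NEP-2 string
passphrase = "my secret passphrase"  # placeholder

try:
    priv = KeyPair.PrivateKeyFromNEP2(nep2_key, passphrase)  # 32 raw bytes on success
    keypair = KeyPair(priv_key=priv)
except ValueError as err:
    # Raised for malformed keys ("Invalid nep2_key") or a wrong passphrase.
    print(err)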
hyperledger/aries-cloudagent-python
aries_cloudagent/indy/sdk/issuer.py
IndySdkIssuer.merge_revocation_registry_deltas
python
async def merge_revocation_registry_deltas( self, fro_delta: str, to_delta: str ) -> str: return await indy.anoncreds.issuer_merge_revocation_registry_deltas( fro_delta, to_delta )
Merge revocation registry deltas. Args: fro_delta: original delta in JSON format to_delta: incoming delta in JSON format Returns: Merged delta in JSON format
https://github.com/hyperledger/aries-cloudagent-python/blob/fec69f1a2301e4745fc9d40cea190050e3f595fa/aries_cloudagent/indy/sdk/issuer.py#L344-L361
import json import logging from typing import Sequence, Tuple import indy.anoncreds import indy.blob_storage from indy.error import AnoncredsRevocationRegistryFullError, IndyError, ErrorCode from ...indy.sdk.profile import IndySdkProfile from ...messaging.util import encode from ...revocation.models.issuer_cred_rev_record import IssuerCredRevRecord from ...storage.error import StorageError from ..issuer import ( IndyIssuer, IndyIssuerError, IndyIssuerRevocationRegistryFullError, DEFAULT_CRED_DEF_TAG, DEFAULT_SIGNATURE_TYPE, ) from .error import IndyErrorHandler from .util import create_tails_reader, create_tails_writer LOGGER = logging.getLogger(__name__) class IndySdkIssuer(IndyIssuer): def __init__(self, profile: IndySdkProfile): self.profile = profile async def create_schema( self, origin_did: str, schema_name: str, schema_version: str, attribute_names: Sequence[str], ) -> Tuple[str, str]: with IndyErrorHandler("Error when creating schema", IndyIssuerError): schema_id, schema_json = await indy.anoncreds.issuer_create_schema( origin_did, schema_name, schema_version, json.dumps(attribute_names), ) return (schema_id, schema_json) async def credential_definition_in_wallet( self, credential_definition_id: str ) -> bool: try: await indy.anoncreds.issuer_create_credential_offer( self.profile.wallet.handle, credential_definition_id ) return True except IndyError as err: if err.error_code not in ( ErrorCode.CommonInvalidStructure, ErrorCode.WalletItemNotFound, ): raise IndyErrorHandler.wrap_error( err, "Error when checking wallet for credential definition", IndyIssuerError, ) from err return False async def create_and_store_credential_definition( self, origin_did: str, schema: dict, signature_type: str = None, tag: str = None, support_revocation: bool = False, ) -> Tuple[str, str]: with IndyErrorHandler( "Error when creating credential definition", IndyIssuerError ): ( credential_definition_id, credential_definition_json, ) = await indy.anoncreds.issuer_create_and_store_credential_def( self.profile.wallet.handle, origin_did, json.dumps(schema), tag or DEFAULT_CRED_DEF_TAG, signature_type or DEFAULT_SIGNATURE_TYPE, json.dumps({"support_revocation": support_revocation}), ) return (credential_definition_id, credential_definition_json) async def create_credential_offer(self, credential_definition_id: str) -> str: with IndyErrorHandler( "Exception when creating credential offer", IndyIssuerError ): credential_offer_json = await indy.anoncreds.issuer_create_credential_offer( self.profile.wallet.handle, credential_definition_id ) return credential_offer_json async def create_credential( self, schema: dict, credential_offer: dict, credential_request: dict, credential_values: dict, cred_ex_id: str, rev_reg_id: str = None, tails_file_path: str = None, ) -> Tuple[str, str]: encoded_values = {} schema_attributes = schema["attrNames"] for attribute in schema_attributes: try: credential_value = credential_values[attribute] except KeyError: raise IndyIssuerError( "Provided credential values are missing a value " + f"for the schema attribute '{attribute}'" ) encoded_values[attribute] = {} encoded_values[attribute]["raw"] = str(credential_value) encoded_values[attribute]["encoded"] = encode(credential_value) tails_reader_handle = ( await create_tails_reader(tails_file_path) if tails_file_path is not None else None ) try: ( credential_json, cred_rev_id, _, ) = await indy.anoncreds.issuer_create_credential( self.profile.wallet.handle, json.dumps(credential_offer), json.dumps(credential_request), 
json.dumps(encoded_values), rev_reg_id, tails_reader_handle, ) if cred_rev_id: issuer_cr_rec = IssuerCredRevRecord( state=IssuerCredRevRecord.STATE_ISSUED, cred_ex_id=cred_ex_id, rev_reg_id=rev_reg_id, cred_rev_id=cred_rev_id, ) async with self.profile.session() as session: await issuer_cr_rec.save( session, reason=( "Created issuer cred rev record for " f"rev reg id {rev_reg_id}, {cred_rev_id}" ), ) except AnoncredsRevocationRegistryFullError: LOGGER.warning( "Revocation registry %s is full: cannot create credential", rev_reg_id, ) raise IndyIssuerRevocationRegistryFullError( f"Revocation registry {rev_reg_id} is full" ) except IndyError as err: raise IndyErrorHandler.wrap_error( err, "Error when issuing credential", IndyIssuerError ) from err except StorageError as err: LOGGER.warning( ( "Created issuer cred rev record for " "Could not store issuer cred rev record for " "rev reg id %s, cred rev id %s: %s" ), rev_reg_id, cred_rev_id, err.roll_up, ) return (credential_json, cred_rev_id) async def revoke_credentials( self, rev_reg_id: str, tails_file_path: str, cred_rev_ids: Sequence[str] ) -> Tuple[str, Sequence[str]]: failed_crids = [] tails_reader_handle = await create_tails_reader(tails_file_path) result_json = None for cred_rev_id in cred_rev_ids: with IndyErrorHandler( "Exception when revoking credential", IndyIssuerError ): try: session = await self.profile.session() delta_json = await indy.anoncreds.issuer_revoke_credential( self.profile.wallet.handle, tails_reader_handle, rev_reg_id, cred_rev_id, ) issuer_cr_rec = await IssuerCredRevRecord.retrieve_by_ids( session, rev_reg_id, cred_rev_id, ) await issuer_cr_rec.set_state( session, IssuerCredRevRecord.STATE_REVOKED ) except IndyError as err: if err.error_code == ErrorCode.AnoncredsInvalidUserRevocId: LOGGER.error( ( "Abstaining from revoking credential on " "rev reg id %s, cred rev id=%s: " "already revoked or not yet issued" ), rev_reg_id, cred_rev_id, ) else: LOGGER.error( IndyErrorHandler.wrap_error( err, "Revocation error", IndyIssuerError ).roll_up ) failed_crids.append(cred_rev_id) continue except StorageError as err: LOGGER.warning( ( "Revoked credential on rev reg id %s, cred rev id %s " "without corresponding issuer cred rev record: %s" ), rev_reg_id, cred_rev_id, err.roll_up, ) if result_json: result_json = await self.merge_revocation_registry_deltas( result_json, delta_json ) else: result_json = delta_json return (result_json, failed_crids)
Apache License 2.0
2ndwatch/cloudendure-python
cloudendure/cloudendure_api/models/cloud_endure_bandwidth_throttling.py
CloudEndureBandwidthThrottling.to_dict
python
def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list( map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value) ) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict( map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items(), ) ) else: result[attr] = value if issubclass(CloudEndureBandwidthThrottling, dict): for key, value in self.items(): result[key] = value return result
Returns the model properties as a dict
https://github.com/2ndwatch/cloudendure-python/blob/f81d1be1422b7c19adedb06c584803eaaa811919/cloudendure/cloudendure_api/models/cloud_endure_bandwidth_throttling.py#L66-L93
import pprint import re import six class CloudEndureBandwidthThrottling: """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = {"bandwidth_throttling": "int"} attribute_map = {"bandwidth_throttling": "bandwidthThrottling"} def __init__(self, bandwidth_throttling=None): self._bandwidth_throttling = None self.discriminator = None if bandwidth_throttling is not None: self.bandwidth_throttling = bandwidth_throttling @property def bandwidth_throttling(self): return self._bandwidth_throttling @bandwidth_throttling.setter def bandwidth_throttling(self, bandwidth_throttling): self._bandwidth_throttling = bandwidth_throttling
MIT License
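A quick usage sketch grounded only in the class shown above; no CloudEndure API connection is needed:

from cloudendure.cloudendure_api.models.cloud_endure_bandwidth_throttling import (
    CloudEndureBandwidthThrottling,
)

throttling = CloudEndureBandwidthThrottling(bandwidth_throttling=50)
print(throttling.to_dict())  # {'bandwidth_throttling': 50}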
deepsphere/deepsphere-weather
modules/utils_config.py
check_numeric_precision
python
def check_numeric_precision(numeric_precision): if not isinstance(numeric_precision, str): raise TypeError("Specify 'numeric_precision' as a string") if numeric_precision not in ['float64', 'float32', 'float16', 'bfloat16']: raise ValueError("Valid 'numeric_precision' values are: 'float64', 'float32', 'float16' and 'bfloat16'")
Check numeric precision argument.
https://github.com/deepsphere/deepsphere-weather/blob/a9c75de9c9852a2832883cd998efd16d6542b083/modules/utils_config.py#L286-L291
import os import sys import json import torch import pickle import shutil import inspect import types import numpy as np import deepdiff from modules.utils_torch import set_pytorch_deterministic from modules.utils_torch import set_pytorch_numeric_precision def get_default_model_settings(): model_settings = {"pretrained_model_name": None, "model_name_prefix": None, "model_name": None, "model_name_suffix": None, "kernel_size_conv": 3, "bias": True, "batch_norm": False, "batch_norm_before_activation": False, "activation": True, "activation_fun": 'relu', "pool_method": "Max", "kernel_size_pooling": 4, "conv_type": 'graph', "graph_type": "knn", "knn": 20, "periodic_padding": 'True', } return model_settings def get_default_training_settings(): training_settings = {"epochs": 15, "ar_training_strategy": "RNN", "learning_rate": 0.001, "training_batch_size": 16, "validation_batch_size": 16, "scoring_interval": 20, "save_model_each_epoch": False, "numeric_precision": "float32", "deterministic_training": False, "seed_model_weights": 100, "seed_random_shuffling": 120, "benchmark_cudnn": True, "gpu_training": True, "gpu_devices_ids": [0], "dataparallel_training": False, } return training_settings def get_default_ar_settings(): ar_settings = {"input_k": [-3,-2,-1], "output_k": [0], "forecast_cycle": 1, "ar_iterations": 6, "stack_most_recent_prediction": True, } return ar_settings def get_default_dataloader_settings(): dataloader_settings = {"random_shuffling": True, "drop_last_batch": True, "prefetch_in_gpu": False, "prefetch_factor": 2, "pin_memory": False, "asyncronous_gpu_transfer": True, "num_workers": 8, "autotune_num_workers": False, } return dataloader_settings def get_default_SWAG_settings(): dataloader_settings = {"SWAG": False, "target_learning_rate": 0.007, "no_cov_mat": False, "max_num_models": 40, "swag_freq": 10, "swa_start": 0, "sampling_scale": 0.1, "nb_samples": 10 } return dataloader_settings def get_default_settings(): ar_settings = get_default_ar_settings() training_settings = get_default_training_settings() model_settings = get_default_model_settings() dataloader_settings = get_default_dataloader_settings() default_settings = {"model_settings": model_settings, "dataloader_settings": dataloader_settings, "training_settings": training_settings, "ar_settings": ar_settings, } return default_settings def read_config_file(fpath): with open(fpath) as input_file: cfg = json.load(input_file) return cfg def write_config_file(cfg, fpath): with open(fpath, "w") as output_file: json.dump(cfg, output_file, indent=4) def get_model_settings(cfg): model_settings = cfg['model_settings'] default_model_settings = get_default_model_settings() mandatory_keys = ['architecture_name', 'sampling', 'sampling_kwargs', "sampling_name"] optional_keys = list(default_model_settings.keys()) model_settings["architecture_name"] = cfg['model_settings'].get("architecture_name", None) model_settings["sampling"] = cfg['model_settings'].get("sampling", None) model_settings["sampling_kwargs"] = cfg['model_settings'].get("sampling_kwargs", None) model_settings["sampling_name"] = cfg['model_settings'].get("sampling_name", None) flag_error = False for key in mandatory_keys: if model_settings[key] is None: flag_error = True print("'{}' is a mandatory key that must be specified in the model settings section of the config file.".format(key)) if flag_error: raise ValueError('Specify the mandatory model settings keys in the config file!') for key in optional_keys: model_settings[key] = cfg['model_settings'].get(key, 
default_model_settings[key]) return model_settings def get_training_settings(cfg): training_settings = {} default_training_settings = get_default_training_settings() available_keys = list(default_training_settings.keys()) cfg_keys = np.array(list(cfg['training_settings'].keys())) invalid_keys = cfg_keys[np.isin(cfg_keys, available_keys, invert=True)] if len(invalid_keys) > 0: for key in invalid_keys: print("'{}' is an unvalid training setting key!".format(key)) raise ValueError('Specify only correct training setting keys in the config file!') for key in available_keys: training_settings[key] = cfg['training_settings'].get(key, default_training_settings[key]) if not isinstance(training_settings['gpu_devices_ids'], list): training_settings['gpu_devices_ids'] = [training_settings['gpu_devices_ids']] if not training_settings['gpu_training']: if training_settings['dataparallel_training']: print("DataParallel training is available only on GPUs!") training_settings['dataparallel_training'] = False return training_settings def get_dataloader_settings(cfg): dataloader_settings = {} default_dataloader_settings = get_default_dataloader_settings() available_keys = list(default_dataloader_settings.keys()) cfg_keys = np.array(list(cfg['dataloader_settings'].keys())) invalid_keys = cfg_keys[np.isin(cfg_keys, available_keys, invert=True)] if len(invalid_keys) > 0: for key in invalid_keys: print("'{}' is an unvalid dataloader setting key!".format(key)) raise ValueError('Specify only correct dataloader setting keys in the config file!') for key in available_keys: dataloader_settings[key] = cfg['dataloader_settings'].get(key, default_dataloader_settings[key]) return dataloader_settings def get_ar_settings(cfg): ar_settings = {} default_ar_settings = get_default_ar_settings() available_keys = list(default_ar_settings.keys()) cfg_keys = np.array(list(cfg['ar_settings'].keys())) invalid_keys = cfg_keys[np.isin(cfg_keys, available_keys, invert=True)] if len(invalid_keys) > 0: for key in invalid_keys: print("'{}' is an unvalid AR setting key!".format(key)) raise ValueError('Specify only correct AR setting keys in the config file!') for key in available_keys: ar_settings[key] = cfg['ar_settings'].get(key, default_ar_settings[key]) if not isinstance(ar_settings['input_k'], list): ar_settings['input_k'] = [ar_settings['input_k']] if not isinstance(ar_settings['output_k'], list): ar_settings['output_k'] = [ar_settings['output_k']] return ar_settings def get_SWAG_settings(cfg): SWAG_settings = {} default_SWAG_settings = get_default_SWAG_settings() available_keys = list(default_SWAG_settings.keys()) cfg_keys = np.array(list(cfg['SWAG_settings'].keys())) invalid_keys = cfg_keys[np.isin(cfg_keys, available_keys, invert=True)] if len(invalid_keys) > 0: for key in invalid_keys: print("'{}' is an unvalid SWAG setting key!".format(key)) raise ValueError('Specify only correct SWAG setting keys in the config file!') for key in available_keys: SWAG_settings[key] = cfg['SWAG_settings'].get(key, default_SWAG_settings[key]) return SWAG_settings def check_same_dict(x,y): ddif = deepdiff.DeepDiff(x,y, ignore_type_in_groups=[(str, np.str_)]) if len(ddif)> 0: print("The two dictionaries have the following mismatches :") print(ddif) raise ValueError("Not same dictionary.") return None
MIT License
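With the membership test written as a plain "not in" check (the original wrapped the argument in a list, so the test always failed), the validation behaves as follows, assuming the function is imported from modules.utils_config:

from modules.utils_config import check_numeric_precision

check_numeric_precision("float32")     # passes silently

try:
    check_numeric_precision("float8")  # not a supported precision
except ValueError as err:
    print(err)

try:
    check_numeric_precision(32)        # not a string
except TypeError as err:
    print(err)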
javefang/pyaqara
aqara/client.py
AqaraClient.handle_message
python
def handle_message(self, msg, src_addr): _LOGGER.debug("handle_message from %s", src_addr) cmd = msg["cmd"] sid = msg["sid"] if cmd == "iam": addr = msg["ip"] self.on_gateway_discovered(sid, addr) elif cmd == "get_id_list_ack": sids = _extract_data(msg) self.on_devices_discovered(sid, sids) elif cmd == "read_ack": model = msg["model"] data = _extract_data(msg) self.on_read_ack(model, sid, data) elif cmd == "write_ack": if "model" not in msg: _LOGGER.error("write error: %s", json.dumps(msg)) return model = msg["model"] data = _extract_data(msg) self.on_write_ack(model, sid, data) elif cmd == "report": model = msg["model"] data = _extract_data(msg) self.on_report(model, sid, data) elif cmd == "heartbeat": model = msg["model"] data = _extract_data(msg) gw_token = None if "token" not in msg else msg["token"] self.on_heartbeat(model, sid, data, gw_token)
Override: handle_message implementation. Dispatches an incoming gateway message to the matching callback based on its cmd field (iam, get_id_list_ack, read_ack, write_ack, report or heartbeat).
https://github.com/javefang/pyaqara/blob/84c0edc991ba435b3eb6f0225737461850de96aa/aqara/client.py#L93-L125
import asyncio import json import logging from pydispatch import dispatcher from aqara.protocol import AqaraProtocol from aqara.gateway import AqaraGateway from aqara.const import ( LISTEN_IP, LISTEN_PORT, AQARA_EVENT_NEW_GATEWAY ) _LOGGER = logging.getLogger(__name__) def _extract_data(msg): return json.loads(msg["data"]) class AqaraClient(AqaraProtocol): def __init__(self, gw_secrets=None): super().__init__() self.transport = None self._gw_secrets = {} if gw_secrets is None else gw_secrets self._gateways = {} self._device_to_gw = {} @property def gateways(self): return self._gateways @asyncio.coroutine def start(self, loop): listen = loop.create_datagram_endpoint(lambda: self, local_addr=(LISTEN_IP, LISTEN_PORT)) transport, _protocol = yield from listen self.transport = transport self.discover_gateways() _LOGGER.info("started") def stop(self): if self.transport is None: _LOGGER.info("not started") else: self.transport.close() _LOGGER.info("stopped") def discover_gateways(self): _LOGGER.info('discovering gateways...') discovery_msg = {"cmd": "whois"} self.broadcast(discovery_msg) def discover_devices(self, gw_addr): discover_devices_msg = {"cmd": "get_id_list"} self.unicast(gw_addr, discover_devices_msg) def read_device(self, gw_addr, sid): read_msg = {"cmd": "read", "sid": sid} self.unicast(gw_addr, read_msg) def write_device(self, gw_addr, model, sid, data, meta=None): write_msg = { "cmd": "write", "model": model, "sid": sid, "data": json.dumps(data) } if meta != None: write_msg.update(meta) self.unicast(gw_addr, write_msg)
MIT License
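For reference, the UDP payload shapes that handle_message above dispatches on; the sid, model and addresses below are made-up examples:

import json

report_msg = {
    "cmd": "report",
    "model": "magnet",                      # hypothetical device model
    "sid": "158d00011a2b3c",                # hypothetical device id
    "data": json.dumps({"status": "open"}),
}
# handle_message(report_msg, ("192.168.1.50", 9898)) routes this to
# on_report("magnet", "158d00011a2b3c", {"status": "open"}).

iam_msg = {"cmd": "iam", "sid": "f0b429aabbcc", "ip": "192.168.1.50"}
# ... and this one to on_gateway_discovered("f0b429aabbcc", "192.168.1.50").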
spotify/luigi
luigi/contrib/kubernetes.py
KubernetesJobTask.print_pod_logs_on_exit
python
def print_pod_logs_on_exit(self): return False
Whether to fetch and print the pod logs once the job is completed. Defaults to False; override to return True to enable log output.
https://github.com/spotify/luigi/blob/ad5ddc9875e54cca8209863a8ec7bcc5d13ece8a/luigi/contrib/kubernetes.py#L192-L196
import logging import time import uuid from datetime import datetime import luigi logger = logging.getLogger('luigi-interface') try: from pykube.config import KubeConfig from pykube.http import HTTPClient from pykube.objects import Job, Pod except ImportError: logger.warning('pykube is not installed. KubernetesJobTask requires pykube.') class kubernetes(luigi.Config): auth_method = luigi.Parameter( default="kubeconfig", description="Authorization method to access the cluster") kubeconfig_path = luigi.Parameter( default="~/.kube/config", description="Path to kubeconfig file for cluster authentication") max_retrials = luigi.IntParameter( default=0, description="Max retrials in event of job failure") kubernetes_namespace = luigi.OptionalParameter( default=None, description="K8s namespace in which the job will run") class KubernetesJobTask(luigi.Task): __DEFAULT_POLL_INTERVAL = 5 __DEFAULT_POD_CREATION_INTERVAL = 5 _kubernetes_config = None def _init_kubernetes(self): self.__logger = logger self.__logger.debug("Kubernetes auth method: " + self.auth_method) if self.auth_method == "kubeconfig": self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path)) elif self.auth_method == "service-account": self.__kube_api = HTTPClient(KubeConfig.from_service_account()) else: raise ValueError("Illegal auth_method") self.job_uuid = str(uuid.uuid4().hex) now = datetime.utcnow() self.uu_name = "%s-%s-%s" % (self.name, now.strftime('%Y%m%d%H%M%S'), self.job_uuid[:16]) @property def auth_method(self): return self.kubernetes_config.auth_method @property def kubeconfig_path(self): return self.kubernetes_config.kubeconfig_path @property def kubernetes_namespace(self): return self.kubernetes_config.kubernetes_namespace @property def name(self): raise NotImplementedError("subclass must define name") @property def labels(self): return {} @property def spec_schema(self): raise NotImplementedError("subclass must define spec_schema") @property def max_retrials(self): return self.kubernetes_config.max_retrials @property def backoff_limit(self): return 6 @property def delete_on_success(self): return True @property
Apache License 2.0
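A hypothetical task that opts in to log printing by overriding the property; the task name, image and command are placeholders, following the usual KubernetesJobTask subclass pattern:

from luigi.contrib.kubernetes import KubernetesJobTask

class PrintDateJob(KubernetesJobTask):
    name = "print-date"
    spec_schema = {
        "containers": [
            {"name": "print-date", "image": "busybox", "command": ["date"]}
        ]
    }

    @property
    def print_pod_logs_on_exit(self):
        # Print the finished pod's logs instead of discarding them.
        return True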
intelxed/xed
pysrc/ild_cdict.py
get_state_op_widths
python
def get_state_op_widths(agi, state_space): widths_dict = {} for opname,val_dict in list(state_space.items()): if opname in agi.operand_storage.get_operands(): opnd = agi.operand_storage.get_operand(opname) widths_dict[opname] = int(opnd.bitwidth) continue maxval = max(val_dict.keys()) if maxval == 0: width = 1 else: width = int(math.floor(math.log(maxval, 2))) + 1 widths_dict[opname] = width widths_dict[_bin_MOD3] = 1 widths_dict[_vd_token_7] = 1 widths_dict[_rm_token_4] = 1 widths_dict[_mask_token_n0] = 1 widths_dict[_mask_token_zero] = 1 widths_dict['UIMM0'] = 8 return widths_dict
Returns a dictionary from operand name to operands bit width
https://github.com/intelxed/xed/blob/d57a3bd0a8ad7a1f0c6e2a1b58060d9014021098/pysrc/ild_cdict.py#L151-L177
import math import copy import collections import genutil import ildutil import ild_info import opnds import ild_phash import ild_codegen import ild_eosz import ild_easz import ild_nt import actions_codegen import actions import verbosity import tup2int import operand_storage _token_2_module = {'EOSZ':ild_eosz, 'EASZ':ild_easz} _find_fn_pfx = 'xed3_phash_find' def _log(f,s): if verbosity.vild(): f.write(s) def _set_state_space_from_ii(agi, ii, state_space): for bt in ii.ipattern.bits: if bt.is_operand_decider(): if bt.test == 'eq': state_space[bt.token][bt.requirement] = True for (name, binding) in list(ii.prebindings.items()): bitnum = len(binding.bit_info_list) if bitnum < 4: if not name in state_space: state_space[name] = {} for val in range(0, 2**bitnum): state_space[name][val] = True elif binding.is_constant(): val = int(binding.get_value(), 2) state_space[name][val] = True def _set_space_from_operands(agi, operands, state_space): state_dict = agi.common.state_bits for op in operands: ops = [] if op.name.lower() in state_dict: op_spec = state_dict[op.name.lower()].list_of_str found_op = False for w in op_spec: exapnded_op = opnds.parse_one_operand(w) ops.append(exapnded_op) else: ops.append(op) for op in ops: if (op.bits and op.name in state_space and op.type == 'imm_const'): op_val = int(op.bits, 16) state_space[op.name][op_val] = True def get_all_constraints_state_space(agi): state_space = collections.defaultdict(dict) for g in agi.generator_list: for ii in g.parser_output.instructions: _set_state_space_from_ii(agi, ii, state_space) for g in agi.generator_list: ii = g.parser_output.instructions[0] if genutil.field_check(ii,'iclass'): continue for ii in g.parser_output.instructions: _set_space_from_operands(agi, ii.operands, state_space) if 'VEXVALID' not in state_space: state_space['VEXVALID'][0] = True else: state_space['ZEROING'][1] = True state_space['VL'][3] = True return state_space
Apache License 2.0
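The width rule used above, in isolation: an operand whose maximum value is 0 needs 1 bit, otherwise floor(log2(maxval)) + 1 bits. A small standalone check with made-up value sets:

import math

def bits_needed(val_dict):
    maxval = max(val_dict.keys())
    if maxval == 0:
        return 1
    return int(math.floor(math.log(maxval, 2))) + 1

print(bits_needed({0: True}))                             # 1
print(bits_needed({0: True, 1: True, 2: True, 3: True}))  # 2 (maxval 3)
print(bits_needed({0: True, 8: True}))                    # 4 (maxval 8)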
kadeng/pypgmc
src/pypgmc/potential_tables.py
PotentialTable.max_marginalize
python
def max_marginalize(self, remove_var_set=[]): remove_var_set = self.discrete_pgm.map_var_set(set(remove_var_set)) remove = self.scope.intersection(remove_var_set) keep_vars = self.scope - remove_var_set sum_axes = sorted([self.var_idx_map[i] for i in remove]) res = T.max(self.pt_tensor, axis=sum_axes, keepdims=False) return PotentialTable(keep_vars, res, self.discrete_pgm)
max-marginalize over a given set of variables Args: remove_var_set(iterable(str)): Set of variables to max-marginalize out. May be any iterable (list, tuple, set, whatever) Returns: A new PotentialTable with the given variables max-marginalized out
https://github.com/kadeng/pypgmc/blob/909445fa3a426b07b39b65d2cb8979b1db8cdfca/src/pypgmc/potential_tables.py#L149-L162
__all__ = ['PotentialTable'] import theano import theano.tensor as T from discrete_pgm import DiscretePGM import numpy as np from expression_utils import LogSumExp from copy import copy class PotentialTable(object): def __init__(self, var_set, pt_tensor=None, discrete_pgm=None, name=None): if (discrete_pgm is None): discrete_pgm = DiscretePGM.get_context() if (discrete_pgm is None): raise Exception("No DiscretePGM specified, neither explicit nor as current context") self.discrete_pgm = discrete_pgm self.var_set = frozenset(var_set) self.scope = discrete_pgm.map_var_set(self.var_set) var_indices = sorted([ discrete_pgm.var_index(v) for v in var_set]) var_idx_map = {} for i, sorted_idx in enumerate(var_indices): var_idx_map[sorted_idx] = i self.var_indices = var_indices self.var_idx_map = var_idx_map shp = [self.discrete_pgm.cardinalities[vidx] for vidx in var_indices] self.shape = shp self.is_shared = False if (pt_tensor=="ones"): pt_tensor = T.ones(shp, dtype=theano.config.floatX) if (pt_tensor=='zeros'): pt_tensor = T.zeros(shp, dtype=theano.config.floatX) if (pt_tensor=="shared"): pt_tensor = theano.shared(np.zeros(shp, dtype=theano.config.floatX)) self.is_shared = True if (pt_tensor is None): bcast = [False,]*len(self.var_set) tensor_type = T.TensorType(dtype=theano.config.floatX, broadcastable=bcast) self.pt_tensor = T.TensorVariable(type=tensor_type, name=name) else: self.pt_tensor = T.as_tensor_variable(pt_tensor) def replace_tensor(self, tensor): assert tensor.broadcastable == self.pt_tensor.broadcastable res = copy(self) res.pt_tensor = tensor return res def _ct(self, other): if (other.var_set == self.var_set): return (self.pt_tensor, other.pt_tensor) union_var_set = other.scope.union(self.scope) vidx1 = frozenset(self.var_indices) vidx2 = frozenset(other.var_indices) union_indices = vidx1.union(vidx2) shape1 = [] shape2 = [] b1 = [] b2 = [] u1 = [] u2 = [] for i,vidx in enumerate(sorted(union_indices)): if (vidx in vidx1): shape1.append(self.discrete_pgm.cardinalities[vidx]) u1.append(i) else: shape1.append(1) b1.append(i) if (vidx in vidx2): shape2.append(self.discrete_pgm.cardinalities[vidx]) u2.append(i) else: shape2.append(1) b2.append(i) t1 = T.addbroadcast(T.unbroadcast(self.pt_tensor.reshape(shape1, len(shape1)), *u1), *b1) t2 = T.addbroadcast(T.unbroadcast(other.pt_tensor.reshape(shape2, len(shape2)), *u2), *b2) return (t1, t2) def _op_result_cpt(self, other, result): assert other.discrete_pgm == self.discrete_pgm return PotentialTable(other.scope.union(self.scope), result, self.discrete_pgm) def __mul__(self, other): t1,t2 = self._ct(other) return self._op_result_cpt(other, t1*t2) def __add__(self, other): t1,t2 = self._ct(other) return self._op_result_cpt(other, t1+t2) def marginalize(self, remove_var_set=[]): remove_var_set = self.discrete_pgm.map_var_set(set(remove_var_set)) remove = self.scope.intersection(remove_var_set) keep_vars = self.scope - remove_var_set sum_axes = sorted([self.var_idx_map[i] for i in remove]) res = T.sum(self.pt_tensor, axis=sum_axes, keepdims=False) return PotentialTable(keep_vars, res, self.discrete_pgm) def logsumexp_marginalize(self, remove_var_set=[]): remove_var_set = self.discrete_pgm.map_var_set(set(remove_var_set)) remove = self.scope.intersection(remove_var_set) keep_vars = self.scope - remove_var_set sum_axes = sorted([self.var_idx_map[i] for i in remove]) res = LogSumExp(self.pt_tensor, axis=sum_axes, keepdims=False) return PotentialTable(keep_vars, res, self.discrete_pgm)
Apache License 2.0
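A NumPy analogue of the reduction that max_marginalize builds (the real code constructs the same maximum over the removed axes as a Theano expression); the potential values are illustrative:

import numpy as np

# A potential table over variables (A, B) with cardinalities (2, 3).
phi = np.array([[0.1, 0.7, 0.2],
                [0.4, 0.3, 0.3]])

# Max-marginalizing B out keeps A and takes the maximum along B's axis.
phi_A = phi.max(axis=1)  # array([0.7, 0.4])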
merll/docker-map
dockermap/build/buffer.py
DockerBuffer.fileobj
python
def fileobj(self): return self._fileobj
Read-only property, returning the reference to the file-like object. :return:
https://github.com/merll/docker-map/blob/54e325595fc0b6b9d154dacc790a222f957895da/dockermap/build/buffer.py#L53-L59
from __future__ import unicode_literals from abc import ABCMeta, abstractmethod import six from tempfile import NamedTemporaryFile from io import BytesIO class FinalizedError(Exception): pass class DockerBuffer(six.with_metaclass(ABCMeta, object)): def __init__(self, *args, **kwargs): self._fileobj = self.create_fileobj() self._finalized = False def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() @abstractmethod def create_fileobj(self): pass def check_not_finalized(self): if self._finalized: raise FinalizedError("File cannot be changed after it has been finalized.") @property
MIT License
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_9/api/protection_groups_api.py
ProtectionGroupsApi.api29_protection_groups_host_groups_post_with_http_info
python
def api29_protection_groups_host_groups_post_with_http_info( self, authorization=None, x_request_id=None, group_names=None, member_names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if group_names is not None: if not isinstance(group_names, list): group_names = [group_names] if member_names is not None: if not isinstance(member_names, list): member_names = [member_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'group_names' in params: query_params.append(('group_names', params['group_names'])) collection_formats['group_names'] = 'csv' if 'member_names' in params: query_params.append(('member_names', params['member_names'])) collection_formats['member_names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.9/protection-groups/host-groups', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='MemberNoIdAllResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
Add a host group to a protection group Adds a host group member to a protection group. Members that are already in the protection group are not affected. For asynchronous replication, only members of the same type can belong to a protection group. The `group_names` parameter represents the name of the protection group, and the `member_names` parameter represents the name of the host group. The `group_names` and `member_names` parameters are required and must be set together. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api29_protection_groups_host_groups_post_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`. :param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: MemberNoIdAllResponse If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_9/api/protection_groups_api.py#L469-L563
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class ProtectionGroupsApi(object): def __init__(self, api_client): self.api_client = api_client def api29_protection_groups_delete_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.9/protection-groups', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api29_protection_groups_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, destroyed=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, total_only=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api29_protection_groups_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api29_protection_groups_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'destroyed' in params: query_params.append(('destroyed', params['destroyed'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', 
params['total_item_count'])) if 'total_only' in params: query_params.append(('total_only', params['total_only'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.9/protection-groups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProtectionGroupGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api29_protection_groups_host_groups_delete_with_http_info( self, authorization=None, x_request_id=None, group_names=None, member_names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if group_names is not None: if not isinstance(group_names, list): group_names = [group_names] if member_names is not None: if not isinstance(member_names, list): member_names = [member_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'group_names' in params: query_params.append(('group_names', params['group_names'])) collection_formats['group_names'] = 'csv' if 'member_names' in params: query_params.append(('member_names', params['member_names'])) collection_formats['member_names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.9/protection-groups/host-groups', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api29_protection_groups_host_groups_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, filter=None, group_names=None, limit=None, member_names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if group_names is not None: if not isinstance(group_names, list): group_names = [group_names] if member_names is not None: if not isinstance(member_names, list): member_names = [member_names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = 
str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api29_protection_groups_host_groups_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api29_protection_groups_host_groups_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'group_names' in params: query_params.append(('group_names', params['group_names'])) collection_formats['group_names'] = 'csv' if 'limit' in params: query_params.append(('limit', params['limit'])) if 'member_names' in params: query_params.append(('member_names', params['member_names'])) collection_formats['member_names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.9/protection-groups/host-groups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='MemberNoIdAllGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
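A hedged call sketch for the endpoint above; api_client is assumed to be an already-configured pypureclient ApiClient (its construction is not shown here), and the group and member names are placeholders:

from pypureclient.flasharray.FA_2_9.api.protection_groups_api import ProtectionGroupsApi

def add_host_group_to_pgroup(api_client, pgroup_name, hgroup_name):
    # Attach one host group to one protection group via the 2.9 endpoint.
    api = ProtectionGroupsApi(api_client)
    return api.api29_protection_groups_host_groups_post_with_http_info(
        group_names=[pgroup_name],
        member_names=[hgroup_name],
    )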
raymontag/keepassc
keepassc/daemon.py
Daemon.start
python
def start(self): try: with open(self.pidfile,'r') as pf: pid = int(pf.read().strip()) except IOError: pid = None if pid: message = "pidfile {0} already exists. " + "Daemon already running?\n" sys.stderr.write(message.format(self.pidfile)) sys.exit(1) self.daemonize() self.run()
Start the daemon.
https://github.com/raymontag/keepassc/blob/3a3c7ef7b3ee1ceb16b613176d54dad89c0408df/keepassc/daemon.py#L62-L81
import sys import os import time import atexit import signal class Daemon(object): def __init__(self, pidfile): self.pidfile = pidfile def daemonize(self): try: pid = os.fork() if pid > 0: sys.exit(0) except OSError as err: sys.stderr.write('fork #1 failed: {0}\n'.format(err)) sys.exit(1) os.chdir('/') os.setsid() os.umask(0) try: pid = os.fork() if pid > 0: sys.exit(0) except OSError as err: sys.stderr.write('fork #2 failed: {0}\n'.format(err)) sys.exit(1) sys.stdout.flush() sys.stderr.flush() si = open(os.devnull, 'r') so = open(os.devnull, 'a+') se = open(os.devnull, 'a+') os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) atexit.register(self.delpid) pid = str(os.getpid()) with open(self.pidfile,'w+') as f: f.write(pid + '\n') def delpid(self): os.remove(self.pidfile)
ISC License
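The Daemon.start method above only refuses to run when a pidfile is already present; the real work happens in run(), which a subclass is expected to provide. A minimal, hypothetical usage sketch (MyDaemon and the pidfile path are illustrative, not part of keepassc):

import time
from keepassc.daemon import Daemon

class MyDaemon(Daemon):
    def run(self):
        # Executes in the detached child after daemonize(); loop until killed.
        while True:
            time.sleep(1)

if __name__ == '__main__':
    # start() writes /tmp/mydaemon.pid and exits with an error if it already exists.
    MyDaemon('/tmp/mydaemon.pid').start()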
lscsoft/bilby
bilby/bilby_mcmc/chain.py
Chain.tau_nocache
python
def tau_nocache(self): tau = max(self.tau_dict.values()) self.max_tau_dict[self.position] = tau self.cached_tau_count = 0 return tau
Calculate tau forcing a recalculation (no cached tau)
https://github.com/lscsoft/bilby/blob/b1e02f1dfae03d4939cae9c95eff300c22919689/bilby/bilby_mcmc/chain.py#L274-L279
from distutils.version import LooseVersion import numpy as np import pandas as pd from ..core.sampler.base_sampler import SamplerError from ..core.utils import logger from .utils import LOGLKEY, LOGLLATEXKEY, LOGPKEY, LOGPLATEXKEY class Chain(object): def __init__( self, initial_sample, burn_in_nact=1, thin_by_nact=1, fixed_discard=0, autocorr_c=5, min_tau=1, fixed_tau=None, tau_window=None, block_length=100000, ): self.autocorr_c = autocorr_c self.min_tau = min_tau self.burn_in_nact = burn_in_nact self.thin_by_nact = thin_by_nact self.block_length = block_length self.fixed_discard = int(fixed_discard) self.fixed_tau = fixed_tau self.tau_window = tau_window self.ndim = initial_sample.ndim self.current_sample = initial_sample self.keys = self.current_sample.keys self.parameter_keys = self.current_sample.parameter_keys self._chain_array = self._get_zero_chain_array() self._chain_array_length = block_length self.position = -1 self.max_log_likelihood = -np.inf self.max_tau_dict = {} self.converged = False self.cached_tau_count = 0 self._minimum_index_proposal = 0 self._minimum_index_adapt = 0 self._last_minimum_index = (0, 0, "I") self.last_full_tau_dict = {key: np.inf for key in self.parameter_keys} self.append(self.current_sample) def _get_zero_chain_array(self): return np.zeros((self.block_length, self.ndim + 2), dtype=np.float64) def _extend_chain_array(self): self._chain_array = np.concatenate( (self._chain_array, self._get_zero_chain_array()), axis=0 ) self._chain_array_length = len(self._chain_array) @property def current_sample(self): return self._current_sample.copy() @current_sample.setter def current_sample(self, current_sample): self._current_sample = current_sample def append(self, sample): self.position += 1 if self.position >= self._chain_array_length: self._extend_chain_array() self.current_sample = sample self._chain_array[self.position] = sample.list if sample[LOGLKEY] > self.max_log_likelihood: self.max_log_likelihood = sample[LOGLKEY] def __getitem__(self, index): if index < 0: index = index + self.position + 1 if index <= self.position: values = self._chain_array[index] return Sample({k: v for k, v in zip(self.keys, values)}) else: raise SamplerError(f"Requested index {index} out of bounds") def __setitem__(self, index, sample): if index < 0: index = index + self.position + 1 self._chain_array[index] = sample.list def key_to_idx(self, key): return self.keys.index(key) def get_1d_array(self, key): return self._chain_array[: 1 + self.position, self.key_to_idx(key)] @property def _random_idx(self): mindex = self._last_minimum_index[1] if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last: mindex = 0 return np.random.randint(mindex, self.position + 1) @property def random_sample(self): return self[self._random_idx] @property def fixed_discard(self): return self._fixed_discard @fixed_discard.setter def fixed_discard(self, fixed_discard): self._fixed_discard = int(fixed_discard) @property def minimum_index(self): position = self.position last_minimum_index = self._last_minimum_index if position == last_minimum_index[0]: return int(last_minimum_index[1]) if position < self.fixed_discard: self.minimum_index_method = "FD" return self.fixed_discard minimum_index_list = [self.fixed_discard] minimum_index_method_list = ["FD"] if self.tau_last < np.inf: tau = self.tau_last elif len(self.max_tau_dict) == 0: tau = self._tau_for_full_chain else: tau = np.inf if tau < np.inf: minimum_index_list.append(self.burn_in_nact * tau) 
minimum_index_method_list.append(f"{self.burn_in_nact}tau") if True: zfactor = 1 N = 100 delta_lnP = zfactor * self.ndim / 2 logl = self.get_1d_array(LOGLKEY) log_prior = self.get_1d_array(LOGPKEY) log_posterior = logl + log_prior max_posterior = np.max(log_posterior) ave = pd.Series(log_posterior).rolling(window=N).mean().iloc[N - 1 :] delta = max_posterior - ave passes = ave[delta < delta_lnP] if len(passes) > 0: minimum_index_list.append(passes.index[0] + 1) minimum_index_method_list.append(f"z{zfactor}") if False: minimum_index_list.append(last_minimum_index[1]) minimum_index_method_list.append(last_minimum_index[2]) minimum_index_list.append(self.minimum_index_proposal) minimum_index_method_list.append("PR") minimum_index_list.append(self.minimum_index_adapt) minimum_index_method_list.append("AD") minimum_index = int(np.max(minimum_index_list)) minimum_index_method = minimum_index_method_list[np.argmax(minimum_index_list)] self._last_minimum_index = (position, minimum_index, minimum_index_method) self.minimum_index_method = minimum_index_method return minimum_index @property def minimum_index_proposal(self): return self._minimum_index_proposal @minimum_index_proposal.setter def minimum_index_proposal(self, minimum_index_proposal): if minimum_index_proposal > self._minimum_index_proposal: self._minimum_index_proposal = minimum_index_proposal @property def minimum_index_adapt(self): return self._minimum_index_adapt @minimum_index_adapt.setter def minimum_index_adapt(self, minimum_index_adapt): if minimum_index_adapt > self._minimum_index_adapt: self._minimum_index_adapt = minimum_index_adapt @property def tau(self): if self.position in self.max_tau_dict: return self.max_tau_dict[self.position] elif ( self.tau_last < np.inf and self.cached_tau_count < 50 and self.nsamples_last > 50 ): self.cached_tau_count += 1 return self.tau_last else: return self.tau_nocache @property
MIT License
borda/birl
birl/utilities/evaluate.py
aggregate_user_score_timeline
python
def aggregate_user_score_timeline(df, col_aggreg, col_user, col_score, lower_better=True, top_down=True, interp=False): users = df[col_user].unique().tolist() aggrs = df[col_aggreg].unique().tolist() mtx = np.full((len(aggrs), len(users)), fill_value=np.nan) fn_best = np.nanmin if lower_better else np.nanmax for usr, dfg in df.groupby(col_user): for agg, dfgg in dfg.groupby(col_aggreg): mtx[aggrs.index(agg), users.index(usr)] = fn_best(dfgg[col_score]) for j in range(len(users)): vrange = range(len(aggrs)) if top_down else range(len(aggrs))[::-1] for i in (i for i in vrange if interp or not np.isnan(mtx[i, j])): vals = mtx[:i + 1, j] if top_down else mtx[i:, j] mtx[i, j] = fn_best(vals) df_agg = pd.DataFrame(mtx, columns=users, index=aggrs) return df_agg
Compute a cumulative statistic over the given table, assuming col_aggreg is continuous: first the table is grouped by col_aggreg and the min/max (according to lower_better) of col_score is chosen; then, assuming col_aggreg is sortable like a timeline, the min/max is propagated from past values, depending on top_down (which reverses the order). :param df: rich table containing col_aggreg, col_user, col_score :param str col_aggreg: used for grouping, assumed to behave like a timeline :param str col_user: column by which the scores are assumed to be independent :param str col_score: the scoring value for selecting the best :param bool lower_better: whether to take the min (True) or max (False) of the scoring value :param bool top_down: reverse the propagation order along col_aggreg :param bool interp: interpolate from past values in case some scores for col_aggreg are missing :return DF: table >>> np.random.seed(0) >>> df = pd.DataFrame() >>> df['day'] = np.random.randint(0, 5, 50) >>> df['user'] = np.array(list('abc'))[np.random.randint(0, 3, 50)] >>> df['score'] = np.random.random(50) >>> df_agg = aggregate_user_score_timeline(df, 'day', 'user', 'score') >>> df_agg.round(3) # doctest: +NORMALIZE_WHITESPACE b c a 4 0.447 0.132 0.567 0 0.223 0.005 0.094 3 0.119 0.005 0.094 1 0.119 0.005 0.094 2 0.119 0.005 0.020
https://github.com/borda/birl/blob/cae694f52434e74386a9f1fd2007a218e3a3d670/birl/utilities/evaluate.py#L263-L310
from collections import Counter from itertools import chain import numpy as np import pandas as pd from scipy.spatial import distance from birl.utilities.registration import estimate_affine_transform, get_affine_components, norm_angle def compute_tre(points_1, points_2): nb_common = min([len(pts) for pts in [points_1, points_2] if pts is not None]) if nb_common <= 0: raise ValueError('no common landmarks for metric') points_1 = np.asarray(points_1)[:nb_common] points_2 = np.asarray(points_2)[:nb_common] diffs = np.sqrt(np.sum(np.power(points_1 - points_2, 2), axis=1)) return diffs def compute_target_regist_error_statistic(points_ref, points_est): if not all(pts is not None and list(pts) for pts in [points_ref, points_est]): return [], {'overlap points': 0} lnd_sizes = [len(points_ref), len(points_est)] if min(lnd_sizes) <= 0: raise ValueError('no common landmarks for metric') diffs = compute_tre(points_ref, points_est) inter_dist = distance.cdist(points_ref[:len(diffs)], points_ref[:len(diffs)]) dist = np.mean(inter_dist, axis=0) weights = dist / np.sum(dist) dict_stat = { 'Mean': np.mean(diffs), 'Mean_weighted': np.sum(diffs * weights), 'STD': np.std(diffs), 'Median': np.median(diffs), 'Min': np.min(diffs), 'Max': np.max(diffs), 'overlap points': min(lnd_sizes) / float(max(lnd_sizes)) } return diffs, dict_stat def compute_tre_robustness(points_target, points_init, points_warp): if not all(pts is not None for pts in [points_init, points_target, points_warp]): raise ValueError nb_common = min([len(pts) for pts in [points_init, points_target, points_warp]]) tre_init = compute_tre(points_init[:nb_common], points_target[:nb_common]) tre_final = compute_tre(points_warp[:nb_common], points_target[:nb_common]) robust = np.sum(tre_final < tre_init) / float(len(tre_final)) return robust def compute_affine_transf_diff(points_ref, points_init, points_est): if not all(pts is not None and list(pts) for pts in [points_ref, points_init, points_est]): return {} points_ref = np.nan_to_num(points_ref) mtx_init = estimate_affine_transform(points_ref, np.nan_to_num(points_init))[0] affine_init = get_affine_components(np.asarray(mtx_init)) mtx_est = estimate_affine_transform(points_ref, np.nan_to_num(points_est))[0] affine_estim = get_affine_components(np.asarray(mtx_est)) diff = { 'Affine %s %s Diff' % (n, c): (np.array(affine_estim[n]) - np.array(affine_init[n]))[i] for n in ['translation', 'scale'] for i, c in enumerate(['X', 'Y']) } diff.update({'Affine %s Diff' % n: norm_angle(affine_estim[n] - affine_init[n], deg=True) for n in ['rotation']}) diff.update({'Affine %s Diff' % n: affine_estim[n] - affine_init[n] for n in ['shear']}) return diff def compute_ranking(user_cases, field, reverse=False): users = list(user_cases.keys()) cases = set(chain(*[user_cases[u].keys() for u in user_cases])) for cs in cases: usr_val = [(u, user_cases[u][cs].get(field, np.nan)) for u in users if cs in user_cases[u]] usr_val = sorted(usr_val, key=lambda x: x[1], reverse=reverse) usr_rank = {usr: i + 1 for i, (usr, _) in enumerate(usr_val)} for usr in users: if cs not in user_cases[usr]: user_cases[usr][cs] = {} user_cases[usr][cs][field + '_rank'] = usr_rank.get(usr, len(users)) return user_cases def compute_matrix_user_ranking(df_stat, higher_better=False): ranking = np.zeros(df_stat.values.shape) nan = -np.inf if higher_better else np.inf for i, col in enumerate(df_stat.columns): vals = [v if not np.isnan(v) else nan for v in df_stat[col]] idx_vals = list(zip(range(len(df_stat)), vals)) idx_vals = sorted(idx_vals, 
key=lambda iv: iv[1], reverse=higher_better) ranking[:, i] = [idx if val != nan else np.nan for idx, val in idx_vals] return ranking def grouping_cumulative(df, col_index, col_column): df_counts = pd.DataFrame() for idx, dfg in df[[col_index, col_column]].groupby(col_index): counts = dict(Counter(dfg[col_column])) counts[col_index] = idx df_counts = df_counts.append(counts, ignore_index=True) df_counts.set_index(col_index, inplace=True) return df_counts
BSD 3-Clause New or Revised License
tonlabs/testsuite4
tonos_ts4/global_functions.py
load_data_cell
python
def load_data_cell(fn): fn = make_path(fn, '.tvc') return Cell(globals.core.load_data_cell(fn))
Loads contract data cell from a compiled contract image with a given name. Returns cell encoded to string :param str fn: The file name :return: Cell object containing contract's data cell :rtype: Cell
https://github.com/tonlabs/testsuite4/blob/c038955c70ca48bf386b65fe8173a929e877d6dc/tonos_ts4/global_functions.py#L234-L243
import os import base64 from . import globals as g from .globals import GRAM, EMPTY_CELL from .util import * from .address import * from .abi import * from . import ts4 def version(): return g.G_VERSION def reset_all(): g.core.reset_all() g.QUEUE = [] g.EVENTS = [] g.ALL_MESSAGES = [] g.NICKNAMES = dict() def set_tests_path(path): g.G_TESTS_PATH = path def init(path, verbose = False, time = None): script_path = os.path.dirname(sys.argv[0]) path = os.path.join( script_path if not os.path.isabs(path) else '', path ) set_tests_path(path) set_verbose(verbose) if time is not None: g.core.set_now(time) def set_verbose(verbose = True): g.G_VERBOSE = verbose def set_stop_at_crash(do_stop): g.G_STOP_AT_CRASH = do_stop def verbose_(msg): verbose(msg, show_always = True, color_red = True) def verbose(msg, show_always = False, color_red = False): if g.G_VERBOSE or show_always: if color_red: msg = red(str(msg)) print(msg) def pop_msg(): assert len(g.QUEUE) > 0 return g.QUEUE.pop(0) def peek_msg(): assert len(g.QUEUE) > 0 return g.QUEUE[0] def pop_event(): assert len(g.EVENTS) > 0 return g.EVENTS.pop(0) def peek_event(): assert len(g.EVENTS) > 0 return g.EVENTS[0] def queue_length(): return len(g.QUEUE) def ensure_queue_empty(): assert eq(0, len(g.QUEUE), msg = ('ensure_queue_empty() -')) def dump_queue(): print(white("g.QUEUE:")) for i in range(len(g.QUEUE)): print(" {}: {}".format(i, g.QUEUE[i])) def set_msg_filter(filter): if filter is True: filter = lambda msg: True if filter is False: filter = None globals.G_MSG_FILTER = filter def register_nickname(addr, nickname): Address.ensure_address(addr) globals.NICKNAMES[addr.str()] = nickname def ensure_address(addr): Address.ensure_address(addr) def zero_addr(wc): return Address.zero_addr(wc) def format_addr(addr, compact = True): Address.ensure_address(addr) if addr.is_none(): return 'addr_none' addr = addr.str() s = addr[:10] if addr in globals.NICKNAMES: s = "{} ({})".format(globals.NICKNAMES[addr], s) else: if not compact: s = 'Addr({})'.format(s) return s def make_keypair(seed = None): (secret_key, public_key) = globals.core.make_keypair(seed) public_key = '0x' + public_key return (secret_key, public_key) def make_path(name, ext): fn = os.path.join(globals.G_TESTS_PATH, name) if not fn.endswith(ext): fn += ext return fn def load_tvc(fn): fn = make_path(fn, '.tvc') with open(fn, 'rb') as fp: str = base64.b64encode(fp.read(1_000_000)).decode('utf-8') return Cell(str) def load_code_cell(fn): fn = make_path(fn, '.tvc') return Cell(globals.core.load_code_cell(fn))
Apache License 2.0
nrel/ditto
scripts/compare.py
main
python
def main(): parser = argparse.ArgumentParser() parser.add_argument('-p1', action='store', dest='path1') parser.add_argument('-p2', action='store', dest='path2') results = parser.parse_args() path1 = results.path1 path2 = results.path2 df1 = pd.read_csv(path1 + '/voltage_profile.csv') df2 = pd.read_csv(path2 + '/voltage_profile.csv') for p, k in zip(['A', 'B', 'C'], [' pu1', ' pu2', ' pu3']): logger.debug('Phase {p} : rms={r}'.format(p=p, r=rms(df1, df2, k))) for p, k in zip(['A', 'B', 'C'], [' pu1', ' pu2', ' pu3']): logger.debug('Phase {p} : |error|={r}'.format(p=p, r=absolute(df1, df2, k)))
Compare two power flow results. **Usage:** $ python compare.py -p1 ./inputs/opendss/ieee_13_node -p2 ./outputs/from_cyme/to_opendss/ieee_13_node This will look for "voltage_profile.csv" in both directories and load them into a Pandas dataframe. For now, this only computes the root mean square error for each phase (in p.u).
https://github.com/nrel/ditto/blob/e97fd0823f74d626edeb69e43d741c3e237964f3/scripts/compare.py#L44-L79
from __future__ import absolute_import, division, print_function from builtins import super, range, zip, round, map import logging import numpy as np import pandas as pd import argparse logger = logging.getLogger(__name__) def rms(df1, df2, key): rms = [] for i1, row1 in df1.iterrows(): for i2, row2 in df2.iterrows(): if row1['Bus'] == row2['Bus']: try: rms.append((row1[key] - row2[key])**2) except: raise ValueError('{} not in dataframe'.format(key)) return sum(rms) def absolute(df1, df2, key): abss = [] for i1, row1 in df1.iterrows(): for i2, row2 in df2.iterrows(): if row1['Bus'] == row2['Bus']: try: abss.append(abs(row1[key] - row2[key])) except: raise ValueError('{} not in dataframe'.format(key)) return sum(abss)
BSD 3-Clause New or Revised License
lvtk/lvtk
waflib/Tools/javaw.py
use_javac_files
python
def use_javac_files(self): self.use_lst = [] self.tmp_use_seen = [] self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: tg = get(x) except Errors.WafError: self.uselib.append(x) else: tg.post() if hasattr(tg, 'jar_task'): self.use_lst.append(tg.jar_task.outputs[0].abspath()) self.javac_task.set_run_after(tg.jar_task) self.javac_task.dep_nodes.extend(tg.jar_task.outputs) else: if hasattr(tg, 'outdir'): base_node = tg.outdir else: base_node = tg.path.get_bld() self.use_lst.append(base_node.abspath()) self.javac_task.dep_nodes.extend([x for x in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) for tsk in tg.tasks: self.javac_task.set_run_after(tsk) if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA: self.java_use_rec(x) self.env.append_value('CLASSPATH', self.use_lst)
Processes the *use* attribute referring to other java compilations
https://github.com/lvtk/lvtk/blob/c9e351c480c7f335ced85cbe1ce599e43ae72d4c/waflib/Tools/javaw.py#L227-L263
import os, shutil from waflib import Task, Utils, Errors, Node from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method, taskgen_method from waflib.Tools import ccroot ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS']) SOURCE_RE = '**/*.java' JAR_RE = '**/*' class_check_source = ''' public class Test { public static void main(String[] argv) { Class lib; if (argv.length < 1) { System.err.println("Missing argument"); System.exit(77); } try { lib = Class.forName(argv[0]); } catch (ClassNotFoundException e) { System.err.println("ClassNotFoundException"); System.exit(1); } lib = null; System.exit(0); } } ''' @feature('javac') @before_method('process_source') def apply_java(self): Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.outdir = outdir self.env.OUTDIR = outdir.abspath() self.javac_task = tsk = self.create_task('javac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp if getattr(self, 'compat', None): tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)]) if hasattr(self, 'sourcepath'): fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)] names = os.pathsep.join([x.srcpath() for x in fold]) else: names = [x.srcpath() for x in tsk.srcdir] if names: tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) @taskgen_method def java_use_rec(self, name, **kw): if name in self.tmp_use_seen: return self.tmp_use_seen.append(name) try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) return else: y.post() if hasattr(y, 'jar_task'): self.use_lst.append(y.jar_task.outputs[0].abspath()) else: if hasattr(y,'outdir'): self.use_lst.append(y.outdir.abspath()) else: self.use_lst.append(y.path.get_bld().abspath()) for x in self.to_list(getattr(y, 'use', [])): self.java_use_rec(x) @feature('javac') @before_method('propagate_uselib_vars') @after_method('apply_java')
ISC License
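To see how use_javac_files consumes the use attribute, a hypothetical wscript fragment (target names are made up) might look like the following: a name that resolves to another task generator contributes its jar or class output directory to CLASSPATH and orders the javac tasks, while an unresolved name is treated as an external library and appended to uselib.

def build(bld):
    bld(features='javac', srcdir='src/lib', name='lib')
    # 'lib' resolves via get_tgen_by_name: it has no jar_task, so its build
    # output directory is added to CLASSPATH and its tasks are ordered first.
    # 'SOMELIB' does not resolve to a target, so it falls through to uselib.
    bld(features='javac', srcdir='src/app', use='lib SOMELIB', name='app')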
hasgeek/coaster
coaster/sqlalchemy/roles.py
RoleGrantABC.offered_roles
python
def offered_roles(self): return ()
Roles offered by this object
https://github.com/hasgeek/coaster/blob/3ffbc9d33c981284593445299aaee0c3cc0cdb0b/coaster/sqlalchemy/roles.py#L261-L263
from abc import ABCMeta from copy import deepcopy from functools import wraps from itertools import chain from typing import Dict, List, Optional, Set, Union import collections.abc as abc import operator import warnings from sqlalchemy import event, inspect from sqlalchemy.ext.orderinglist import OrderingList from sqlalchemy.orm import ColumnProperty, Query, RelationshipProperty, SynonymProperty from sqlalchemy.orm.attributes import QueryableAttribute from sqlalchemy.orm.collections import ( InstrumentedDict, InstrumentedList, InstrumentedSet, MappedCollection, ) from sqlalchemy.orm.dynamic import AppenderMixin from sqlalchemy.schema import SchemaItem from flask import _request_ctx_stack from ..auth import current_auth from ..utils import InspectableSet, is_collection, nary_op try: from sqlalchemy.orm import MapperProperty except ImportError: from sqlalchemy.orm.interfaces import MapperProperty __all__ = [ 'RoleGrantABC', 'LazyRoleSet', 'RoleAccessProxy', 'DynamicAssociationProxy', 'RoleMixin', 'with_roles', 'declared_attr_roles', ] __cache__ = {} def _attrs_equal(lhs, rhs): if isinstance(lhs, str) and isinstance(rhs, str): return lhs == rhs return lhs is rhs def _actor_in_relationship(actor, relationship): if actor == relationship: return True if isinstance(relationship, (AppenderMixin, Query, abc.Container)): return actor in relationship return False def _roles_via_relationship(actor, relationship, actor_attr, roles, offer_map): relobj = None if actor_attr is None: if isinstance(relationship, RoleMixin): offered_roles = relationship.roles_for(actor) if offer_map: offered_roles = set( chain.from_iterable( offer_map[role] for role in offered_roles if role in offer_map ) ) return offered_roles raise TypeError( f"{relationship!r} is not a RoleMixin and no actor attribute was specified" ) if isinstance(relationship, (AppenderMixin, Query)): if isinstance(actor_attr, QueryableAttribute): relobj = relationship.filter(operator.eq(actor_attr, actor)).first() else: relobj = relationship.filter_by(**{actor_attr: actor}).first() elif isinstance(relationship, abc.Iterable): for relitem in relationship: if getattr(relitem, actor_attr) == actor: relobj = relitem break elif getattr(relationship, actor_attr) == actor: relobj = relationship if not relobj: return () if isinstance(relobj, RoleGrantABC): offered_roles = relobj.offered_roles if offer_map: offered_roles = set( chain.from_iterable( offer_map[role] for role in offered_roles if role in offer_map ) ) return offered_roles return roles class RoleGrantABC(metaclass=ABCMeta): @property
BSD 2-Clause Simplified License
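offered_roles is the hook that concrete role-granting models override; the base RoleGrantABC implementation above grants nothing. A hedged sketch of a subclass (the Membership class and role names are invented for illustration, not part of coaster):

class Membership(RoleGrantABC):
    def __init__(self, is_admin=False):
        self.is_admin = is_admin

    @property
    def offered_roles(self):
        # Any iterable of role names works; _roles_via_relationship passes it
        # through the optional offer_map before granting roles to the actor.
        return {'admin', 'member'} if self.is_admin else {'member'}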
elemoine/papyrus
papyrus/protocol.py
_get_col_epsg
python
def _get_col_epsg(mapped_class, geom_attr): col = class_mapper(mapped_class).get_property(geom_attr).columns[0] return col.type.srid
Get the EPSG code associated with a geometry attribute. Arguments: geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class.
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L44-L56
import six from pyramid.httpexceptions import (HTTPBadRequest, HTTPMethodNotAllowed, HTTPNotFound) from pyramid.response import Response from shapely.geometry import asShape from shapely.geometry.point import Point from shapely.geometry.polygon import Polygon from sqlalchemy.sql import asc, desc, and_, func from sqlalchemy.orm.util import class_mapper from geoalchemy2.shape import from_shape from geojson import Feature, FeatureCollection, loads, GeoJSON
BSD 2-Clause Simplified License
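To make concrete what _get_col_epsg reads, consider a hypothetical declarative model with a GeoAlchemy 2 geometry column; the function looks up the mapped column for the attribute and returns the srid stored on its type.

from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry

Base = declarative_base()

class Spot(Base):
    # Illustrative model, not part of papyrus.
    __tablename__ = 'spots'
    id = Column(Integer, primary_key=True)
    geom = Column(Geometry('POINT', srid=4326))

# _get_col_epsg(Spot, 'geom') would return 4326.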
vicolab/ml-pyxis
pyxis/pyxis.py
decode_data
python
def decode_data(obj): try: if TYPES["str"] == obj[b"type"]: return obj[b"data"] elif TYPES["ndarray"] == obj[b"type"]: return np.fromstring(obj[b"data"], dtype=np.dtype(obj[b"dtype"])).reshape(obj[b"shape"]) else: return obj except KeyError: return obj
Decode a serialised data object. Parameters ---------- obj : Python dictionary A dictionary describing a serialised data object.
https://github.com/vicolab/ml-pyxis/blob/b092aac133814d5754ee2a142c080c5d4ae17322/pyxis/pyxis.py#L96-L114
from __future__ import division, print_function import numpy as np try: import lmdb except ImportError: raise ImportError( "Could not import the LMDB library `lmdb`. Please refer " "to https://github.com/dw/py-lmdb/ for installation " "instructions." ) try: import msgpack except ImportError: raise ImportError( "Could not import the MessagePack library `msgpack`. " "Please refer to " "https://github.com/msgpack/msgpack-python for " "installation instructions." ) __all__ = ["Reader", "Writer"] def encode_str(string, encoding="utf-8", errors="strict"): return str(string).encode(encoding=encoding, errors=errors) def decode_str(obj, encoding="utf-8", errors="strict"): return obj.decode(encoding=encoding, errors=errors) TYPES = {"str": 1, "ndarray": 2} NB_DBS = 2 DATA_DB = encode_str("data_db") META_DB = encode_str("meta_db") NB_SAMPLES = encode_str("nb_samples") def encode_data(obj): if isinstance(obj, str): return {b"type": TYPES["str"], b"data": obj} elif isinstance(obj, np.ndarray): return { b"type": TYPES["ndarray"], b"dtype": obj.dtype.str, b"shape": obj.shape, b"data": obj.tobytes(), } else: return obj
MIT License
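decode_data is the inverse of the encode_data helper shown in the context; a small in-process round trip (without the msgpack step) makes the byte-keyed dict layout concrete. The import path assumes both helpers live in pyxis/pyxis.py as above.

import numpy as np
from pyxis.pyxis import encode_data, decode_data

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
obj = encode_data(arr)        # {b"type": 2, b"dtype": '<f4', b"shape": (2, 3), b"data": ...}
restored = decode_data(obj)   # back to a (2, 3) float32 ndarray
assert np.array_equal(arr, restored)

decode_data({'plain': 1})     # objects without a b"type" key pass through unchanged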
astropy/astroquery
astroquery/ipac/irsa/irsa_dust/utils.py
parse_number
python
def parse_number(string): num_str = string.split(None, 1)[0] number = float(num_str) return number
Retrieve a number from the string. Parameters ---------- string : str the string to parse Returns ------- number : float the number contained in the string
https://github.com/astropy/astroquery/blob/bffe79a770082d706f0d392b7dfaa542d9ade8e3/astroquery/ipac/irsa/irsa_dust/utils.py#L10-L26
import re import xml.etree.ElementTree as tree import astropy.units as u
BSD 3-Clause New or Revised License
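Given the utils module above, parse_number simply takes the first whitespace-delimited token and converts it to a float, which suits cells like the illustrative "0.1099 (mag)":

parse_number("0.1099 (mag)")   # -> 0.1099
parse_number("42")             # -> 42.0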
peoplecantfly/icapserver
icapserver.py
BaseICAPRequestHandler.set_enc_status
python
def set_enc_status(self, status): self.enc_status = status msg = 'Encapsulated status: %s' % status LOG.debug(msg)
Set encapsulated status in the response. ICAP responses can only contain one encapsulated header section. Such a section is either an encapsulated HTTP request or a response. This method can be called to set the encapsulated HTTP response's status line.
https://github.com/peoplecantfly/icapserver/blob/4b9b116cc688f06d5bd91bbd845b0dd25364db0f/icapserver.py#L246-L257
import sys import time import random import socket import string import logging import urlparse import SocketServer __version__ = "1.3" __all__ = ['ICAPServer', 'BaseICAPRequestHandler', 'ICAPError', 'set_logger'] LOG = logging.getLogger(__name__) level = logging.INFO logging.basicConfig(level=level, format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s', filename='') def set_logger(lvl='info'): if lvl.lower() not in ['error', 'info', 'debug']: raise ICAPError(500, 'Incorrect logging level.') if lvl.lower() == 'error': LOG.setLevel(logging.ERROR) if lvl.lower() == 'info': LOG.setLevel(logging.INFO) if lvl.lower() == 'debug': LOG.setLevel(logging.DEBUG) class ICAPError(Exception): def __init__(self, code=500, message=None): if message is None: short, long = BaseICAPRequestHandler._responses[code] message = short super(ICAPError, self).__init__(message) self.code = code msg = 'Code: %d Message: %s' % (code, message) LOG.error(msg) class ICAPServer(SocketServer.TCPServer): allow_reuse_address = 1 class BaseICAPRequestHandler(SocketServer.StreamRequestHandler): _sys_version = "Python/" + sys.version.split()[0] _protocol_version = "ICAP/1.0" _server_version = "ICAP/" + __version__ _responses = { 100: ('Continue', 'Request received, please continue'), 101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'), 200: ('OK', 'Request fulfilled, document follows'), 201: ('Created', 'Document created, URL follows'), 202: ('Accepted', 'Request accepted, processing continues off-line'), 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), 204: ('No Content', 'Request fulfilled, nothing follows'), 205: ('Reset Content', 'Clear input form for further input.'), 206: ('Partial Content', 'Partial content follows.'), 300: ('Multiple Choices', 'Object has several resources -- see URI list'), 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), 302: ('Found', 'Object moved temporarily -- see URI list'), 303: ('See Other', 'Object moved -- see Method and URL list'), 304: ('Not Modified', 'Document has not changed since given time'), 305: ('Use Proxy', 'You must use proxy specified in Location to access this resource.'), 307: ('Temporary Redirect', 'Object moved temporarily -- see URI list'), 400: ('Bad Request', 'Bad request syntax or unsupported method'), 401: ('Unauthorized', 'No permission - see authorization schemes'), 402: ('Payment Required', 'No payment - see charging schemes'), 403: ('Forbidden', 'Request forbidden - authorization will not help'), 404: ('Not Found', 'Nothing matches the given URI'), 405: ('Method Not Allowed', 'Specified method is invalid for this resource.'), 406: ('Not Acceptable', 'URI not available in preferred format.'), 407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'), 408: ('Request Timeout', 'Request timed out; try again later.'), 409: ('Conflict', 'Request conflict.'), 410: ('Gone', 'URI no longer exists and has been permanently removed.'), 411: ('Length Required', 'Client must specify Content-Length.'), 412: ('Precondition Failed', 'Precondition in headers is false.'), 413: ('Request Entity Too Large', 'Entity is too large.'), 414: ('Request-URI Too Long', 'URI is too long.'), 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), 416: ('Requested Range Not Satisfiable', 'Cannot satisfy request range.'), 417: ('Expectation Failed', 'Expected condition could not be satisfied.'), 451: ('451 Unavailable For Legal Reasons', 'Resource access is denied for 
legal reasons,' 'e.g. censorship or government-mandated blocked access.'), 500: ('Internal Server Error', 'Server got itself in trouble'), 501: ('Not Implemented', 'Server does not support this operation'), 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), 503: ('Service Unavailable', 'The server cannot process the request due to a high load'), 504: ('Gateway Timeout', 'The gateway server did not receive a timely response'), 505: ('Protocol Version Not Supported', 'Cannot fulfill request.'), } _weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] _monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def _read_status(self): status = self.rfile.readline().strip().split(' ', 2) LOG.debug(status) return status def _read_request(self): request = self.rfile.readline().strip().split(' ', 2) LOG.debug(request) return request def _read_headers(self): headers = {} while True: line = self.rfile.readline().strip() if line == '': break k, v = line.split(':', 1) headers[k.lower()] = headers.get(k.lower(), []) + [v.strip()] LOG.debug(headers) return headers def read_chunk(self): if not self.has_body or self.eob: self.eob = True return '' line = self.rfile.readline() if line == '': self.eob = True return '' line = line.strip() arr = line.split(';', 1) chunk_size = 0 try: chunk_size = int(arr[0], 16) except ValueError: raise ICAPError(400, 'Protocol error, could not read chunk') if len(arr) > 1 and arr[1].strip() == 'ieof': self.ieof = True value = self.rfile.read(chunk_size) self.rfile.read(2) if value == '': self.eob = True return value def send_chunk(self, data): l = hex(len(data))[2:] self.wfile.write(l + '\r\n' + data + '\r\n') def cont(self): if self.ieof: raise ICAPError(500, 'Tried to continue on ieof condition') self.wfile.write(self._protocol_version + ' ' + '100 Continue\r\n\r\n') self.eob = False
MIT License
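In practice set_enc_status is called from a service handler before the headers are written. The sketch below assumes the pyicap-style convention of <service>_REQMOD / <service>_RESPMOD handler methods and helpers such as set_icap_response and send_headers; treat every name other than set_enc_status as an assumption about this library rather than confirmed API.

class EchoICAPHandler(BaseICAPRequestHandler):
    def echo_REQMOD(self):
        # Assumed helpers: set_icap_response(code) and send_headers(has_body).
        self.set_icap_response(200)
        # Only one encapsulated section per ICAP response; here an HTTP status line.
        self.set_enc_status('HTTP/1.1 200 OK')
        self.send_headers(False)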
rebiocoder/bioforum
venv/Lib/site-packages/django/contrib/contenttypes/models.py
ContentType.get_all_objects_for_this_type
python
def get_all_objects_for_this_type(self, **kwargs): return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
Return all objects of this type for the keyword arguments given.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/contrib/contenttypes/models.py#L171-L175
from collections import defaultdict from django.apps import apps from django.db import models from django.utils.translation import gettext_lazy as _ class ContentTypeManager(models.Manager): use_in_migrations = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._cache = {} def get_by_natural_key(self, app_label, model): try: ct = self._cache[self.db][(app_label, model)] except KeyError: ct = self.get(app_label=app_label, model=model) self._add_to_cache(self.db, ct) return ct def _get_opts(self, model, for_concrete_model): if for_concrete_model: model = model._meta.concrete_model return model._meta def _get_from_cache(self, opts): key = (opts.app_label, opts.model_name) return self._cache[self.db][key] def get_for_model(self, model, for_concrete_model=True): opts = self._get_opts(model, for_concrete_model) try: return self._get_from_cache(opts) except KeyError: pass try: ct = self.get(app_label=opts.app_label, model=opts.model_name) except self.model.DoesNotExist: ct, created = self.get_or_create( app_label=opts.app_label, model=opts.model_name, ) self._add_to_cache(self.db, ct) return ct def get_for_models(self, *models, for_concrete_models=True): results = {} needed_app_labels = set() needed_models = set() needed_opts = defaultdict(list) for model in models: opts = self._get_opts(model, for_concrete_models) try: ct = self._get_from_cache(opts) except KeyError: needed_app_labels.add(opts.app_label) needed_models.add(opts.model_name) needed_opts[opts].append(model) else: results[model] = ct if needed_opts: cts = self.filter( app_label__in=needed_app_labels, model__in=needed_models ) for ct in cts: model = ct.model_class() opts_models = needed_opts.pop(ct.model_class()._meta, []) for model in opts_models: results[model] = ct self._add_to_cache(self.db, ct) for opts, opts_models in needed_opts.items(): ct = self.create( app_label=opts.app_label, model=opts.model_name, ) self._add_to_cache(self.db, ct) for model in opts_models: results[model] = ct return results def get_for_id(self, id): try: ct = self._cache[self.db][id] except KeyError: ct = self.get(pk=id) self._add_to_cache(self.db, ct) return ct def clear_cache(self): self._cache.clear() def _add_to_cache(self, using, ct): key = (ct.app_label, ct.model) self._cache.setdefault(using, {})[key] = ct self._cache.setdefault(using, {})[ct.id] = ct class ContentType(models.Model): app_label = models.CharField(max_length=100) model = models.CharField(_('python model class name'), max_length=100) objects = ContentTypeManager() class Meta: verbose_name = _('content type') verbose_name_plural = _('content types') db_table = 'django_content_type' unique_together = (('app_label', 'model'),) def __str__(self): return self.name @property def name(self): model = self.model_class() if not model: return self.model return str(model._meta.verbose_name) def model_class(self): try: return apps.get_model(self.app_label, self.model) except LookupError: return None def get_object_for_this_type(self, **kwargs): return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
MIT License
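get_all_objects_for_this_type is a thin generic filter: it resolves the model class behind the ContentType row and runs the query on the same database the ContentType instance was loaded from. Typical usage, with Article standing in for any concrete model:

from django.contrib.contenttypes.models import ContentType

ct = ContentType.objects.get_for_model(Article)        # Article is a placeholder model
drafts = ct.get_all_objects_for_this_type(status='draft')
# Equivalent to Article._base_manager.filter(status='draft'), routed to the
# database the ContentType row came from.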
quantmind/pulsar
pulsar/apps/data/redis/pubsub.py
RedisPubSub.unsubscribe
python
def unsubscribe(self, *channels): return self._execute('UNSUBSCRIBE', *channels)
Un-subscribe from a list of ``channels``.
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/data/redis/pubsub.py#L63-L66
from ....utils.lib import ProtocolConsumer from ..store import PubSub, PubSubClient from ..channels import Channels class PubsubProtocolConsumer(ProtocolConsumer): def feed_data(self, data): parser = self.connection.parser parser.feed(data) response = parser.get() while response is not False: if not isinstance(response, Exception): if isinstance(response, list): command = response[0] if command == b'message': response = response[1:3] self.producer.broadcast(response) elif command == b'pmessage': response = response[2:4] self.producer.broadcast(response) else: raise response response = parser.get() class RedisPubSub(PubSub): push_connection = None def publish(self, channel, message): if self.protocol: message = self.protocol.encode(message) return self.store.execute('PUBLISH', channel, message) def count(self, *channels): kw = {'subcommand': 'numsub'} return self.store.execute('PUBSUB', 'NUMSUB', *channels, **kw) def count_patterns(self): kw = {'subcommand': 'numpat'} return self.store.execute('PUBSUB', 'NUMPAT', **kw) def channels(self, pattern=None): if pattern: return self.store.execute('PUBSUB', 'CHANNELS', pattern) else: return self.store.execute('PUBSUB', 'CHANNELS') def psubscribe(self, pattern, *patterns): return self._subscribe('PSUBSCRIBE', pattern, *patterns) def punsubscribe(self, *patterns): self._execute('PUNSUBSCRIBE', *patterns) def subscribe(self, channel, *channels): return self._subscribe('SUBSCRIBE', channel, *channels)
BSD 3-Clause New or Revised License
prompt-toolkit/python-prompt-toolkit
prompt_toolkit/input/win32_pipe.py
Win32PipeInput.send_text
python
def send_text(self, text: str) -> None: self.vt100_parser.feed(text) windll.kernel32.SetEvent(self._event)
Send text to the input.
https://github.com/prompt-toolkit/python-prompt-toolkit/blob/ef64785fa6fcbd2be952c9781ff5f1dc9cb011d0/prompt_toolkit/input/win32_pipe.py#L112-L118
from ctypes import windll from ctypes.wintypes import HANDLE from typing import Callable, ContextManager, List from prompt_toolkit.eventloop.win32 import create_win32_event from ..key_binding import KeyPress from ..utils import DummyContext from .base import PipeInput from .vt100_parser import Vt100Parser from .win32 import _Win32InputBase, attach_win32_input, detach_win32_input __all__ = ["Win32PipeInput"] class Win32PipeInput(_Win32InputBase, PipeInput): _id = 0 def __init__(self) -> None: super().__init__() self._event = create_win32_event() self._closed = False self._buffer: List[KeyPress] = [] self.vt100_parser = Vt100Parser(lambda key: self._buffer.append(key)) self.__class__._id += 1 self._id = self.__class__._id @property def closed(self) -> bool: return self._closed def fileno(self) -> int: raise NotImplementedError @property def handle(self) -> HANDLE: return self._event def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]: return attach_win32_input(self, input_ready_callback) def detach(self) -> ContextManager[None]: return detach_win32_input(self) def read_keys(self) -> List[KeyPress]: result = self._buffer self._buffer = [] windll.kernel32.ResetEvent(self._event) return result def flush_keys(self) -> List[KeyPress]: self.vt100_parser.flush() result = self._buffer self._buffer = [] return result def send_bytes(self, data: bytes) -> None: self.send_text(data.decode("utf-8", "ignore"))
BSD 3-Clause New or Revised License
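send_text feeds characters through the VT100 parser and sets the win32 event so an attached application wakes up; it is mostly useful for scripting input in tests. A minimal sketch (Windows only):

from prompt_toolkit.input.win32_pipe import Win32PipeInput

pipe_input = Win32PipeInput()
pipe_input.send_text('hello\r')   # the trailing '\r' is parsed as the Enter key
keys = pipe_input.read_keys()     # drains the buffered KeyPress objects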
phimal/deepymod
src/deepymod/data/base.py
Dataset._reshape
python
def _reshape(coords, data): coords = coords.reshape([-1, coords.shape[-1]]) data = data.reshape([-1, data.shape[-1]]) return coords, data
Reshape the coordinates and data to the format [number_of_samples, number_of_features]
https://github.com/phimal/deepymod/blob/47c33667b6d89b5ca65d9e950773d8ac4a3ca0b9/src/deepymod/data/base.py#L175-L179
import torch import numpy as np from numpy import ndarray from numpy.random import default_rng from deepymod.data.samples import Subsampler from abc import ABC, ABCMeta, abstractmethod class Dataset(torch.utils.data.Dataset): def __init__( self, load_function, apply_normalize=None, apply_noise=None, apply_shuffle=None, shuffle=True, subsampler: Subsampler = None, load_kwargs: dict = {}, preprocess_kwargs: dict = { "random_state": 42, "noise_level": 0.0, "normalize_coords": False, "normalize_data": False, }, subsampler_kwargs: dict = {}, device: str = None, ): self.load = load_function self.subsampler = subsampler self.load_kwargs = load_kwargs self.preprocess_kwargs = preprocess_kwargs self.subsampler_kwargs = subsampler_kwargs if apply_normalize != None: self.apply_normalize = apply_normalize if apply_noise != None: self.apply_normalize = apply_noise if apply_shuffle != None: self.apply_shuffle = apply_shuffle self.device = device self.shuffle = shuffle self.coords, self.data = self.load(**self.load_kwargs) assert ( len(self.coords.shape) >= 2 ), "Please explicitely specify a feature axis for the coordinates" assert ( len(self.data.shape) >= 2 ), "Please explicitely specify a feature axis for the data" self.coords, self.data = self.preprocess( self.coords, self.data, **self.preprocess_kwargs ) if self.subsampler: self.coords, self.data = self.subsampler.sample( self.coords, self.data, **self.subsampler_kwargs ) if len(self.data.shape) != 2 or len(self.coords.shape) != 2: self.coords, self.data = self._reshape(self.coords, self.data) if self.shuffle: self.coords, self.data = self.apply_shuffle(self.coords, self.data) self.number_of_samples = self.data.shape[0] print("Dataset is using device: ", self.device) if self.device: self.coords = self.coords.to(self.device) self.data = self.data.to(self.device) def __len__(self) -> int: return self.number_of_samples def __getitem__(self, idx: int) -> int: return self.coords[idx], self.data[idx] def get_coords(self): return self.coords def get_data(self): return self.data def preprocess( self, X: torch.tensor, y: torch.tensor, random_state: int = 42, noise_level: float = 0.0, normalize_coords: bool = False, normalize_data: bool = False, ): y_processed = y + self.apply_noise(y, noise_level, random_state) if normalize_coords: X_processed = self.apply_normalize(X) else: X_processed = X if normalize_data: y_processed = self.apply_normalize(y_processed) else: y_processed = y return X_processed, y_processed @staticmethod def apply_noise(y, noise_level, random_state): noise = noise_level * torch.std(y).data y_noisy = y + torch.tensor( default_rng(random_state).normal(loc=0.0, scale=noise, size=y.shape), dtype=torch.float32, ) return y_noisy @staticmethod def apply_normalize(X): X_norm = (X - X.view(-1, X.shape[-1]).min(dim=0).values) / ( X.view(-1, X.shape[-1]).max(dim=0).values - X.view(-1, X.shape[-1]).min(dim=0).values ) * 2 - 1 return X_norm @staticmethod def apply_shuffle(coords, data): permutation = np.random.permutation(np.arange(len(data))) return coords[permutation], data[permutation] @staticmethod
MIT License
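Given the Dataset class above, _reshape simply flattens every leading axis into one sample axis while keeping the trailing feature axis, e.g. a (t, x, features) grid becomes (t*x, features):

import torch

coords = torch.rand(10, 20, 2)   # e.g. a 10x20 grid of (t, x) coordinates
data = torch.rand(10, 20, 1)     # one observed field on that grid
coords_flat, data_flat = Dataset._reshape(coords, data)
print(coords_flat.shape, data_flat.shape)   # torch.Size([200, 2]) torch.Size([200, 1])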
minitriga/silverpeak_python
silverpeak/silverpeak.py
Silverpeak.get_appliance
python
def get_appliance(self, applianceID): url = '{}/appliance/{}'.format(self.base_url, applianceID) return self._get(self.session, url)
Get device information for one Appliance :param applianceID: Device Primary Key for Appliance :return: Result named tuple.
https://github.com/minitriga/silverpeak_python/blob/2f2509cad3904d91a6a207ae473df3a2624c32e9/silverpeak/silverpeak.py#L266-L274
from collections import namedtuple import requests from requests.exceptions import ConnectionError from .exceptions import LoginCredentialsError, LoginTimeoutError HTTP_SUCCESS_CODES = { 200: 'Success', 204: 'No Content', } HTTP_ERROR_CODES = { 400: 'Bad Request', 403: 'Forbidden', 404: 'API Not found', 406: 'Not Acceptable Response', 415: 'Unsupported Media Type', 500: 'Internal Server Error' } HTTP_RESPONSE_CODES = dict() HTTP_RESPONSE_CODES.update(HTTP_SUCCESS_CODES) HTTP_RESPONSE_CODES.update(HTTP_ERROR_CODES) Result = namedtuple('Result', [ 'ok', 'status_code', 'error', 'reason', 'data', 'response' ]) def parse_http_success(response): if response.request.method in ['GET']: reason = HTTP_RESPONSE_CODES[response.status_code] error = '' if 'json' in response.headers.get('Content-Type'): json_response = response.json() elif 'text' in response.headers.get('Content-Type'): json_response = response.text else: json_response = dict() reason = HTTP_RESPONSE_CODES[response.status_code] error = 'No data received from device' else: reason = HTTP_RESPONSE_CODES[response.status_code] error = '' if response.text: json_response = response.text else: json_response = dict() result = Result( ok=response.ok, status_code=response.status_code, reason=reason, error=error, data=json_response, response=response, ) return result def parse_http_error(response): try: json_response = dict() reason = response.json()['error']['details'] error = response.json()['error']['message'] except ValueError as e: json_response = dict() error = e if HTTP_RESPONSE_CODES[response.status_code]: reason = HTTP_RESPONSE_CODES[response.status_code] if response.text: error = response.text result = Result( ok=response.ok, status_code=response.status_code, reason=reason, error=error, data=json_response, response=response, ) return result def parse_response(response): if response.status_code in HTTP_SUCCESS_CODES: return parse_http_success(response) elif response.status_code in HTTP_ERROR_CODES: return parse_http_error(response) class Silverpeak(object): def __init__(self, user, user_pass, sp_server, sp_port="443", verify=False, disable_warnings=False, proxies=None, timeout=10, auto_login=True): self.user = user self.user_pass = user_pass self.sp_server = sp_server self.sp_port = sp_port self.timeout = timeout self.auto_login = auto_login self.verify = verify self.disable_warnings = disable_warnings if self.disable_warnings: requests.packages.urllib3.disable_warnings() self.base_url = 'https://{}:{}/gms/rest'.format( self.sp_server, self.sp_port ) self.session = requests.session() if proxies is not None: self.session.proxies = proxies if not self.verify: self.session.verify = self.verify if self.auto_login: self.login_result = self.login() def login(self): requestData = { "user": self.user, "password": self.user_pass } try: login_result = self._post( session=self.session, url='{}/authentication/login'.format(self.base_url), headers={'Content-Type': 'application/json'}, json=requestData, timeout=self.timeout ) except ConnectionError: raise LoginTimeoutError( 'Could not connect to {}'.format(self.sp_server)) if login_result.response.text.startswith('wrong credentials'): raise LoginCredentialsError( 'Could not login to device, check user credentials') else: return login_result @staticmethod def _get(session, url, headers=None, timeout=10): if headers is None: headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'} return parse_response(session.get( url=url, headers=headers, timeout=timeout)) @staticmethod def 
_post(session, url, headers=None, data=None, json=None, timeout=10): if headers is None: headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'} if data is None: data = dict() if json is None: json = dict() return parse_response(session.post( url=url, headers=headers, data=data, json=json, timeout=timeout)) @staticmethod def _put(session, url, headers=None, data=None, json=None, timeout=10): if headers is None: headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'} return parse_response(session.put( url=url, headers=headers, data=data, json=json, timeout=timeout)) @staticmethod def _delete(session, url, headers=None, timeout=10): if headers is None: headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'} return parse_response(session.delete( url=url, headers=headers, timeout=timeout)) def get_appliances(self): url = '{}/appliance'.format(self.base_url) return self._get(self.session, url) def delete_appliance(self, applianceID): url = '{}/appliance/{}'.format(self.base_url, applianceID) return self._delete(self.session, url)
MIT License
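get_appliance is a thin GET against /gms/rest/appliance/<id> on the session created at login. A usage sketch (host, credentials and appliance key are placeholders):

from silverpeak.silverpeak import Silverpeak

sp = Silverpeak(user='admin', user_pass='secret', sp_server='orchestrator.example.com')
result = sp.get_appliance('0.NE')     # '0.NE' is an illustrative appliance key
if result.ok:
    print(result.data)                # parsed JSON body
else:
    print(result.status_code, result.error)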
xuru/pyvisdk
pyvisdk/do/warning_upgrade_event.py
WarningUpgradeEvent
python
def WarningUpgradeEvent(vim, *args, **kwargs): obj = vim.client.factory.create('ns0:WarningUpgradeEvent') if (len(args) + len(kwargs)) < 5: raise IndexError('Expected at least 6 arguments got: %d' % len(args)) required = [ 'message', 'chainId', 'createdTime', 'key', 'userName' ] optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs', 'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
This event is a general warning event from upgrade.
https://github.com/xuru/pyvisdk/blob/de24eb4426eb76233dc2e57640d3274ffd304eb3/pyvisdk/do/warning_upgrade_event.py#L11-L33
import logging from pyvisdk.exceptions import InvalidArgumentError log = logging.getLogger(__name__)
MIT License
michael-f-ellis/nearlypurepythonwebappdemo
server.py
stategen
python
def stategen(): last = time.time() counter = 0 nitems = common.nitems statekeys = common.statekeys _state['step'] = (-common.stepsize, 0.0, common.stepsize) _state['stepsize'] = common.stepsize statevalues = [round(random.random()*10, 2) for n in range(nitems)] _state.update(dict(zip(statekeys, statevalues))) while True: now = time.time() if now - last >= 0.5: last = now counter += 1 step = _state['step'] statevalues = [round(v + random.choice(step), 2) for v in statevalues] statevalues = [min(10.0, max(0.0, v)) for v in statevalues] _state.update(dict(zip(statekeys, statevalues))) _state['count'] = counter yield
Initialize each state item with a random float between 0 and 10, then on each next() call, 'walk' the value by a randomly chosen increment. The purpose is to simulate a set of drifting measurements to be displayed and color coded on the client side.
https://github.com/michael-f-ellis/nearlypurepythonwebappdemo/blob/6daf33901c9d8c0ed9330f58aa906bcc35d42987/server.py#L93-L119
import os import sys import time import doctest import random import subprocess import bottle import common from traceback import format_exc from htmltree.htmltree import * import client app = bottle.Bottle() request = bottle.request def buildIndexHtml(): style = Style(**{'a:link':dict(color='red'), 'a:visited':dict(color='green'), 'a:hover':dict(color='hotpink'), 'a:active':dict(color='blue'), }) head = Head(style, Script(src='/client.js', charset='UTF-8')) body = Body("Replace me on the client side", style=dict(background_color='black')) doc = Html(head, body) return doc.render() @app.route('/client.js') def client(): root = os.path.abspath("./__javascript__") return bottle.static_file('client.js', root=root) @app.route("/") @app.route("/index.html") @app.route("/home") def index(): root = os.path.abspath("./__html__") return bottle.static_file('index.html', root=root) _state = {}
MIT License
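stategen is an infinite generator that mutates the module-level _state dict as a side effect; callers drive it with next() and then read _state. A rough sketch of how server.py could poll it (illustrative, not the actual server loop):

gen = stategen()
for _ in range(5):
    next(gen)   # the random walk advances at most once per half second
    print(_state['count'], {k: _state[k] for k in common.statekeys})
    time.sleep(0.5)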
queequeg92/dualpathnet
fashion_mnist.py
MNIST.__getitem__
python
def __getitem__(self, index): img, target = self.data[index], self.labels[index] img = Image.fromarray(img.numpy(), mode='L') if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target
Args: index (int): Index Returns: tuple: (image, target) where target is the index of the target class.
https://github.com/queequeg92/dualpathnet/blob/f9a5460d8864f78741fde8519f9a32c07e0c2153/fashion_mnist.py#L78-L97
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.autograd import Variable from collections import namedtuple from matplotlib import pyplot as plt from PIL import Image import os import os.path import errno import codecs import copy from six.moves import urllib import gzip class MNIST(torch.utils.data.Dataset): mnist_base_url = 'http://yann.lecun.com/exdb/mnist/' fasion_base_url = 'https://cdn.rawgit.com/zalandoresearch/fashion-mnist/ed8e4f3b/data/fashion/' base_url = fasion_base_url urls = [ base_url+'train-images-idx3-ubyte.gz', base_url+'train-labels-idx1-ubyte.gz', base_url+'t10k-images-idx3-ubyte.gz', base_url+'t10k-labels-idx1-ubyte.gz',] raw_folder = 'raw' processed_folder = 'processed' training_file = 'training.pt' test_file = 'test.pt' def __init__(self, root, dataset='train', transform=None, target_transform=None, download=False, force_download=False): self.root = os.path.expanduser(root) self.transform = transform self.target_transform = target_transform self.dataset = dataset self.force_download = force_download if download: self.download() if not self._check_exists(): raise RuntimeError('Dataset not found.' + ' You can use download=True to download it') if self.dataset == 'train': self.data, self.labels = torch.load(os.path.join(root, self.processed_folder, self.training_file)) else: self.data, self.labels = torch.load(os.path.join(root, self.processed_folder, self.test_file))
Apache License 2.0
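Because __getitem__ returns a PIL image (mode 'L') passed through the optional transform, the class plugs straight into a DataLoader. A typical setup, assuming the class is importable from fashion_mnist.py and using a placeholder data directory:

import torch
from torchvision import transforms
from fashion_mnist import MNIST

train_set = MNIST('./data/fashion', dataset='train', download=True,
                  transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
images, targets = next(iter(loader))   # images: (64, 1, 28, 28), targets: (64,)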
rougeth/bottery
bottery/platforms.py
BaseEngine.message_handler
python
async def message_handler(self, data): message = self.build_message(data) if not message: logger.error( '[%s] Unable to build Message with data, data=%s, error', self.engine_name, data ) return logger.info('[%s] New message from %s: %s', self.engine_name, message.user, message.text) response = await self.get_response(message) if response: await self.send_response(response)
For each new message, build its platform specific message object and get a response.
https://github.com/rougeth/bottery/blob/1c724b867fa16708d59a3dbba5dd2c3de85147a9/bottery/platforms.py#L92-L112
import inspect import logging from bottery.conf import settings from bottery.message import Response logger = logging.getLogger('bottery.platforms') class BaseEngine: def __init__(self, **kwargs): self.tasks = [] kwargs['engine_name'] = kwargs.get('engine_name', '') for item, value in kwargs.items(): setattr(self, item, value) @property def platform(self): raise NotImplementedError('platform attribute not implemented') def build_message(self): raise NotImplementedError('build_message not implemented') async def configure(self): raise NotImplementedError('configure not implemented') async def _get_response(self, message): view = self.discovery_view(message) if not view: return if inspect.iscoroutinefunction(view): response = await view(message) else: response = view(message) return self.prepare_response(response, message) def prepare_response(self, response, message): if isinstance(response, Response): return response if isinstance(response, str): return Response(source=message, text=response) if response is not None: logger.error( '[%s] View should only return str or Response', self.engine_name, ) return None async def prepare_get_response(self): get_response = self._get_response for middleware in reversed(settings.MIDDLEWARES): get_response = await middleware(get_response) return get_response async def get_response(self, message): f = await self.prepare_get_response() return await f(message) def discovery_view(self, message): for handler in self.registered_handlers: if handler.check(message): return handler.view return None
MIT License
aminyazdanpanah/python-ffmpeg-video-streaming
ffmpeg_streaming/_command_builder.py
stream_args
python
def stream_args(media): return getattr(sys.modules[__name__], "_%s" % type(media).__name__.lower())(media)
@TODO: add documentation
https://github.com/aminyazdanpanah/python-ffmpeg-video-streaming/blob/731530fd9e569362f9ba30d723b395bf0d011eb3/ffmpeg_streaming/_command_builder.py#L128-L132
import sys from ._utiles import cnv_options_to_args, get_path_info, clean_args USE_TIMELINE = 1 USE_TEMPLATE = 1 HLS_LIST_SIZE = 0 HLS_TIME = 10 HLS_ALLOW_CACHE = 1 def _stream2file(stream2file): args = stream2file.format.all args.update({'c': 'copy'}) args.update(stream2file.options) return cnv_options_to_args(args) + [stream2file.output_] def _get_audio_bitrate(rep, index: int = None): if rep.bitrate.audio_ is not None and rep.bitrate.audio_ != 0: opt = 'b:a' if index is None else 'b:a:' + str(index) return {opt: rep.bitrate.audio} return {} def _get_dash_stream(key, rep): args = { 'map': 0, 's:v:' + str(key): rep.size, 'b:v:' + str(key): rep.bitrate.calc_video() } args.update(_get_audio_bitrate(rep, key)) args.update(rep.options) return cnv_options_to_args(args) def _dash(dash): dirname, name = get_path_info(dash.output_) _args = dash.format.all _args.update({ 'use_timeline': USE_TIMELINE, 'use_template': USE_TEMPLATE, 'init_seg_name': '{}_init_$RepresentationID$.$ext$'.format(name), "media_seg_name": '{}_chunk_$RepresentationID$_$Number%05d$.$ext$'.format(name), 'f': 'dash' }) _args.update(dash.options) args = cnv_options_to_args(_args) for key, rep in enumerate(dash.reps): args += _get_dash_stream(key, rep) return args + ['-strict', '-2', '{}/{}.mpd'.format(dirname, name)] def _hls_seg_ext(hls): return 'm4s' if hls.options.get('hls_segment_type', '') == 'fmp4' else 'ts' def _get_hls_stream(hls, rep, dirname, name): args = hls.format.all args.update({ 'hls_list_size': HLS_LIST_SIZE, 'hls_time': HLS_TIME, 'hls_allow_cache': HLS_ALLOW_CACHE, 'hls_segment_filename': "{}/{}_{}p_%04d.{}".format(dirname, name, rep.size.height, _hls_seg_ext(hls)), 'hls_fmp4_init_filename': "{}_{}p_init.mp4".format(name, rep.size.height), 's:v': rep.size, 'b:v': rep.bitrate.calc_video() }) args.update(_get_audio_bitrate(rep)) args.update(rep.options) args.update({'strict': '-2'}) args.update(hls.options) return cnv_options_to_args(args) + ["{}/{}_{}p.m3u8".format(dirname, name, str(rep.size.height))] def _hls(hls): dirname, name = get_path_info(hls.output_) streams = [] for key, rep in enumerate(hls.reps): if key > 0: streams += input_args(hls) streams += _get_hls_stream(hls, rep, dirname, name) return streams
MIT License
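`stream_args` dispatches purely on the runtime class name of `media`: an instance of a class named `HLS` is routed to `_hls`, `DASH` to `_dash`, `Stream2File` to `_stream2file`. A self-contained sketch of the same pattern under that assumption (the `HLS` stub and its return value are illustrative, not the library's real objects):

import sys

def _hls(media):
    # stand-in builder; the real _hls assembles the full ffmpeg argument list
    return ['-f', 'hls']

def stream_args(media):
    # "_" + lower-cased class name selects the builder defined in this module
    return getattr(sys.modules[__name__], "_%s" % type(media).__name__.lower())(media)

class HLS:
    pass

print(stream_args(HLS()))  # -> ['-f', 'hls']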
hypergan/hyperchamber
hyperchamber/selector.py
Selector.get_config_value
python
def get_config_value(self, k, i): if(not isinstance(self.store[k], list)): return self.store[k] else: return self.store[k][i]
Gets the ith config value for k. e.g. get_config_value('x', 1)
https://github.com/hypergan/hyperchamber/blob/f9b92f5518f1b23e6873e7da63bc31aac3819aae/hyperchamber/selector.py#L44-L49
from json import JSONEncoder import random import os import uuid import json from .config import Config class HCEncoder(JSONEncoder): def default(self, o): if(hasattr(o, '__call__')): return "function:" +o.__module__+"."+o.__name__ else: try: return o.__dict__ except AttributeError: try: return str(o) except AttributeError: return super(o) class Selector: def __init__(self, initialStore = {}): self.store = initialStore self.results = [] def set(self, key, value): self.store[key]=value return self.store def count_configs(self): count = 1 for key in self.store: value = self.store[key] if(isinstance(value,list)): count *= len(value) return count
MIT License
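A short usage sketch of `Selector.get_config_value`: list values are indexed by `i`, scalar values are returned unchanged, so the same store can mix fixed settings and hyperparameter sweeps (the keys below are made up):

s = Selector({'lr': [0.1, 0.01, 0.001], 'optimizer': 'adam'})
s.get_config_value('lr', 1)         # -> 0.01, the i-th element of the list value
s.get_config_value('optimizer', 1)  # -> 'adam', non-list values ignore i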
quantconnect/lean-cli
lean/commands/data/download.py
_display_products
python
def _display_products(organization: QCFullOrganization, products: List[Product]) -> None: logger = container.logger() table = Table(box=box.SQUARE) for column in ["Dataset", "Vendor", "Details", "File count", "Price"]: table.add_column(column, overflow="fold") summed_price = 0 for product in products: details = [] for option_id, result in product.option_results.items(): option = next(o for o in product.dataset.options if o.id == option_id) if result is not None: label = option.label if isinstance(result.value, list): if len(result.value) > 1: label = label.replace("(s)", "s") else: label = label.replace("(s)", "") details.append(f"{label}: {result.label}") if len(details) == 0: details.append("-") mapped_files = _map_data_files_to_vendors(organization, product.get_data_files()) price = sum(data_file.vendor.price for data_file in mapped_files) summed_price += price table.add_row(product.dataset.name, product.dataset.vendor, "\n".join(details), f"{len(mapped_files):,.0f}", f"{price:,.0f} QCC") logger.info(table) all_data_files = _get_data_files(organization, products) total_price = sum(data_file.vendor.price for data_file in all_data_files) if total_price != summed_price: logger.warn("The total price is less than the sum of all separate prices because there is overlapping data") logger.info(f"Total price: {total_price:,.0f} QCC") logger.info(f"Organization balance: {organization.credit.balance:,.0f} QCC")
Previews a list of products in pretty tables. :param organization: the organization the user selected :param products: the products to display
https://github.com/quantconnect/lean-cli/blob/d154ddd2eea6a889786c48c42ce114a7dcf19932/lean/commands/data/download.py#L95-L146
import itertools import re import webbrowser from collections import OrderedDict from typing import Iterable, List, Optional import click from rich import box from rich.table import Table from lean.click import LeanCommand, ensure_options from lean.container import container from lean.models.api import QCDataInformation, QCDataVendor, QCFullOrganization, QCDatasetDelivery from lean.models.data import Dataset, DataFile, Product from lean.models.logger import Option _data_information: Optional[QCDataInformation] = None def _get_data_information(organization: QCFullOrganization) -> QCDataInformation: global _data_information if _data_information is None: _data_information = container.api_client().data.get_info(organization.id) return _data_information def _map_data_files_to_vendors(organization: QCFullOrganization, data_files: Iterable[str]) -> List[DataFile]: data_information = _get_data_information(organization) last_vendor: Optional[QCDataVendor] = None mapped_files = [] for file in data_files: if last_vendor is not None and last_vendor.regex.search(file): mapped_files.append(DataFile(file=file, vendor=last_vendor)) continue last_vendor = None for vendor in data_information.prices: if vendor.price is None: continue if vendor.regex.search(file): mapped_files.append(DataFile(file=file, vendor=vendor)) last_vendor = vendor break if last_vendor is None: raise RuntimeError(f"There is no data vendor that sells '{file}'") return mapped_files def _get_data_files(organization: QCFullOrganization, products: List[Product]) -> List[DataFile]: unique_data_files = sorted(list(set(itertools.chain(*[product.get_data_files() for product in products])))) return _map_data_files_to_vendors(organization, unique_data_files)
Apache License 2.0
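One detail of `_display_products` worth isolating is the option-label pluralisation: a label such as "Ticker(s)" is rendered as "Tickers" when the selected value is a list with more than one element and as "Ticker" otherwise. A hedged sketch of just that rule (the function name and labels are illustrative):

def pluralise(label: str, value) -> str:
    # Mirrors the label handling inside _display_products
    if isinstance(value, list):
        return label.replace("(s)", "s" if len(value) > 1 else "")
    return label

pluralise("Ticker(s)", ["SPY", "QQQ"])  # -> "Tickers"
pluralise("Ticker(s)", ["SPY"])         # -> "Ticker"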
tunein/maestro
maestro/config/config_validator.py
validate_runtime
python
def validate_runtime(config_runtime): print(color.CYAN + "validating runtime %s..." % config_runtime + color.END) if any(runtime in config_runtime for runtime in AVAIL_RUNTIMES): return True else: print(color.RED + "Not a valid runtime" + color.END) sys.exit(1)
Validates that the language we want to use is supported by AWS args: config_runtime: runtime (nodejs, python, .NET, etc) pulled from config file
https://github.com/tunein/maestro/blob/789205fdbe85242189c50e407445c57ca916e42c/maestro/config/config_validator.py#L61-L73
import boto3 import sys import json import os from time import gmtime, strftime from botocore.exceptions import ClientError import maestro.config.lambda_config as lambda_config class color: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' iam = boto3.resource('iam') roles = boto3.client('iam') AVAIL_RUNTIMES = lambda_config.AVAIL_RUNTIMES AVAIL_ACTIONS = lambda_config.AVAIL_ACTIONS ACCEPTED_LOG_EXPIRATION = lambda_config.ACCEPTED_LOG_EXPIRATION def validate_file_type(doc): print(color.CYAN + "validating file type..." + color.END) if len(doc)>0: if doc.lower().endswith('.json'): return True sys.exit(1) print(color.RED + "Please enter a valid json document after your action" + color.END) def validate_action(current_action): if any(action in current_action for action in AVAIL_ACTIONS): return True else: print(color.RED + "Not a valid action" + color.END) sys.exit(1)
Apache License 2.0
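Note that `validate_runtime` uses `any(runtime in config_runtime ...)`, which is a substring test rather than an equality test, so any configured value that merely contains a supported runtime string passes. A small sketch of that behaviour (the runtime list is an assumption, not the actual contents of `lambda_config.AVAIL_RUNTIMES`):

AVAIL_RUNTIMES = ['python3.9', 'nodejs14.x']  # illustrative values only

def is_supported(config_runtime):
    # same membership logic as validate_runtime, without the printing and sys.exit
    return any(runtime in config_runtime for runtime in AVAIL_RUNTIMES)

is_supported('python3.9')           # -> True
is_supported('my-python3.9-layer')  # -> True, substring match
is_supported('ruby2.7')             # -> False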
vutran1710/pyratelimiter
pyrate_limiter/bucket.py
RedisClusterBucket.get_connection
python
def get_connection(self): from rediscluster import RedisCluster return RedisCluster(connection_pool=self._pool)
Obtain a connection from the Redis pool
https://github.com/vutran1710/pyratelimiter/blob/5234530545b21f1f1aff2e70f3329a5b288cd53a/pyrate_limiter/bucket.py#L188-L193
from abc import ABC, abstractmethod from queue import Queue from threading import RLock from typing import List, Tuple from .exceptions import InvalidParams class AbstractBucket(ABC): def __init__(self, maxsize=0, **_kwargs): self._maxsize = maxsize def maxsize(self) -> int: return self._maxsize @abstractmethod def size(self) -> int: @abstractmethod def put(self, item: float) -> int: @abstractmethod def get(self, number: int) -> float: @abstractmethod def all_items(self) -> List[float]: def inspect_expired_items(self, time: float) -> Tuple[int, float]: volume = self.size() item_count, remaining_time = 0, 0 for log_idx, log_item in enumerate(self.all_items()): if log_item > time: item_count = volume - log_idx remaining_time = log_item - time break return item_count, remaining_time class MemoryQueueBucket(AbstractBucket): def __init__(self, maxsize=0, **_kwargs): super().__init__() self._q = Queue(maxsize=maxsize) def size(self): return self._q.qsize() def put(self, item): return self._q.put(item) def get(self, number): counter = 0 for _ in range(number): self._q.get() counter += 1 return counter def all_items(self): return list(self._q.queue) class MemoryListBucket(AbstractBucket): def __init__(self, maxsize=0, **_kwargs): super().__init__(maxsize=maxsize) self._q = [] self._lock = RLock() def size(self): return len(self._q) def put(self, item): with self._lock: if self.size() < self.maxsize(): self._q.append(item) return 1 return 0 def get(self, number): with self._lock: counter = 0 for _ in range(number): self._q.pop(0) counter += 1 return counter def all_items(self): return self._q.copy() class RedisBucket(AbstractBucket): def __init__( self, maxsize=0, redis_pool=None, bucket_name: str = None, identity: str = None, **_kwargs, ): super().__init__(maxsize=maxsize) if not bucket_name or not isinstance(bucket_name, str): msg = "keyword argument bucket-name is missing: a distict name is required" raise InvalidParams(msg) self._pool = redis_pool self._bucket_name = f"{bucket_name}___{identity}" def get_connection(self): from redis import Redis return Redis(connection_pool=self._pool) def get_pipeline(self): conn = self.get_connection() pipeline = conn.pipeline() return pipeline def size(self): conn = self.get_connection() return conn.llen(self._bucket_name) def put(self, item): conn = self.get_connection() current_size = conn.llen(self._bucket_name) if current_size < self.maxsize(): conn.rpush(self._bucket_name, item) return 1 return 0 def get(self, number): pipeline = self.get_pipeline() counter = 0 for _ in range(number): pipeline.lpop(self._bucket_name) counter += 1 pipeline.execute() return counter def all_items(self): conn = self.get_connection() items = conn.lrange(self._bucket_name, 0, -1) return [float(i.decode("utf-8")) for i in items] class RedisClusterBucket(RedisBucket):
MIT License
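A hedged usage sketch of `RedisClusterBucket`: it inherits all bucket behaviour from `RedisBucket` and only swaps the connection class, so construction differs only in the pool that is passed in. This assumes redis-py-cluster's `ClusterConnectionPool`; host and port are placeholders:

from rediscluster import ClusterConnectionPool
from pyrate_limiter.bucket import RedisClusterBucket

pool = ClusterConnectionPool(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}])
bucket = RedisClusterBucket(
    maxsize=10,
    redis_pool=pool,
    bucket_name="api-calls",
    identity="user-42",
)
bucket.put(1.0)  # inherited from RedisBucket, executed against the cluster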
cuthbertlab/music21-tools
bhadley/harmonyRealizer.py
generateSmoothBassLine
python
def generateSmoothBassLine(harmonyObjects): s = stream.Score() s.append(clef.BassClef()) harmonyObjects[0].bass().octave = 2 lastBass = harmonyObjects[0].bass() s.append(note.Note(lastBass)) for cs in harmonyObjects[1:]: cs.bass().octave = lastBass.octave sameOctave = interval.Interval(lastBass, copy.deepcopy(cs.bass())) cs.bass().octave += 1 octavePlus = interval.Interval(lastBass, copy.deepcopy(cs.bass())) cs.bass().octave = cs.bass().octave - 2 octaveMinus = interval.Interval(lastBass, copy.deepcopy(cs.bass())) l = [sameOctave, octavePlus, octaveMinus] minimum = sameOctave.generic.undirected ret = sameOctave for i in l: if i.generic.undirected < minimum: minimum = i.generic.undirected ret = i if ret.noteEnd.octave > 3 or ret.noteEnd.octave < 1: ret.noteEnd.octave = lastBass.octave cs.bass().octave = ret.noteEnd.octave lastBass = cs.bass() s.append(note.Note(cs.bass())) return harmonyObjects
accepts a list of harmony.chordSymbol objects and returns that same list with a computer generated octave assigned to each bass note. The algorithm is under development, but currently works like this: 1. assigns octave of 2 to the first bass note 2. iterates through each of the following bass notes corresponding to the chordSymbol i. creates three generic intervals between the previous bass note and the current bass note, all using the previous bass note's newly defined octave and one of three current bass note octaves: 1. the last bass note's octave 2. the last bass note's octave + 1 3. the last bass note's octave - 1 ii. evaluates the size of each of the three intervals above (using interval.generic.undirected) and finds the smallest size iii. assigns the bass note octave that yields this smallest interval to the current bass note - if the newly found octave is determined to be greater than 3 or less than 1, the bass note octave is assigned to the last bass note's octave iv. updates the previous bass note, and the iteration continues 3. returns list of chordSymbols, with computer generated octaves assigned
https://github.com/cuthbertlab/music21-tools/blob/78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee/bhadley/harmonyRealizer.py#L62-L111
from music21 import clef from music21 import harmony from music21 import interval from music21 import metadata from music21 import note from music21 import roman from music21 import stream from music21 import corpus from music21.figuredBass import realizer, rules from music21.romanText import clercqTemperley import copy import unittest def generateContrapuntalBassLine(harmonyObject, fbRules): fbLine = realizer.FiguredBassLine() for o in harmonyObject: fbLine.addElement(o) allSols = fbLine.realize(fbRules) return allSols.generateRandomRealizations(1)
BSD 3-Clause New or Revised License
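A hedged usage sketch of `generateSmoothBassLine`, building the input list with music21 chord symbols (the progression is arbitrary):

from music21 import harmony

progression = [harmony.ChordSymbol(figure) for figure in ('C', 'G7', 'Am', 'F')]
realized = generateSmoothBassLine(progression)
for cs in realized:
    # each bass now carries the octave chosen to minimise the leap from the previous bass
    print(cs.figure, cs.bass().nameWithOctave)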
docusign/docusign-python-client
docusign_esign/models/offline_attributes.py
OfflineAttributes.account_esign_id
python
def account_esign_id(self, account_esign_id): self._account_esign_id = account_esign_id
Sets the account_esign_id of this OfflineAttributes. A GUID identifying the account associated with the consumer disclosure # noqa: E501 :param account_esign_id: The account_esign_id of this OfflineAttributes. # noqa: E501 :type: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/offline_attributes.py#L86-L95
import pprint import re import six from docusign_esign.client.configuration import Configuration class OfflineAttributes(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_esign_id': 'str', 'device_model': 'str', 'device_name': 'str', 'gps_latitude': 'str', 'gps_longitude': 'str', 'offline_signing_hash': 'str' } attribute_map = { 'account_esign_id': 'accountEsignId', 'device_model': 'deviceModel', 'device_name': 'deviceName', 'gps_latitude': 'gpsLatitude', 'gps_longitude': 'gpsLongitude', 'offline_signing_hash': 'offlineSigningHash' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._account_esign_id = None self._device_model = None self._device_name = None self._gps_latitude = None self._gps_longitude = None self._offline_signing_hash = None self.discriminator = None setattr(self, "_{}".format('account_esign_id'), kwargs.get('account_esign_id', None)) setattr(self, "_{}".format('device_model'), kwargs.get('device_model', None)) setattr(self, "_{}".format('device_name'), kwargs.get('device_name', None)) setattr(self, "_{}".format('gps_latitude'), kwargs.get('gps_latitude', None)) setattr(self, "_{}".format('gps_longitude'), kwargs.get('gps_longitude', None)) setattr(self, "_{}".format('offline_signing_hash'), kwargs.get('offline_signing_hash', None)) @property def account_esign_id(self): return self._account_esign_id @account_esign_id.setter
MIT License
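A minimal usage sketch of the `account_esign_id` property pair; the GUID is a placeholder, and the import path assumes the model is re-exported at package level (otherwise import it from `docusign_esign.models.offline_attributes`):

from docusign_esign import OfflineAttributes

attrs = OfflineAttributes()
attrs.account_esign_id = "00000000-0000-0000-0000-000000000000"  # setter shown above
print(attrs.account_esign_id)                                    # matching getter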
sebdah/dynamic-dynamodb
dynamic_dynamodb/statistics/gsi.py
get_consumed_read_units_percent
python
def get_consumed_read_units_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_read_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_read_units = 0 try: gsi_read_units = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) consumed_read_units_percent = ( float(consumed_read_units) / float(gsi_read_units) * 100) except JSONResponseError: raise logger.info('{0} - GSI: {1} - Consumed read units: {2:.2f}%'.format( table_name, gsi_name, consumed_read_units_percent)) return consumed_read_units_percent
Returns the number of consumed read units in percent :type table_name: str :param table_name: Name of the DynamoDB table :type gsi_name: str :param gsi_name: Name of the GSI :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Number of consumed reads as a percentage of provisioned reads
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/statistics/gsi.py#L14-L58
from datetime import datetime, timedelta from boto.exception import JSONResponseError, BotoServerError from retrying import retry from dynamic_dynamodb.aws import dynamodb from dynamic_dynamodb.log_handler import LOGGER as logger from dynamic_dynamodb.aws.cloudwatch import ( CLOUDWATCH_CONNECTION as cloudwatch_connection)
Apache License 2.0
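A worked example of the arithmetic in `get_consumed_read_units_percent`, with made-up numbers: CloudWatch returns a `Sum` over the lookback period, which is divided by the period in seconds to get a per-second rate and then compared to the provisioned throughput.

lookback_period = 5                              # minutes
metric_sum = 3000.0                              # ConsumedReadCapacityUnits Sum (made up)
consumed = metric_sum / (lookback_period * 60)   # 3000 / 300 -> 10.0 units per second
gsi_read_units = 40                              # provisioned GSI read units (made up)
percent = consumed / gsi_read_units * 100        # -> 25.0%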
climdyn/qgs
qgs/integrators/integrator.py
RungeKuttaTglsIntegrator.integrate
python
def integrate(self, t0, t, dt, ic=None, tg_ic=None, forward=True, adjoint=False, inverse=False, boundary=None, write_steps=1): if self.func is None or self.func_jac is None: print('No function to integrate defined!') return 0 if ic is None: if self.ic is None: if self.n_dim is not None: i = self.n_dim else: i = 1 while True: self.ic = np.zeros(i) try: x = self.func(0., self.ic) except: i += 1 else: break i = len(self.func(0., self.ic)) self.ic = np.zeros(i) else: self.ic = ic if len(self.ic.shape) == 1: self.ic = self.ic.reshape((1, -1)) self.n_traj = self.ic.shape[0] self.n_dim = self.ic.shape[1] self._time = np.concatenate((np.arange(t0, t, dt), np.full((1,), t))) self._write_steps = write_steps if tg_ic is None: tg_ic = np.eye(self.ic.shape[1]) tg_ic_sav = tg_ic.copy() if len(tg_ic.shape) == 1: tg_ic = tg_ic.reshape((1, -1, 1)) ict = tg_ic.copy() for i in range(self.n_traj-1): ict = np.concatenate((ict, tg_ic)) self.tg_ic = ict elif len(tg_ic.shape) == 2: if tg_ic.shape[0] == self.n_traj: self.tg_ic = tg_ic[..., np.newaxis] else: tg_ic = tg_ic[np.newaxis, ...] tg_ic = np.swapaxes(tg_ic, 1, 2) ict = tg_ic.copy() for i in range(self.n_traj-1): ict = np.concatenate((ict, tg_ic)) self.tg_ic = ict elif len(tg_ic.shape) == 3: if tg_ic.shape[1] != self.n_dim: self.tg_ic = np.swapaxes(tg_ic, 1, 2) if forward: self._time_direction = 1 else: self._time_direction = -1 self._adjoint = adjoint if boundary is None: self._boundary = _zeros_func else: self._boundary = boundary self._inverse = 1. if inverse: self._inverse *= -1. if write_steps == 0: self.n_records = 1 else: tot = self._time[::self._write_steps] self.n_records = len(tot) if tot[-1] != self._time[-1]: self.n_records += 1 self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records)) self._recorded_fmatrix = np.zeros((self.n_traj, self.tg_ic.shape[1], self.tg_ic.shape[2], self.n_records)) for i in range(self.n_traj): self._ics_queue.put((i, self._time, self.ic[i], self.tg_ic[i], self._time_direction, self._write_steps, self._adjoint, self._inverse, self._boundary)) self._ics_queue.join() for i in range(self.n_traj): args = self._traj_queue.get() self._recorded_traj[args[0]] = args[1] self._recorded_fmatrix[args[0]] = args[2] if len(tg_ic_sav.shape) == 2: if self._recorded_fmatrix.shape[1:3] != tg_ic_sav.shape: self._recorded_fmatrix = np.swapaxes(self._recorded_fmatrix, 1, 2) elif len(tg_ic_sav.shape) == 3: if tg_ic_sav.shape[1] != self.n_dim: if self._recorded_fmatrix.shape[:3] != tg_ic_sav.shape: self._recorded_fmatrix = np.swapaxes(self._recorded_fmatrix, 1, 2)
Integrate simultaneously the non-linear and linearized ordinary differential equations (ODEs) .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x}) and .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \cdot \\boldsymbol{\\delta x} with a specified `Runge-Kutta method`_ and workers. The function :math:`\\boldsymbol{f}` is the `Numba`_ jitted function stored in :attr:`func`. The function :math:`\\boldsymbol{J}` is the `Numba`_ jitted function stored in :attr:`func_jac`. The result of the integration can be obtained afterward by calling :meth:`get_trajectories`. .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods .. _Numba: https://numba.pydata.org/ .. _fundamental matrix of solutions: https://en.wikipedia.org/wiki/Fundamental_matrix_(linear_differential_equation) Parameters ---------- t0: float Initial time of the time integration. Corresponds to the initial condition. t: float Final time of the time integration. Corresponds to the final condition. dt: float Timestep of the integration. ic: None or ~numpy.ndarray(float), optional Initial (or final) conditions of the system. Can be a 1D or a 2D array: * 1D: Provide a single initial condition. Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`. * 2D: Provide an ensemble of initial condition. Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`, and where `n_traj` is the number of initial conditions. If `None`, use the initial conditions stored in :attr:`ic`. If then :attr:`ic` is `None`, use a zero initial condition. Default to `None`. If the `forward` argument is `False`, it specifies final conditions. tg_ic: None or ~numpy.ndarray(float), optional Initial (or final) conditions of the linear ODEs :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`. \n Can be a 1D, a 2D or a 3D array: * 1D: Provide a single initial condition. This initial condition of the linear ODEs will be the same used for each initial condition `ic` of the ODEs :math:`\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})` Should be of shape (`n_dim`,) where `n_dim` = :math:`\mathrm{dim}(\\boldsymbol{x})`. * 2D: Two sub-cases: + If `tg_ic.shape[0]`=`ic.shape[0]`, assumes that each initial condition `ic[i]` of :math:`\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})`, correspond to a different initial condition `tg_ic[i]`. + Else, assumes and integrate an ensemble of `n_tg_traj` initial condition of the linear ODEs for each initial condition of :math:`\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})`. * 3D: An array of shape (`n_traj`, `n_tg_traj`, `n_dim`) which provide an ensemble of `n_tg_ic` initial conditions specific to each of the `n_traj` initial conditions of :math:`\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})`. If `None`, use the identity matrix as initial condition, returning the `fundamental matrix of solutions`_ of the linear ODEs. Default to `None`. If the `forward` argument is `False`, it specifies final conditions. forward: bool, optional If true, integrate the ODEs forward in time, else, integrate backward in time. In case of backward integration, the initial condition `ic` becomes a final condition. Default to forward integration. 
adjoint: bool, optional If true, integrate the tangent :math:`\dot{\\boldsymbol{\delta x}} = \\boldsymbol{\mathrm{J}}(t, \\boldsymbol{x}) \cdot \\boldsymbol{\delta x}` , else, integrate the adjoint linear model :math:`\dot{\\boldsymbol{\delta x}} = \\boldsymbol{\mathrm{J}}^T(t, \\boldsymbol{x}) \cdot \\boldsymbol{\delta x}`. Integrate the tangent model by default. inverse: bool, optional Whether or not to invert the Jacobian matrix :math:`\\boldsymbol{\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\mathrm{J}}^{-1}(t, \\boldsymbol{x})`. `False` by default. boundary: None or callable, optional Allows adding an inhomogeneous term to the linear ODEs: :math:`\dot{\\boldsymbol{\delta x}} = \\boldsymbol{\mathrm{J}}(t, \\boldsymbol{x}) \cdot \\boldsymbol{\delta x} + \Psi(t, \\boldsymbol{x})`. The boundary :math:`\Psi` should have the same signature as :math:`\\boldsymbol{\mathrm{J}}`, i.e. ``func(t, x)``. If `None`, don't add anything (homogeneous case). `None` by default. write_steps: int, optional Save the state of the integration in memory every `write_steps` steps. The other intermediary steps are lost. It determines the size of the returned objects. Default is 1. Set to 0 to return only the final state.
https://github.com/climdyn/qgs/blob/33d79b1fa360de22b7ae595c142dbe9b6a8fb53a/qgs/integrators/integrator.py#L816-L1003
import multiprocessing import numpy as np from numba import njit from qgs.integrators.integrate import _integrate_runge_kutta_jit, _integrate_runge_kutta_tgls_jit, _zeros_func from qgs.functions.util import reverse class RungeKuttaIntegrator(object): def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None): if num_threads is None: self.num_threads = multiprocessing.cpu_count() else: self.num_threads = num_threads if a is None and b is None and c is None: self.c = np.array([0., 0.5, 0.5, 1.]) self.b = np.array([1./6, 1./3, 1./3, 1./6]) self.a = np.zeros((len(self.c), len(self.b))) self.a[1, 0] = 0.5 self.a[2, 1] = 0.5 self.a[3, 2] = 1. else: self.a = a self.b = b self.c = c self.ic = None self._time = None self._recorded_traj = None self.n_traj = 0 self.n_dim = number_of_dimensions self.n_records = 0 self._write_steps = 0 self._time_direction = 1 self.func = None self._ics_queue = None self._traj_queue = None self._processes_list = list() def terminate(self): for process in self._processes_list: process.terminate() process.join() def start(self): self.terminate() self._processes_list = list() self._ics_queue = multiprocessing.JoinableQueue() self._traj_queue = multiprocessing.Queue() for i in range(self.num_threads): self._processes_list.append(TrajectoryProcess(i, self.func, self.b, self.c, self.a, self._ics_queue, self._traj_queue)) for process in self._processes_list: process.daemon = True process.start() def set_func(self, f, ic_init=True): self.func = f if ic_init: self.ic = None self.start() def set_bca(self, b=None, c=None, a=None, ic_init=True): if a is not None: self.a = a if b is not None: self.b = b if c is not None: self.c = c if ic_init: self.ic = None self.start() def initialize(self, convergence_time, dt, pert_size=0.01, reconvergence_time=None, forward=True, number_of_trajectories=1, ic=None, reconverge=False): if reconverge is None: reconverge = False if ic is None: if self.n_dim is not None: i = self.n_dim else: i = 1 while True: self.ic = np.zeros(i) try: x = self.func(0., self.ic) except: i += 1 else: break i = len(self.func(0., self.ic)) if number_of_trajectories > self.num_threads: reconverge = True tmp_ic = np.zeros((number_of_trajectories, i)) tmp_ic[:self.num_threads] = np.random.randn(self.num_threads, i) else: tmp_ic = np.random.randn(number_of_trajectories, i) else: tmp_ic = ic.copy() if len(tmp_ic.shape) > 1: number_of_trajectories = tmp_ic.shape[0] if reconverge and reconvergence_time is not None: self.integrate(0., convergence_time, dt, ic=tmp_ic[:self.num_threads], write_steps=0, forward=forward) t, x = self.get_trajectories() tmp_ic[:self.num_threads] = x if number_of_trajectories - self.num_threads > self.num_threads: next_len = self.num_threads else: next_len = number_of_trajectories - self.num_threads index = self.num_threads while True: perturbation = pert_size * np.random.randn(next_len, x.shape[1]) self.integrate(0., reconvergence_time, dt, ic=x[:next_len]+perturbation, write_steps=0, forward=forward) t, x = self.get_trajectories() tmp_ic[index:index+next_len] = x index += next_len if number_of_trajectories - index > self.num_threads: next_len = self.num_threads else: next_len = number_of_trajectories - index if next_len <= 0: break self.ic = tmp_ic else: self.integrate(0., convergence_time, dt, ic=tmp_ic, write_steps=0, forward=forward) t, x = self.get_trajectories() self.ic = x def integrate(self, t0, t, dt, ic=None, forward=True, write_steps=1): if self.func is None: print('No function to integrate defined!') return 0 
if ic is None: if self.ic is None: if self.n_dim is not None: i = self.n_dim else: i = 1 while True: self.ic = np.zeros(i) try: x = self.func(0., self.ic) except: i += 1 else: break i = len(self.func(0., self.ic)) self.ic = np.zeros(i) else: self.ic = ic if len(self.ic.shape) == 1: self.ic = self.ic.reshape((1, -1)) self.n_traj = self.ic.shape[0] self.n_dim = self.ic.shape[1] self._time = np.concatenate((np.arange(t0, t, dt), np.full((1,), t))) self._write_steps = write_steps if forward: self._time_direction = 1 else: self._time_direction = -1 if write_steps == 0: self.n_records = 1 else: tot = self._time[::self._write_steps] self.n_records = len(tot) if tot[-1] != self._time[-1]: self.n_records += 1 self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records)) for i in range(self.n_traj): self._ics_queue.put((i, self._time, self.ic[i], self._time_direction, self._write_steps)) self._ics_queue.join() for i in range(self.n_traj): args = self._traj_queue.get() self._recorded_traj[args[0]] = args[1] def get_trajectories(self): if self._write_steps > 0: if self._time_direction == 1: if self._time[::self._write_steps][-1] == self._time[-1]: return self._time[::self._write_steps], np.squeeze(self._recorded_traj) else: return np.concatenate((self._time[::self._write_steps], np.full((1,), self._time[-1]))), np.squeeze(self._recorded_traj) else: rtime = reverse(self._time[::-self._write_steps]) if rtime[0] == self._time[0]: return rtime, np.squeeze(self._recorded_traj) else: return np.concatenate((np.full((1,), self._time[0]), rtime)), np.squeeze(self._recorded_traj) else: return self._time[-1], np.squeeze(self._recorded_traj) def get_ic(self): return self.ic def set_ic(self, ic): self.ic = ic class TrajectoryProcess(multiprocessing.Process): def __init__(self, processID, func, b, c, a, ics_queue, traj_queue): super().__init__() self.processID = processID self._ics_queue = ics_queue self._traj_queue = traj_queue self.func = func self.a = a self.b = b self.c = c def run(self): while True: args = self._ics_queue.get() recorded_traj = _integrate_runge_kutta_jit(self.func, args[1], args[2][np.newaxis, :], args[3], args[4], self.b, self.c, self.a) self._traj_queue.put((args[0], recorded_traj)) self._ics_queue.task_done() class RungeKuttaTglsIntegrator(object): def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None): if num_threads is None: self.num_threads = multiprocessing.cpu_count() else: self.num_threads = num_threads if a is None and b is None and c is None: self.c = np.array([0., 0.5, 0.5, 1.]) self.b = np.array([1./6, 1./3, 1./3, 1./6]) self.a = np.zeros((len(self.c), len(self.b))) self.a[1, 0] = 0.5 self.a[2, 1] = 0.5 self.a[3, 2] = 1. else: self.a = a self.b = b self.c = c self.ic = None self.tg_ic = None self._time = None self._recorded_traj = None self._recorded_fmatrix = None self.n_traj = 0 self.n_tgtraj = 0 self.n_dim = number_of_dimensions self.n_records = 0 self._write_steps = 0 self._time_direction = 1 self._adjoint = False self._boundary = None self._inverse = 1. 
self.func = None self.func_jac = None self._ics_queue = None self._traj_queue = None self._processes_list = list() def terminate(self): for process in self._processes_list: process.terminate() process.join() def start(self): self.terminate() self._processes_list = list() self._ics_queue = multiprocessing.JoinableQueue() self._traj_queue = multiprocessing.Queue() for i in range(self.num_threads): self._processes_list.append(TglsTrajectoryProcess(i, self.func, self.func_jac, self.b, self.c, self.a, self._ics_queue, self._traj_queue)) for process in self._processes_list: process.daemon = True process.start() def set_func(self, f, fjac, ic_init=True): self.func = f self.func_jac = fjac if ic_init: self.ic = None self.start() def set_bca(self, b=None, c=None, a=None, ic_init=True): if a is not None: self.a = a if b is not None: self.b = b if c is not None: self.c = c if ic_init: self.ic = None self.start() def initialize(self, convergence_time, dt, pert_size=0.01, reconvergence_time=None, forward=True, number_of_trajectories=1, ic=None, reconverge=None): if reconverge is None: reconverge = False if ic is None: if self.n_dim is not None: i = self.n_dim else: i = 1 while True: self.ic = np.zeros(i) try: x = self.func(0., self.ic) except: i += 1 else: break i = len(self.func(0., self.ic)) if number_of_trajectories > self.num_threads: reconverge = True tmp_ic = np.zeros((number_of_trajectories, i)) tmp_ic[:self.num_threads] = np.random.randn(self.num_threads, i) else: tmp_ic = np.random.randn(number_of_trajectories, i) else: tmp_ic = ic.copy() if len(tmp_ic.shape) > 1: number_of_trajectories = tmp_ic.shape[0] if reconverge and reconvergence_time is not None: self.integrate(0., convergence_time, dt, ic=tmp_ic[:self.num_threads], write_steps=0, forward=forward) t, x, fm = self.get_trajectories() tmp_ic[:self.num_threads] = x if number_of_trajectories - self.num_threads > self.num_threads: next_len = self.num_threads else: next_len = number_of_trajectories - self.num_threads index = self.num_threads while True: perturbation = pert_size * np.random.randn(next_len, x.shape[1]) self.integrate(0., reconvergence_time, dt, ic=x[:next_len]+perturbation, write_steps=0, forward=forward) t, x, fm = self.get_trajectories() tmp_ic[index:index+next_len] = x index += next_len if number_of_trajectories - index > self.num_threads: next_len = self.num_threads else: next_len = number_of_trajectories - index if next_len <= 0: break self.ic = tmp_ic else: self.integrate(0., convergence_time, dt, ic=tmp_ic, write_steps=0, forward=forward) t, x, fm = self.get_trajectories() self.ic = x
MIT License
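A hedged usage sketch of `RungeKuttaTglsIntegrator.integrate` with a toy system; the Lorenz-63 right-hand side and Jacobian below are stand-ins and are not part of qgs, and on platforms that spawn processes the code should run under an `if __name__ == '__main__':` guard because the integrator starts worker processes:

import numpy as np
from numba import njit
from qgs.integrators.integrator import RungeKuttaTglsIntegrator

@njit
def f(t, x):
    return np.array([10. * (x[1] - x[0]),
                     x[0] * (28. - x[2]) - x[1],
                     x[0] * x[1] - 8. / 3. * x[2]])

@njit
def jac(t, x):
    return np.array([[-10., 10., 0.],
                     [28. - x[2], -1., -x[0]],
                     [x[1], x[0], -8. / 3.]])

integrator = RungeKuttaTglsIntegrator()
integrator.set_func(f, jac)            # also starts the worker processes
integrator.integrate(0., 10., 0.01, ic=np.array([1., 1., 1.]), write_steps=10)
t, traj, fmatrix = integrator.get_trajectories()  # trajectory and fundamental matrix
integrator.terminate()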
collerek/ormar
ormar/relations/relation.py
Relation.get
python
def get(self) -> Optional[Union[List["Model"], "Model"]]: return self.related_models
Return the related model or models from RelationProxy. :return: related model/models if set :rtype: Optional[Union[List[Model], Model]]
https://github.com/collerek/ormar/blob/5e946f514a3506c1702eb47e59dc42e5eb37f139/ormar/relations/relation.py#L182-L189
from enum import Enum from typing import ( Generic, List, Optional, Set, TYPE_CHECKING, Type, TypeVar, Union, cast, ) import ormar from ormar.exceptions import RelationshipInstanceError from ormar.relations.relation_proxy import RelationProxy if TYPE_CHECKING: from ormar.relations import RelationsManager from ormar.models import Model, NewBaseModel, T else: T = TypeVar("T", bound="Model") class RelationType(Enum): PRIMARY = 1 REVERSE = 2 MULTIPLE = 3 THROUGH = 4 class Relation(Generic[T]): def __init__( self, manager: "RelationsManager", type_: RelationType, field_name: str, to: Type["T"], through: Type["Model"] = None, ) -> None: self.manager = manager self._owner: "Model" = manager.owner self._type: RelationType = type_ self._to_remove: Set = set() self.to: Type["T"] = to self._through = through self.field_name: str = field_name self.related_models: Optional[Union[RelationProxy, "Model"]] = ( RelationProxy(relation=self, type_=type_, to=to, field_name=field_name) if type_ in (RelationType.REVERSE, RelationType.MULTIPLE) else None ) def clear(self) -> None: if self._type in (RelationType.PRIMARY, RelationType.THROUGH): self.related_models = None self._owner.__dict__[self.field_name] = None elif self.related_models is not None: related_models = cast("RelationProxy", self.related_models) related_models._clear() self._owner.__dict__[self.field_name] = None @property def through(self) -> Type["Model"]: if not self._through: raise RelationshipInstanceError("Relation does not have through model!") return self._through def _clean_related(self) -> None: cleaned_data = [ x for i, x in enumerate(self.related_models) if i not in self._to_remove ] self.related_models = RelationProxy( relation=self, type_=self._type, to=self.to, field_name=self.field_name, data_=cleaned_data, ) relation_name = self.field_name self._owner.__dict__[relation_name] = cleaned_data self._to_remove = set() def _find_existing( self, child: Union["NewBaseModel", Type["NewBaseModel"]] ) -> Optional[int]: if not isinstance(self.related_models, RelationProxy): raise ValueError("Cannot find existing models in parent relation type") if self._to_remove: self._clean_related() for ind, relation_child in enumerate(self.related_models[:]): try: if relation_child == child: return ind except ReferenceError: self._to_remove.add(ind) return None def add(self, child: "Model") -> None: relation_name = self.field_name if self._type in (RelationType.PRIMARY, RelationType.THROUGH): self.related_models = child self._owner.__dict__[relation_name] = child else: if self._find_existing(child) is None: self.related_models.append(child) rel = self._owner.__dict__.get(relation_name, []) rel = rel or [] if not isinstance(rel, list): rel = [rel] rel.append(child) self._owner.__dict__[relation_name] = rel def remove(self, child: Union["NewBaseModel", Type["NewBaseModel"]]) -> None: relation_name = self.field_name if self._type == RelationType.PRIMARY: if self.related_models == child: self.related_models = None del self._owner.__dict__[relation_name] else: position = self._find_existing(child) if position is not None: self.related_models.pop(position) del self._owner.__dict__[relation_name][position]
MIT License
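A hedged sketch of where `Relation.get` shows up in practice with ormar models (the models, database URL, and field names are made up; this uses the `Meta`-class style matching the code above): accessing `book.author` resolves a PRIMARY relation to a single model or `None`, while `author.books` resolves a REVERSE relation to a list-like `RelationProxy`.

import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()

class Author(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)

class Book(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=100)
    author: Author = ormar.ForeignKey(Author, related_name="books")

# book.author  -> single Author or None (PRIMARY relation)
# author.books -> RelationProxy of Book instances (REVERSE relation)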
catap/namebench
libnamebench/nameserver.py
NameServer.notes
python
def notes(self): my_notes = [] if self.system_position == 0: my_notes.append('The current preferred DNS server') elif self.system_position: my_notes.append('A backup DNS server for this system') if self.dhcp_position is not None: my_notes.append('Assigned by your network DHCP server') if self.is_failure_prone: my_notes.append('%s of %s queries failed' % (self.failure_count, self.request_count)) if self.HasTag('blacklist'): my_notes.append('BEWARE: IP appears in DNS server blacklist') if self.is_disabled: my_notes.append(self.disabled_msg) else: my_notes.extend(self.warnings) if self.errors: my_notes.extend(self.errors) return my_notes
Return a list of notes about this nameserver object.
https://github.com/catap/namebench/blob/9913a7a1a7955a3759eb18cbe73b421441a7a00f/libnamebench/nameserver.py#L237-L256
__author__ = 'tstromberg@google.com (Thomas Stromberg)' import random import re import socket import sys import time import dns.exception import dns.message import dns.name import dns.query import dns.rcode import dns.rdataclass import dns.rdatatype import dns.resolver import dns.reversename import dns.version import health_checks import provider_extensions import addr_util import util if dns.version.hexversion < 17301744: raise ValueError('dnspython 1.8.0+ required, while only %s was found. The ' 'namebench source bundles 1.8.0, so use it.' % dns.version.version) MAX_NORMAL_FAILURES = 2 MAX_KEEPER_FAILURES = 8 MAX_WARNINGS = 10 FAILURE_PRONE_RATE = 10 PROVIDER_TAGS = ['isp', 'network', 'likely-isp', 'dhcp'] BEST_TIMER_FUNCTION = util.GetMostAccurateTimerFunction() def ResponseToAscii(response): if not response: return None if response.answer: answers = [', '.join(map(str, x.items)) for x in response.answer] return ' -> '.join(answers).rstrip('"').lstrip('"') else: return dns.rcode.to_text(response.rcode()) class BrokenSystemClock(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class NameServer(health_checks.NameServerHealthChecks, provider_extensions.NameServerProvider): def __init__(self, ip, hostname=None, name=None, tags=None, provider=None, instance=None, location=None, latitude=None, longitude=None, asn=None, network_owner=None, dhcp_position=None, system_position=None): self.ip = ip self.name = name self.dhcp_position = dhcp_position self.system_position = system_position if tags: self.tags = set(tags) else: self.tags = set() self.provider = provider self.instance = instance self.location = location if self.location: self.country_code = location.split('/')[0] self.tags.add('country_%s' % self.country_code.lower()) else: self.country_code = None self.latitude = latitude self.longitude = longitude self.asn = asn self.network_owner = network_owner self._hostname = hostname self.timeout = 5 self.health_timeout = 5 self.ping_timeout = 1 self.ResetTestStatus() self._version = None self._node_ids = set() self.timer = BEST_TIMER_FUNCTION if ':' in self.ip: self.tags.add('ipv6') elif '.' 
in self.ip: self.tags.add('ipv4') if self.dhcp_position is not None: self.tags.add('dhcp') if self.system_position is not None: self.tags.add('system') if ip.endswith('.0') or ip.endswith('.255'): self.DisableWithMessage("IP appears to be a broadcast address.") elif self.is_bad: self.DisableWithMessage("Known bad address.") def AddNetworkTags(self, domain, provider, asn, country_code): if self.hostname: my_domain = addr_util.GetDomainFromHostname(self.hostname) hostname = self.hostname.lower() else: my_domain = 'UNKNOWN' hostname = '' if provider: provider = provider.lower() if domain and my_domain == domain: self.tags.add('isp') if asn and self.asn == asn: self.tags.add('network') if provider and 'isp' not in self.tags: if (provider in self.name.lower() or provider in self.hostname.lower() or (self.network_owner and provider in self.network_owner.lower())): self.tags.add('isp') elif provider and self.country_code == country_code and my_domain != domain: if (provider in self.name.lower() or provider in hostname or (self.network_owner and provider in self.network_owner.lower())): self.tags.add('likely-isp') def ResetTestStatus(self): self.warnings = set() self.shared_with = set() if self.is_disabled: self.tags.remove('disabled') self.checks = [] self.failed_test_count = 0 self.share_check_count = 0 self.cache_checks = [] self.is_slower_replica = False self.ResetErrorCounts() def ResetErrorCounts(self): self.request_count = 0 self.failure_count = 0 self.error_map = {} @property def is_keeper(self): return bool(self.MatchesTags(['preferred', 'dhcp', 'system', 'specified'])) @property def is_bad(self): if not self.is_keeper and self.MatchesTags(['rejected', 'blacklist']): return True @property def is_hidden(self): return self.HasTag('hidden') @property def is_disabled(self): return self.HasTag('disabled') @property def check_average(self): if len(self.checks) == 1: return self.checks[0][3] else: return util.CalculateListAverage([x[3] for x in self.checks[1:]]) @property def fastest_check_duration(self): if self.checks: return min([x[3] for x in self.checks]) else: return 0.0 @property def check_duration(self): return sum([x[3] for x in self.checks]) @property def warnings_string(self): if self.is_disabled: return 'DISABLED: %s' % self.disabled_msg else: return ', '.join(map(str, self.warnings)) @property def errors(self): return ['%s (%s requests)' % (_[0], _[1]) for _ in self.error_map.items() if _[0] != 'Timeout'] @property def error_count(self): return sum([_[1] for _ in self.error_map.items() if _[0] != 'Timeout']) @property def timeout_count(self): return self.error_map.get('Timeout', 0) @property
Apache License 2.0
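A hedged sketch of how `NameServer.notes` aggregates state; the attributes set below are only for illustration, and the `is_failure_prone` property (not shown above) is assumed to compare the failure rate against `FAILURE_PRONE_RATE`:

ns = NameServer('8.8.8.8', name='Google Public DNS', system_position=0)
ns.request_count, ns.failure_count = 20, 3        # 15% failures, above FAILURE_PRONE_RATE
ns.warnings.add('www.google.com appears incorrect')
print(ns.notes())
# roughly: ['The current preferred DNS server',
#           '3 of 20 queries failed',
#           'www.google.com appears incorrect']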
commvault/cvpysdk
cvpysdk/subclients/virtualserver/livesync/vsa_live_sync.py
LiveSyncVMPair.vm_pair_name
python
def vm_pair_name(self): return self._vm_pair_name
Treats the live sync name as a read-only attribute.
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/subclients/virtualserver/livesync/vsa_live_sync.py#L699-L701
import uuid from past.builtins import basestring from ....constants import HypervisorType as hv_type from ....constants import VSALiveSyncStatus as sync_status from ....constants import VSAFailOverStatus as failover_status from ....exception import SDKException from ....schedules import SchedulePattern class VsaLiveSync: def __new__(cls, subclient_object): instance_name = subclient_object._instance_object.instance_name if instance_name == hv_type.MS_VIRTUAL_SERVER.value.lower(): from .hyperv_live_sync import HyperVLiveSync return object.__new__(HyperVLiveSync) if instance_name == hv_type.AZURE_V2.value.lower(): from .azure_live_sync import AzureLiveSync return object.__new__(AzureLiveSync) if instance_name == hv_type.VIRTUAL_CENTER.value.lower(): from .vmware_live_sync import VMWareLiveSync return object.__new__(VMWareLiveSync) if instance_name == hv_type.AMAZON_AWS.value.lower(): from .amazon_live_sync import AmazonLiveSync return object.__new__(AmazonLiveSync) raise SDKException( 'LiveSync', '102', 'Virtual server Live Sync for Instance: "{0}" is not yet supported'.format(instance_name) ) def __init__(self, subclient_object): self._subclient_object = subclient_object self._subclient_id = self._subclient_object.subclient_id self._subclient_name = self._subclient_object.name self.schedule_pattern = SchedulePattern() self._live_sync_pairs = None self._commcell_object = self._subclient_object._commcell_object self._cvpysdk_object = self._commcell_object._cvpysdk_object self._services = self._commcell_object._services self._update_response_ = self._commcell_object._update_response_ self._ALL_LIVE_SYNC_PAIRS = self._services['GET_ALL_LIVE_SYNC_PAIRS'] % self._subclient_id self.refresh() def __str__(self): representation_string = '{:^5}\t{:^20}\n\n'.format('S. 
No.', 'LiveSyncPair') for index, live_sync in enumerate(self.live_sync_pairs): sub_str = '{:^5}\t{:20}\n'.format(index + 1, live_sync) representation_string += sub_str return representation_string.strip() def __repr__(self): return 'VSALiveSync class instance for the Subclient: "{0}"'.format(self._subclient_name) def _get_live_sync_pairs(self): flag, response = self._cvpysdk_object.make_request('GET', self._ALL_LIVE_SYNC_PAIRS) if flag: live_sync_pairs_dict = {} if not bool(response.json()): return live_sync_pairs_dict elif response.json() and 'siteInfo' in response.json(): for dictionary in response.json()['siteInfo']: if dictionary["replicationGroup"]["replicationGroupName"] != "": temp_name = dictionary["replicationGroup"]["replicationGroupName"] else: temp_name = dictionary['subTask']['subtaskName'] temp_id = str(dictionary['subTask']['taskId']) live_sync_pairs_dict[temp_name] = { 'id': temp_id } return live_sync_pairs_dict raise SDKException('Response', '102') raise SDKException('Response', '101', self._update_response_(response.text)) @staticmethod def _live_sync_subtask_json(schedule_name): return { "subTaskType": "RESTORE", "operationType": "SITE_REPLICATION", "subTaskName": schedule_name } def _configure_live_sync(self, schedule_name, restore_options, pattern_dict=None): restore_options['replication_guid'] = str(uuid.uuid1()) request_json = self._subclient_object._prepare_fullvm_restore_json(restore_options) request_json = self.schedule_pattern.create_schedule( request_json, pattern_dict or {'freq_type': 'after_job_completes'}) request_json['taskInfo']['subTasks'][0]['subTask'] = self._live_sync_subtask_json(schedule_name) return self._subclient_object._process_restore_response(request_json) @property def live_sync_pairs(self): return self._live_sync_pairs def get(self, live_sync_name): if not isinstance(live_sync_name, basestring): raise SDKException('LiveSync', '101') if self.has_live_sync_pair(live_sync_name): return LiveSyncPairs( self._subclient_object, live_sync_name, self.live_sync_pairs[live_sync_name]['id']) raise SDKException( 'LiveSync', '102', 'No Live Sync exists with given name: {0}'.format(live_sync_name) ) def has_live_sync_pair(self, live_sync_name): return self.live_sync_pairs and live_sync_name in self.live_sync_pairs def refresh(self): self._live_sync_pairs = self._get_live_sync_pairs() class LiveSyncPairs: def __init__(self, subclient_object, live_sync_name, live_sync_id=None): self._subclient_object = subclient_object self._subclient_id = self._subclient_object.subclient_id self._subclient_name = self._subclient_object.name self._live_sync_name = live_sync_name self._commcell_object = self._subclient_object._commcell_object self._cvpysdk_object = self._commcell_object._cvpysdk_object self._services = self._commcell_object._services self._update_response_ = self._commcell_object._update_response_ self._live_sync_id = live_sync_id or self._get_live_sync_id() self._LIVE_SYNC_VM_PAIRS = self._services['GET_ALL_LIVE_SYNC_VM_PAIRS'] % ( self._subclient_id, self.live_sync_id ) self._vm_pairs = None self.refresh() def __str__(self): representation_string = '{:^5}\t{:^20}\n\n'.format('S. 
No.', 'LiveSyncVMPair') for index, vm_pair in enumerate(self.vm_pairs): sub_str = '{:^5}\t{:20}\n'.format(index + 1, vm_pair) representation_string += sub_str return representation_string.strip() def __repr__(self): representation_string = 'LiveSyncPairs class instance for Subclient: "{0}"' return representation_string.format(self._subclient_name) def _get_live_sync_id(self): return self._subclient_object.live_sync.get(self.live_sync_name).live_sync_id def _get_live_sync_vm_pairs(self): flag, response = self._cvpysdk_object.make_request('GET', self._LIVE_SYNC_VM_PAIRS) if flag: live_sync_vm_pairs = {} if not bool(response.json()): return live_sync_vm_pairs elif response.json() and 'siteInfo' in response.json(): for dictionary in response.json()['siteInfo']: temp_name = dictionary['sourceName'] temp_id = str(dictionary['replicationId']) live_sync_vm_pairs[temp_name] = { 'id': temp_id } return live_sync_vm_pairs raise SDKException('Response', '102') raise SDKException('Response', '101', self._update_response_(response.text)) @property def vm_pairs(self): return self._vm_pairs def get(self, vm_pair_name): if not isinstance(vm_pair_name, basestring): raise SDKException('LiveSync', '101') if self.has_vm_pair(vm_pair_name): return LiveSyncVMPair( self, vm_pair_name, self.vm_pairs[vm_pair_name]['id'] ) raise SDKException( 'LiveSync', '102', 'No VM pair exists with given name: {0}'.format(vm_pair_name) ) def has_vm_pair(self, vm_pair_name): return self.vm_pairs and vm_pair_name in self.vm_pairs @property def live_sync_id(self): return self._live_sync_id @property def live_sync_name(self): return self._live_sync_name def refresh(self): self._vm_pairs = self._get_live_sync_vm_pairs() class LiveSyncVMPair: def __init__(self, live_sync_pair_object, vm_pair_name, vm_pair_id=None): self.live_sync_pair = live_sync_pair_object self._subclient_object = self.live_sync_pair._subclient_object self._subclient_id = self._subclient_object.subclient_id self._subclient_name = self._subclient_object.name self._vm_pair_name = vm_pair_name self._commcell_object = self._subclient_object._commcell_object self._agent_object = self._subclient_object._agent_object self._cvpysdk_object = self._commcell_object._cvpysdk_object self._services = self._commcell_object._services self._update_response_ = self._commcell_object._update_response_ self._vm_pair_id = vm_pair_id or self._get_vm_pair_id() self._VM_PAIR = self._services['GET_LIVE_SYNC_VM_PAIR'] % ( self._subclient_id, self._vm_pair_id ) self._properties = None self._replication_guid = None self._status = None self._failover_status = None self._source_vm = None self._destination_vm = None self._destination_client = None self._destination_proxy = None self._destination_instance = None self._last_backup_job = None self._latest_replication_job = None self.refresh() def __repr__(self): representation_string = 'LiveSyncVMPair class instance for Live Sync: "{0}"' return representation_string.format(self.live_sync_pair.live_sync_name) def _get_vm_pair_id(self): return self.live_sync_pair.get(self.vm_pair_name).vm_pair_id def _get_vm_pair_properties(self): flag, response = self._cvpysdk_object.make_request('GET', self._VM_PAIR) if flag: if not bool(response.json()): pass elif response.json() and 'siteInfo' in response.json(): self._properties = response.json()['siteInfo'][0] self._replication_guid = self._properties['replicationGuid'] self._status = self._properties['status'] self._failover_status = self._properties['FailoverStatus'] self._source_vm = 
self._properties['sourceName'] self._destination_vm = self._properties['destinationName'] self._destination_client = self._properties['destinationInstance'].get( 'clientName') or self._commcell_object.clients.get( self._properties['destinationInstance'].get('clientId')).name self._destination_proxy = self._properties['destProxy'].get( 'clientName') or self._commcell_object.clients.get( self._properties['destProxy'].get('clientId')).name self._destination_instance = self._properties['destinationInstance'].get( 'instanceName') or self._agent_object.instances.get( self._properties['destinationInstance'].get('instanceId')).name self._last_backup_job = self._properties['lastSyncedBkpJob'] else: raise SDKException('Response', '102') else: raise SDKException('Response', '101', self._update_response_(response.text)) @property def vm_pair_id(self): return self._vm_pair_id @property
Apache License 2.0
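A hedged sketch of how a `LiveSyncVMPair` is typically reached, assuming `subclient` is an already-obtained virtual server subclient; the schedule and VM names are placeholders:

live_sync = subclient.live_sync                    # VsaLiveSync specialised per hypervisor
pairs = live_sync.get('replication_schedule_1')    # -> LiveSyncPairs
vm_pair = pairs.get('source_vm_1')                 # -> LiveSyncVMPair
print(vm_pair.vm_pair_name)                        # read-only property shown above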
multmeio/django-hstore-flattenfields
hstore_flattenfields/db/fields.py
HstoreDecimalField.to_python
python
def to_python(self, value): if not value or value is models.fields.NOT_PROVIDED: value = None elif not isinstance(value, Decimal): try: value = Decimal(value) except InvalidOperation: value = None return value
Validates that the input is a decimal number. Returns a Decimal instance. Returns None for empty values. Ensures that there are no more than max_digits in the number, and no more than decimal_places digits after the decimal point.
https://github.com/multmeio/django-hstore-flattenfields/blob/09626a638b9ef85d28fa5bfef1b040f9926bb95b/hstore_flattenfields/db/fields.py#L180-L194
from django import forms from django.db import models from django.utils.text import capfirst from decimal import Decimal, InvalidOperation from datetime import date, datetime from hstore_flattenfields.utils import * from hstore_flattenfields.forms.fields import * from hstore_flattenfields.models import DynamicField class HstoreTextField(models.TextField): __metaclass__ = models.SubfieldBase def __init__(self, *args, **kwargs): self.html_attrs = kwargs.pop('html_attrs', None) super(HstoreTextField, self).__init__(*args, **kwargs) def formfield(self, form_class=HstoreTextFieldFormField, **kwargs): defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, 'html_attrs': self.html_attrs } defaults.update(kwargs) return form_class(**defaults) def to_python(self, value): if not value or value is models.fields.NOT_PROVIDED: value = None elif not isinstance(value, basestring): value = str(value) return value class HstoreFloatField(models.FloatField): __metaclass__ = models.SubfieldBase def __init__(self, *args, **kwargs): self.html_attrs = kwargs.pop('html_attrs', None) super(HstoreFloatField, self).__init__(*args, **kwargs) def formfield(self, form_class=HstoreNumberFormField, **kwargs): defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, 'html_attrs': self.html_attrs } defaults.update(kwargs) return form_class(**defaults) def to_python(self, value): if not value or value is models.fields.NOT_PROVIDED: return None if isinstance(value, float): return value else: return float(value) class HstoreIntegerField(models.IntegerField): __metaclass__ = models.SubfieldBase def __init__(self, *args, **kwargs): self.html_attrs = kwargs.pop('html_attrs', None) super(HstoreIntegerField, self).__init__(*args, **kwargs) def formfield(self, form_class=HstoreNumberFormField, **kwargs): defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, 'html_attrs': self.html_attrs } defaults.update(kwargs) return form_class(**defaults) def to_python(self, value): if not value or value is models.fields.NOT_PROVIDED: return None if isinstance(value, int): return value else: try: return int(value) except ValueError: return None def _get_val_from_obj(self, obj): try: return getattr(obj, self.attname) except AttributeError: return getattr(obj, self.name) def value_to_string(self, obj): value = self._get_val_from_obj(obj) if value: return str(value) else: return '' def clean(self, value, instance): return value class HstoreCharField(models.CharField): __metaclass__ = models.SubfieldBase def __init__(self, *args, **kwargs): self.html_attrs = kwargs.pop('html_attrs', None) super(HstoreCharField, self).__init__(*args, **kwargs) def formfield(self, form_class=HstoreCharFormField, **kwargs): defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, 'html_attrs': self.html_attrs } defaults.update(kwargs) return form_class(**defaults) def get_choices(self, include_blank=False): dynamic_field = DynamicField.objects.get(name=self.name) choices = single_list_to_tuple( str2literal(dynamic_field.choices) ) if include_blank and choices and dynamic_field.has_blank_option: choices.insert(0, ('', '----')) return choices or self._choices def to_python(self, value): if not value or value is models.fields.NOT_PROVIDED: value = None elif not isinstance(value, basestring): value = str(value) return value def value_to_string(self, obj): value = 
self._get_val_from_obj(obj) or '' if value and not isinstance(value, basestring): value = str(value) return value class HstoreDecimalField(models.DecimalField): __metaclass__ = models.SubfieldBase def __init__(self, *args, **kwargs): self.html_attrs = kwargs.pop('html_attrs', None) super(HstoreDecimalField, self).__init__(*args, **kwargs) def formfield(self, form_class=HstoreDecimalFormField, **kwargs): defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, 'html_attrs': self.html_attrs } defaults.update(kwargs) return form_class(**defaults)
BSD 3-Clause New or Revised License
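A short behavioural sketch of `HstoreDecimalField.to_python` (values illustrative): parsable input becomes a `Decimal`, while empty, falsy, or unparsable input becomes `None` because the `InvalidOperation` is swallowed.

from decimal import Decimal

field = HstoreDecimalField()
field.to_python('10.50')         # -> Decimal('10.50')
field.to_python(Decimal('1'))    # -> Decimal('1'), returned unchanged
field.to_python('')              # -> None
field.to_python('not-a-number')  # -> None, InvalidOperation is swallowed
field.to_python(0)               # -> None, falsy inputs short-circuit before parsing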
facebookresearch/torcharrow
torcharrow/velox_rt/numerical_column_cpu.py
NumericalColumnCpu.percentiles
python
def percentiles(self, q, interpolation="midpoint"):
    if len(self) == 0 or len(q) == 0:
        return []
    out = []
    s = sorted(self)
    for percent in q:
        k = (len(self) - 1) * (percent / 100)
        f = math.floor(k)
        c = math.ceil(k)
        if f == c:
            out.append(s[int(k)])
            continue
        d0 = s[int(f)] * (c - k)
        d1 = s[int(c)] * (k - f)
        out.append(d0 + d1)
    return out
Compute the q-th percentile of non-null data.
https://github.com/facebookresearch/torcharrow/blob/f5c5bd3d46fd42a8480b4fae3945415c42a2275d/torcharrow/velox_rt/numerical_column_cpu.py#L911-L927
import array as ar import math import operator import statistics from typing import Dict, List, Literal, Optional, Union, cast, Callable import numpy as np import torcharrow._torcharrow as velox import torcharrow.dtypes as dt from torcharrow.expression import expression from torcharrow.icolumn import IColumn from torcharrow.inumerical_column import INumericalColumn from torcharrow.scope import ColumnFactory from torcharrow.trace import trace from .column import ColumnFromVelox from .typing import get_velox_type class NumericalColumnCpu(INumericalColumn, ColumnFromVelox): def __init__(self, scope, device, dtype, data, mask): assert dt.is_boolean_or_numerical(dtype) super().__init__(scope, device, dtype) self._data = velox.Column(get_velox_type(dtype)) for m, d in zip(mask.tolist(), data.tolist()): if m: self._data.append_null() else: self._data.append(d) self._finialized = False @staticmethod def _full(scope, device, data, dtype=None, mask=None): assert isinstance(data, np.ndarray) and data.ndim == 1 if dtype is None: dtype = dt.typeof_np_ndarray(data.dtype) else: if dtype != dt.typeof_np_dtype(data.dtype): pass if not dt.is_boolean_or_numerical(dtype): raise TypeError(f"construction of columns of type {dtype} not supported") if mask is None: mask = NumericalColumnCpu._valid_mask(len(data)) elif len(data) != len(mask): raise ValueError( f"data length {len(data)} must be the same as mask length {len(mask)}" ) return NumericalColumnCpu(scope, device, dtype, data, mask) @staticmethod def _empty(scope, device, dtype): return NumericalColumnCpu( scope, device, dtype, ar.array(dtype.arraycode), ar.array("b") ) @staticmethod def _fromlist( scope, device: str, data: List[Union[int, float, bool]], dtype: dt.DType ): velox_column = velox.Column(get_velox_type(dtype), data) return ColumnFromVelox.from_velox( scope, device, dtype, velox_column, True, ) def _append_null(self): if self._finialized: raise AttributeError("It is already finialized.") self._data.append_null() def _append_value(self, value): if self._finialized: raise AttributeError("It is already finialized.") self._data.append(value) def _finalize(self): self._finialized = True return self def _valid_mask(self, ct): raise np.full((ct,), False, dtype=np.bool8) def __len__(self): return len(self._data) def null_count(self): return self._data.get_null_count() @trace def copy(self): return self.scope._FullColumn(self._data.copy(), self.mask.copy()) def getdata(self, i): if i < 0: i += len(self._data) if self._data.is_null_at(i): return self.dtype.default else: return self._data[i] def getmask(self, i): if i < 0: i += len(self._data) return self._data.is_null_at(i) def ite(self, then_, else_): if not dt.is_boolean(self.dtype): raise TypeError("condition must be a boolean vector") if not isinstance(then_, IColumn): then_ = self._Column(then_) if not isinstance(else_, IColumn): else_ = self._Column(else_) lub = dt.common_dtype(then_.dtype, else_.dtype) if lub is None or dt.is_void(lub): raise TypeError( "then and else branches must have compatible types, got {then_.dtype} and {else_.dtype}, respectively" ) if isinstance(then_, NumericalColumnCpu) and isinstance( else_, NumericalColumnCpu ): col = velox.Column(get_velox_type(lub)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append( then_.getdata(i) if self.getdata(i) else else_.getdata(i) ) return ColumnFromVelox.from_velox(self.scope, self.device, lub, col, True) else: return super.ite(self, then_, else_) @trace @expression def sort( self, columns: 
Optional[List[str]] = None, ascending=True, na_position: Literal["last", "first"] = "last", ): if columns is not None: raise TypeError("sort on numerical column can't have 'columns' parameter") res = [] none_count = 0 for i in range(len(self)): if self.getmask(i): none_count += 1 else: res.append(self.getdata(i)) res.sort(reverse=not ascending) col = velox.Column(get_velox_type(self.dtype)) if na_position == "first": for i in range(none_count): col.append_null() for value in res: col.append(value) if na_position == "last": for i in range(none_count): col.append_null() return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def nlargest( self, n=5, columns: Optional[List[str]] = None, keep: Literal["last", "first"] = "first", ): if columns is not None: raise TypeError( "computing n-largest on numerical column can't have 'columns' parameter" ) return self.sort(columns=None, ascending=False, na_position=keep).head(n) @trace @expression def nsmallest(self, n=5, columns: Optional[List[str]] = None, keep="first"): if columns is not None: raise TypeError( "computing n-smallest on numerical column can't have 'columns' parameter" ) return self.sort(columns=None, ascending=True, na_position=keep).head(n) @trace @expression def nunique(self, dropna=True): result = set() for i in range(len(self)): if self.getmask(i): if not dropna: result.add(None) else: result.add(self.getdata(i)) return len(result) def _checked_binary_op_call( self, other: Union[INumericalColumn, int, float, bool], op_name: str ) -> INumericalColumn: if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): result_col = getattr(self._data, op_name)(other._data) result_dtype = result_col.dtype().with_null( self.dtype.nullable or other.dtype.nullable ) return ColumnFromVelox.from_velox( self.scope, self.device, result_dtype, result_col, True ) else: assert ( isinstance(other, int) or isinstance(other, float) or isinstance(other, bool) ) result_col = getattr(self._data, op_name)(other) result_dtype = result_col.dtype().with_null(self.dtype.nullable) return ColumnFromVelox.from_velox( self.scope, self.device, result_dtype, result_col, True ) def _checked_comparison_op_call( self, other: Union[INumericalColumn, List[int], List[float], int, float], op_name: str, ) -> INumericalColumn: if isinstance(other, list): other = self.scope.Column(other) return self._checked_binary_op_call(other, op_name) def _checked_arithmetic_op_call( self, other: Union[int, float, bool], op_name: str, fallback_py_op: Callable ) -> INumericalColumn: def should_use_py_impl( self, other: Union[INumericalColumn, int, float, bool] ) -> bool: if dt.is_boolean(self.dtype): if isinstance(other, NumericalColumnCpu) and dt.is_boolean(other.dtype): return True elif not isinstance(other, NumericalColumnCpu): return True return False if should_use_py_impl(self, other): return self._py_arithmetic_op(other, fallback_py_op) return self._checked_binary_op_call(other, op_name) @trace @expression def __add__(self, other: Union[INumericalColumn, int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "add", operator.add) @trace @expression def __radd__(self, other: Union[int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "radd", IColumn.swap(operator.add) ) @trace @expression def __sub__(self, other: Union[INumericalColumn, int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "sub", 
operator.sub) @trace @expression def __rsub__(self, other: Union[int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "rsub", IColumn.swap(operator.sub) ) @trace @expression def __mul__(self, other: Union[INumericalColumn, int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "mul", operator.mul) @trace @expression def __rmul__(self, other: Union[int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "rmul", IColumn.swap(operator.mul) ) @trace @expression def __floordiv__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(dt.float64)) assert len(self) == len(other) for i in range(len(self)): if self.getmask(i) or other.getmask(i): col.append_null() else: col.append(self.getdata(i) // other.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) else: col = velox.Column(get_velox_type(dt.float64)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append(self.getdata(i) // other) return ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) @trace @expression def __rfloordiv__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(self.dtype)) assert len(self) == len(other) for i in range(len(self)): if self.getmask(i) or other.getmask(i): col.append_null() else: col.append(other.getdata(i) // self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) else: col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append(other // self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def __truediv__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(dt.float64)) assert len(self) == len(other) for i in range(len(self)): other_data = other.getdata(i) if self.getmask(i) or other.getmask(i): col.append_null() elif other_data == 0: col.append_null() else: col.append(self.getdata(i) / other_data) return ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) else: col = velox.Column(get_velox_type(dt.float64)) for i in range(len(self)): if self.getmask(i): col.append_null() elif other == 0: col.append_null() else: col.append(self.getdata(i) / other) return ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) @trace @expression def __rtruediv__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(dt.float64)) assert len(self) == len(other) for i in range(len(self)): self_data = self.getdata(i) if self.getmask(i) or other.getmask(i): col.append_null() elif self_data == 0: col.append_null() else: col.append(other.getdata(i) / self_data) return ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) else: col = velox.Column(get_velox_type(dt.float64)) for i in range(len(self)): self_data = self.getdata(i) if self.getmask(i) or self.getdata(i) == 0: col.append_null() elif self_data == 0: col.append_null() else: col.append(other / self_data) return 
ColumnFromVelox.from_velox( self.scope, self.device, dt.float64, col, True ) @trace @expression def __mod__(self, other: Union[INumericalColumn, int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "mod", operator.mod) @trace @expression def __rmod__(self, other: Union[int, float]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "rmod", IColumn.swap(operator.mod) ) @trace @expression def __pow__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(self.dtype)) assert len(self) == len(other) for i in range(len(self)): if self.getmask(i) or other.getmask(i): col.append_null() else: col.append(self.getdata(i) ** other.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) else: col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append(self.getdata(i) ** other) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def __rpow__(self, other): if isinstance(other, INumericalColumn): self.scope.check_is_same(other.scope) if isinstance(other, NumericalColumnCpu): col = velox.Column(get_velox_type(self.dtype)) assert len(self) == len(other) for i in range(len(self)): if self.getmask(i) or other.getmask(i): col.append_null() else: col.append(other.getdata(i) ** self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) else: col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append(other ** self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def __eq__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "eq") @trace @expression def __ne__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "neq") @trace @expression def __lt__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "lt") @trace @expression def __gt__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "gt") @trace @expression def __le__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "lte") @trace @expression def __ge__( self, other: Union[INumericalColumn, List[int], List[float], int, float] ): return self._checked_comparison_op_call(other, "gte") @trace @expression def __and__(self, other: Union[INumericalColumn, int]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "bitwise_and", operator.__and__) @trace @expression def __rand__(self, other: Union[int]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "bitwise_rand", IColumn.swap(operator.__and__) ) @trace @expression def __or__(self, other: Union[INumericalColumn, int]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "bitwise_or", operator.__or__) @trace @expression def __ror__(self, other: Union[int]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "bitwise_ror", IColumn.swap(operator.__or__) ) @trace @expression def __xor__(self, other: 
Union[INumericalColumn, int]) -> INumericalColumn: return self._checked_arithmetic_op_call(other, "bitwise_xor", operator.__xor__) @trace @expression def __rxor__(self, other: Union[int]) -> INumericalColumn: return self._checked_arithmetic_op_call( other, "bitwise_rxor", IColumn.swap(operator.__xor__) ) @trace @expression def __invert__(self): return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, self._data.invert(), True ) @trace @expression def __neg__(self): return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, self._data.neg(), True ) @trace @expression def __pos__(self): return self @trace @expression def isin(self, values, invert=False): if invert: raise NotImplementedError() col = velox.Column(get_velox_type(dt.boolean)) for i in range(len(self)): if self.getmask(i): col.append(False) else: col.append(self.getdata(i) in values) return ColumnFromVelox.from_velox( self.scope, self.device, dt.Boolean(self.dtype.nullable), col, True ) @trace @expression def abs(self): return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, self._data.abs(), True ) @trace @expression def ceil(self): return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, self._data.ceil(), True ) @trace @expression def floor(self): return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, self._data.floor(), True ) @trace @expression def round(self, decimals=0): col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): col.append_null() else: col.append(round(self.getdata(i), decimals)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def fillna(self, fill_value: Union[dt.ScalarTypes, Dict]): if not isinstance(fill_value, IColumn.scalar_types): raise TypeError(f"fillna with {type(fill_value)} is not supported") if not self.isnullable: return self else: col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): if isinstance(fill_value, Dict): raise NotImplementedError() else: col.append(fill_value) else: col.append(self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def dropna(self, how: Literal["any", "all"] = "any"): if not self.isnullable: return self else: col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): pass else: col.append(self.getdata(i)) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def drop_duplicates( self, subset: Optional[List[str]] = None, ): if subset is not None: raise TypeError(f"subset parameter for numerical columns not supported") seen = set() col = velox.Column(get_velox_type(self.dtype)) for i in range(len(self)): if self.getmask(i): col.append_null() else: current = self.getdata(i) if current not in seen: col.append(current) seen.add(current) return ColumnFromVelox.from_velox( self.scope, self.device, self.dtype, col, True ) @trace @expression def min(self, numeric_only=None, fill_value=None): result = None for i in range(len(self)): if not self.getmask(i): value = self.getdata(i) if result is None or value < result: result = value return result @trace @expression def max(self, fill_value=None): result = None for i in range(len(self)): if not self.getmask(i): value = self.getdata(i) if result is None or value > result: result = value return result @trace @expression def all(self): for i in range(len(self)): if not 
self.getmask(i): value = self.getdata(i) if value == False: return False return True @trace @expression def any(self, skipna=True, boolean_only=None): for i in range(len(self)): if not self.getmask(i): value = self.getdata(i) if value == True: return True return False @trace @expression def sum(self): result = 0 for i in range(len(self)): if not self.getmask(i): result += self.getdata(i) return result @trace @expression def prod(self): result = 1 for i in range(len(self)): if not self.getmask(i): result *= self.getdata(i) return result def _accumulate_column(self, func, *, skipna=True, initial=None): it = iter(self) res = [] total = initial rest_is_null = False if initial is None: try: total = next(it) except StopIteration: raise ValueError(f"cum[min/max] undefined for empty column.") if total is None: raise ValueError(f"cum[min/max] undefined for columns with row 0 as null.") res.append(total) for element in it: if rest_is_null: res.append(None) continue if element is None: if skipna: res.append(None) else: res.append(None) rest_is_null = True else: total = func(total, element) res.append(total) return self.scope.Column(res, self.dtype) @trace @expression def cummin(self): return self._accumulate_column(min, skipna=True, initial=None) @trace @expression def cummax(self): return self._accumulate_column(max, skipna=True, initial=None) @trace @expression def cumsum(self): return self._accumulate_column(operator.add, skipna=True, initial=None) @trace @expression def cumprod(self): return self._accumulate_column(operator.mul, skipna=True, initial=None) @trace @expression def mean(self): return statistics.mean(value for value in self if value is not None) @trace @expression def median(self): return statistics.median(value for value in self if value is not None) @trace @expression def Cpu(self, ddof=1): return np.ma.Cpu(self._ma(), ddof=ddof) @trace @expression
BSD 3-Clause New or Revised License
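The percentiles method above works by blending the two sorted values that bracket the requested fractional rank. A minimal standalone sketch of the same bracketing logic on a plain Python list (the function name is illustrative, not part of torcharrow):

import math

def percentiles_sketch(values, q):
    # Same idea as NumericalColumnCpu.percentiles: sort, compute the fractional
    # rank k, and blend the two neighbouring sorted values by their distance to k.
    if len(values) == 0 or len(q) == 0:
        return []
    s = sorted(values)
    out = []
    for percent in q:
        k = (len(s) - 1) * (percent / 100)
        f, c = math.floor(k), math.ceil(k)
        if f == c:
            out.append(s[int(k)])
        else:
            out.append(s[f] * (c - k) + s[c] * (k - f))
    return out

print(percentiles_sketch([1, 2, 3, 4], [25, 50, 100]))  # [1.75, 2.5, 4]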
atlusio/wowza
wowza/wowza.py
StreamTargets.token_auth_info
python
def token_auth_info(self, stream_target_id):
    path = self.base_url + '{}/token_auth'.format(stream_target_id)
    response = session.get(path, headers=self.headers)
    return response.json()
Get the details of the token authorization applied to a stream target
https://github.com/atlusio/wowza/blob/c07b758bc335c7b4ee3f992682cc0882529f50fc/wowza/wowza.py#L341-L347
import json, time from . import session from . import WOWZA_API_KEY, WOWZA_ACCESS_KEY, WOWZA_BASE_URL from wowza.exceptions import InvalidParamDict, InvalidParameter, MissingParameter, InvalidInteraction, InvalidStateChange, TokenAuthBusy, GeoblockingBusy, LimitReached class LiveStreams(object): def __init__(self, base_url=WOWZA_BASE_URL, api_key=WOWZA_API_KEY, access_key=WOWZA_ACCESS_KEY): self.id = id self.base_url = base_url self.headers = { 'wsc-api-key': WOWZA_API_KEY, 'wsc-access-key': WOWZA_ACCESS_KEY, 'content-type': 'application/json' } def info(self, stream_id=None, options=None): if options and not stream_id: raise InvalidParameter({ 'message': 'When getting optional info on a stream, a \ stream_id needs to be provided.' }) path = self.base_url + 'live_streams/' if stream_id: path = path + stream_id if options: path = path + "/{}".format(options) response = session.get(path, headers=self.headers) return response.json() def create(self, param_dict): required_params = [ 'name', 'broadcast_location', 'encoder', 'aspect_ratio_height', 'aspect_ratio_width' ] for key in required_params: if key not in param_dict: raise MissingParameter({ 'message': 'Missing parameter [{}]. Cannot create \ live stream.'.format(key) }) param_dict = { 'live_stream': param_dict } path = self.base_url + 'live_streams/' response = session.post( path, json.dumps(param_dict), headers=self.headers ) return response.json() def update(self, stream_id, param_dict): if isinstance(param_dict, dict): param_dict = { 'live_stream': param_dict } path = self.base_url + 'live_streams/{}'.format(stream_id) response = session.patch(path, json.dumps(param_dict), headers=self.headers) return response.json() else: raise InvalidParamDict({ 'message': 'Desired parameters for update should be passed in \ as a dictionary with key-value pairs. \ i.e. {\'transcoder_type\': \'transcoded\'}' }) def delete(self, stream_id): state = self.info(stream_id, 'state')['live_stream']['state'] if state is not 'started': path = self.base_url + 'live_streams/{}'.format(stream_id) response = session.delete(path, headers=self.headers) return response else: raise InvalidInteraction({ 'message': 'Cannot delete a running event. Stop the event first \ and try again.' }) def start(self, stream_id): path = self.base_url + "live_streams/{}/start".format(stream_id) response = session.put(path, data='', headers=self.headers) return response.json() def reset(self, stream_id): path = self.base_url + "live_streams/{}/reset".format(stream_id) response = session.put(path, data='', headers=self.headers) if 'meta' in response.json(): if response.json()['meta']['code'] == 'ERR-422-InvalidInteraction': raise InvalidInteraction({ 'message': 'Unable to reset stream. Invalid state for resetting.' }) return response.json() def stop(self, stream_id): state = self.info(stream_id, 'state')['live_stream']['state'] if state == 'started': path = self.base_url + "live_streams/{}/stop".format(stream_id) response = session.put(path, data='', headers=self.headers) if 'meta' in response.json(): if response.json()['meta']['code'] == 'ERR-422-InvalidInteraction': raise InvalidInteraction({ 'message': 'Unable to stop stream. Invalid state for stopping.' }) else: raise InvalidStateChange({ 'message': 'Cannot stop a live stream that is not running.' 
}) return response.json() def stats(self, stream_id): path = self.base_url + "live_streams/{}/stats".format(stream_id) response = session.get(path, headers=self.headers) return response.json() def new_code(self, stream_id): path = self.base_url + "live_streams/{}/regenerate_connection_code".format(stream_id) response = session.put(path, data='', headers=self.headers) return response.json() def regenerate_connection_code(self, stream_id): return self.new_code(stream_id) class StreamSources(object): def __init__(self, base_url=WOWZA_BASE_URL + 'stream_sources/', api_key=WOWZA_API_KEY, access_key=WOWZA_ACCESS_KEY): self.id = id self.base_url = base_url self.headers = { 'wsc-api-key': WOWZA_API_KEY, 'wsc-access-key': WOWZA_ACCESS_KEY, 'content-type': 'application/json' } def info(self, source_id=None): path = self.base_url path = "{}/{}".format(path, source_id) if source_id else path response = session.get(path, headers=self.headers) return response.json() def source(self, source_id): return self.info(source_id) def create(self, param_dict): if isinstance(param_dict, dict): path = self.base_url param_dict = { 'stream_source': param_dict } response = session.post(path, json.dumps(param_dict), headers=self.headers) return response.json() else: return InvalidParamDict({ 'message': 'The provided parameter dictionary is not valid.' }) def update(self, source_id, param_dict): if isinstance(param_dict, dict): path = self.base_url + source_id param_dict = { 'stream_source': param_dict } response = session.patch(path, json.dumps(param_dict), headers=self.headers) return response.json() else: return InvalidParamDict({ 'message': 'The provided parameter dictionary is not valid.' }) def delete(self, source_id): path = self.base_url + source_id response = session.delete(path, headers=self.headers) return response class StreamTargets(object): def __init__(self, base_url=WOWZA_BASE_URL + 'stream_targets/', api_key=WOWZA_API_KEY, access_key=WOWZA_ACCESS_KEY): self.id = id self.base_url = base_url self.headers = { 'wsc-api-key': WOWZA_API_KEY, 'wsc-access-key': WOWZA_ACCESS_KEY, 'content-type': 'application/json' } def info(self, stream_target_id=None): path = self.base_url path = "{}{}".format(path, stream_target_id) if stream_target_id else path response = session.get(path, headers=self.headers) return response.json() def create(self, param_dict): if isinstance(param_dict, dict): path = self.base_url param_dict = { 'stream_target': param_dict } response = session.post(path, json.dumps(param_dict), headers=self.headers) if 'meta' in response.json(): if 'LimitReached' in response.json()['meta']['code']: raise LimitReached({ 'message': response.json()['meta']['message'] }) return response.json() else: return InvalidParamDict({ 'message': 'Invalid parameter dictionary provided.' }) def update(self, stream_target_id, param_dict): if isinstance(param_dict, dict): path = self.base_url + stream_target_id param_dict = { 'stream_target': param_dict } response = session.patch(path, json.dumps(param_dict), headers=self.headers) return response.json() else: raise InvalidParamDict({ 'message': 'Invalid parameter dictionary provided.' 
}) def delete(self, stream_target_id): path = self.base_url + stream_target_id response = session.delete(path, headers=self.headers) return response def new_code(self, stream_target_id): path = self.base_url + '{}/regenerate_connection_code'.format(stream_target_id) response = session.put(path, data='', headers=self.headers) return response.json() def regenerate_connection_code(self, stream_target_id): return self.new_code(stream_target_id)
MIT License
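token_auth_info above is a thin wrapper around a GET request carrying the Wowza Streaming Cloud headers that the class builds in __init__. A hedged sketch of the equivalent raw call using requests; the base URL shape, key values, and stream target ID below are placeholders, not real credentials or a confirmed endpoint:

import requests

STREAM_TARGETS_URL = 'https://api.cloud.wowza.com/api/v1/stream_targets/'  # assumed base URL shape
HEADERS = {
    'wsc-api-key': '<your-api-key>',        # placeholder
    'wsc-access-key': '<your-access-key>',  # placeholder
    'content-type': 'application/json',
}

def token_auth_info(stream_target_id):
    # Mirrors StreamTargets.token_auth_info: GET <base>/<id>/token_auth and return the JSON body.
    path = STREAM_TARGETS_URL + '{}/token_auth'.format(stream_target_id)
    response = requests.get(path, headers=HEADERS)
    return response.json()

# info = token_auth_info('abcd1234')  # placeholder ID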
extreme-classification/decaf
DECAF/libs/dataset.py
DatasetSparse.__getitem__
python
def __getitem__(self, index):
    x = self.features[index]
    y = self.get_shortlist(index)
    meta = {'num_labels': self.num_labels + self.offset,
            'true_num_labels': self.num_labels}
    return x, y, meta
Get features and labels for index

Arguments
---------
index: int
    data for this index

Returns
-------
features: np.ndarray or tuple
    for dense: np.ndarray
    for sparse: feature indices and their weights
labels: tuple
    shortlist: label indices in the shortlist
    labels_mask: 1 for relevant; 0 otherwise
    dist: distance (used during prediction only)
https://github.com/extreme-classification/decaf/blob/1522e7ac1f56469a0b8a19181755a6f752acad45/DECAF/libs/dataset.py#L151-L171
import torch import _pickle as pickle import os import sys from scipy.sparse import lil_matrix import numpy as np import scipy.sparse as sp from .dataset_base import DatasetBase from .shortlist_handler import ShortlistHandlerSimple def construct_dataset(data_dir, fname_features, fname_labels, data=None, model_dir='', mode='train', size_shortlist=-1, normalize_features=True, normalize_labels=True, keep_invalid=False, num_centroids=1, feature_type='sparse', num_clf_partitions=1, feature_indices=None, label_indices=None, shortlist_method='static', shorty=None, classifier_type=None): if classifier_type in ["DECAF"]: return DatasetDECAF( data_dir, fname_features, fname_labels, data, model_dir, mode, feature_indices, label_indices, keep_invalid, normalize_features, normalize_labels, num_clf_partitions, size_shortlist, num_centroids, feature_type, shorty, "DECAF", shortlist_method) else: raise NotImplementedError( "Unknown dataset method: {}!".format(classifier_type)) class DatasetSparse(DatasetBase): def __init__(self, data_dir, fname_features, fname_labels, data=None, model_dir='', mode='train', feature_indices=None, label_indices=None, keep_invalid=False, normalize_features=True, normalize_labels=False, num_clf_partitions=1, size_shortlist=-1, num_centroids=1, feature_type='sparse', shortlist_method='static', shorty=None, label_type='sparse', shortlist_in_memory=True): super().__init__(data_dir, fname_features, fname_labels, data, model_dir, mode, feature_indices, label_indices, keep_invalid, normalize_features, normalize_labels, feature_type, label_type=label_type) if self.labels is None: NotImplementedError( "No support for shortlist w/o any label, \ consider using dense dataset.") self.feature_type = feature_type self.num_centroids = num_centroids self.num_clf_partitions = num_clf_partitions self.shortlist_in_memory = shortlist_in_memory self.size_shortlist = size_shortlist self.multiple_cent_mapping = None self.shortlist_method = shortlist_method self.offset = 0 if self.mode == 'train': self._remove_samples_wo_features_and_labels() if not keep_invalid: self._process_labels(model_dir) if shortlist_method == 'simple': self.offset += 1 self.shortlist = ShortlistHandlerSimple( self.num_labels, model_dir, num_clf_partitions, mode, size_shortlist, num_centroids, shortlist_in_memory, self.multiple_cent_mapping) else: raise NotImplementedError( "Unknown shortlist method: {}!".format(shortlist_method)) self.label_padding_index = self.num_labels def update_shortlist(self, shortlist, dist, fname='tmp', idx=-1): pass def _process_labels(self, model_dir): data_obj = {} fname = os.path.join(model_dir, 'labels_params.pkl') if self.mode == 'train': self._process_labels_train(data_obj) pickle.dump(data_obj, open(fname, 'wb')) else: data_obj = pickle.load(open(fname, 'rb')) self._process_labels_predict(data_obj) def get_shortlist(self, index): pos_labels, _ = self.labels[index] return self.shortlist.get_shortlist(index, pos_labels)
MIT License
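__getitem__ above returns a (features, shortlist, meta) triple, where meta carries the padded and true label counts. A small hedged sketch of that return contract with dummy data, using a stand-in class rather than the DECAF codebase:

import numpy as np

class TinyShortlistDataset:
    # Illustrative stand-in for DatasetSparse: dense features plus a per-row
    # list of positive label indices playing the role of the shortlist.
    def __init__(self, features, positive_labels, num_labels, offset=1):
        self.features = features
        self.positive_labels = positive_labels
        self.num_labels = num_labels
        self.offset = offset

    def __getitem__(self, index):
        x = self.features[index]
        y = self.positive_labels[index]
        meta = {'num_labels': self.num_labels + self.offset,
                'true_num_labels': self.num_labels}
        return x, y, meta

ds = TinyShortlistDataset(np.eye(3), [[0], [1, 2], [2]], num_labels=3)
print(ds[1])  # (array([0., 1., 0.]), [1, 2], {'num_labels': 4, 'true_num_labels': 3})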
carla-simulator/ros-bridge
carla_common/src/carla_common/transforms.py
carla_acceleration_to_ros_accel
python
def carla_acceleration_to_ros_accel(carla_acceleration):
    ros_accel = Accel()
    ros_accel.linear.x = carla_acceleration.x
    ros_accel.linear.y = -carla_acceleration.y
    ros_accel.linear.z = carla_acceleration.z
    return ros_accel
Convert a carla acceleration to a ROS accel

Considers the conversion from left-handed system (unreal) to right-handed system (ROS)
The angular accelerations remain zero.

:param carla_acceleration: the carla acceleration
:type carla_acceleration: carla.Vector3D
:return: a ROS accel
:rtype: geometry_msgs.msg.Accel
https://github.com/carla-simulator/ros-bridge/blob/dac9e729b70a3db9da665c1fdb843e96e7e25d04/carla_common/src/carla_common/transforms.py#L222-L240
import math import numpy import carla from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel from transforms3d.euler import euler2mat, quat2euler, euler2quat from transforms3d.quaternions import quat2mat, mat2quat def carla_location_to_numpy_vector(carla_location): return numpy.array([ carla_location.x, -carla_location.y, carla_location.z ]) def carla_location_to_ros_vector3(carla_location): ros_translation = Vector3() ros_translation.x = carla_location.x ros_translation.y = -carla_location.y ros_translation.z = carla_location.z return ros_translation def carla_location_to_ros_point(carla_location): ros_point = Point() ros_point.x = carla_location.x ros_point.y = -carla_location.y ros_point.z = carla_location.z return ros_point def carla_rotation_to_RPY(carla_rotation): roll = math.radians(carla_rotation.roll) pitch = -math.radians(carla_rotation.pitch) yaw = -math.radians(carla_rotation.yaw) return (roll, pitch, yaw) def carla_rotation_to_ros_quaternion(carla_rotation): roll, pitch, yaw = carla_rotation_to_RPY(carla_rotation) quat = euler2quat(roll, pitch, yaw) ros_quaternion = Quaternion(w=quat[0], x=quat[1], y=quat[2], z=quat[3]) return ros_quaternion def carla_rotation_to_numpy_rotation_matrix(carla_rotation): roll, pitch, yaw = carla_rotation_to_RPY(carla_rotation) numpy_array = euler2mat(roll, pitch, yaw) rotation_matrix = numpy_array[:3, :3] return rotation_matrix def carla_rotation_to_directional_numpy_vector(carla_rotation): rotation_matrix = carla_rotation_to_numpy_rotation_matrix(carla_rotation) directional_vector = numpy.array([1, 0, 0]) rotated_directional_vector = rotation_matrix.dot(directional_vector) return rotated_directional_vector def carla_vector_to_ros_vector_rotated(carla_vector, carla_rotation): rotation_matrix = carla_rotation_to_numpy_rotation_matrix(carla_rotation) tmp_array = rotation_matrix.dot(numpy.array([carla_vector.x, carla_vector.y, carla_vector.z])) ros_vector = Vector3() ros_vector.x = tmp_array[0] ros_vector.y = -tmp_array[1] ros_vector.z = tmp_array[2] return ros_vector def carla_velocity_to_ros_twist(carla_linear_velocity, carla_angular_velocity, carla_rotation=None): ros_twist = Twist() if carla_rotation: ros_twist.linear = carla_vector_to_ros_vector_rotated(carla_linear_velocity, carla_rotation) else: ros_twist.linear = carla_location_to_ros_vector3(carla_linear_velocity) ros_twist.angular.x = math.radians(carla_angular_velocity.x) ros_twist.angular.y = -math.radians(carla_angular_velocity.y) ros_twist.angular.z = -math.radians(carla_angular_velocity.z) return ros_twist def carla_velocity_to_numpy_vector(carla_velocity): return numpy.array([ carla_velocity.x, -carla_velocity.y, carla_velocity.z ])
MIT License
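Like the other converters in this module, carla_acceleration_to_ros_accel only has to flip the sign of the y component to go from CARLA's left-handed frame to the ROS right-handed frame. A standalone sketch of that flip with plain tuples, with no carla or geometry_msgs dependency:

def left_handed_to_right_handed(x, y, z):
    # Left-handed (UE4/CARLA) to right-handed (ROS): negate y, keep x and z.
    return (x, -y, z)

print(left_handed_to_right_handed(1.0, 2.0, 0.5))  # (1.0, -2.0, 0.5)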
daler/hubward
hubward/liftover.py
download_chainfile
python
def download_chainfile(source_assembly, target_assembly):
    cache_dir = os.environ.get(
        'HUBWARD_CACHE_DIR',
        os.path.expanduser('~/.hubward_cache'))
    utils.makedirs(cache_dir)
    url = chainfile_url(source_assembly, target_assembly)
    dest = os.path.join(cache_dir, os.path.basename(url))
    if not os.path.exists(dest):
        log('Downloading {0} to {1}'.format(url, dest))
        utils.download(url, dest)
    return dest
Download if needed, putting the file in the cache_dir. If the environment variable HUBWARD_CACHE_DIR does not exist, then use ~/.hubward_cache
https://github.com/daler/hubward/blob/533cc9279cfdf6cab3a79c0179a8287e9f8055ed/hubward/liftover.py#L13-L28
import utils import subprocess import os import shutil import pybedtools from hubward.log import log
BSD 3-Clause New or Revised License
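download_chainfile above implements a simple download-once cache keyed on an environment variable. A hedged standalone sketch of the same pattern using only the standard library; the URL below is a placeholder, and urllib.request.urlretrieve stands in for utils.download:

import os
import urllib.request

def cached_download(url, cache_env='HUBWARD_CACHE_DIR',
                    default_dir='~/.hubward_cache'):
    # Resolve the cache directory from the environment, falling back to ~/.hubward_cache.
    cache_dir = os.environ.get(cache_env, os.path.expanduser(default_dir))
    os.makedirs(cache_dir, exist_ok=True)
    dest = os.path.join(cache_dir, os.path.basename(url))
    # Only hit the network when the file is not already cached.
    if not os.path.exists(dest):
        urllib.request.urlretrieve(url, dest)
    return dest

# path = cached_download('https://example.org/hg19ToHg38.over.chain.gz')  # placeholder URL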
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/linux_packages/gluster.py
MountGluster
python
def MountGluster(vm, gluster_server, volume_name, mount_point):
    vm.Install('gluster')
    volume = '{ip}:/{volume_name}'.format(
        ip=gluster_server.internal_ip, volume_name=volume_name)
    vm.RemoteCommand('sudo mkdir -p %s' % mount_point)
    vm.RemoteCommand('sudo mount -t glusterfs {volume} {mount_point}'.format(
        volume=volume, mount_point=mount_point))
Mounts a Gluster volume on the Virtual Machine.

Args:
  vm: The VM to mount the Gluster volume on.
  gluster_server: A Gluster server that knows about the volume.
  volume_name: The name of the volume to mount.
  mount_point: The location to mount the volume on 'vm'.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/linux_packages/gluster.py#L54-L68
import posixpath from absl import flags from perfkitbenchmarker import os_types from perfkitbenchmarker import vm_util FLAGS = flags.FLAGS flags.DEFINE_integer( 'gluster_replicas', 3, 'The number of Gluster replicas.') flags.DEFINE_integer( 'gluster_stripes', 1, 'The number of Gluster stripes.') def YumInstall(vm): if FLAGS.os_type != os_types.CENTOS7: raise NotImplementedError( 'PKB currently only supports installation of gluster on centos7 or ' 'Debian-based VMs.') vm.InstallEpelRepo() vm.InstallPackages('centos-release-gluster') vm.InstallPackages('glusterfs-server') vm.RemoteCommand('sudo glusterd') def AptInstall(vm): vm.RemoteCommand('sudo add-apt-repository ppa:gluster/glusterfs-6') vm.AptUpdate() vm.InstallPackages('glusterfs-server')
Apache License 2.0
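MountGluster above boils down to two remote shell commands: create the mount point, then mount <server-ip>:/<volume> with the glusterfs filesystem type. A minimal sketch that just builds those command strings; vm.RemoteCommand is replaced by printing, and the IP, volume, and path values are illustrative:

def gluster_mount_commands(server_ip, volume_name, mount_point):
    # Same commands MountGluster issues over SSH via vm.RemoteCommand.
    volume = '{ip}:/{volume_name}'.format(ip=server_ip, volume_name=volume_name)
    return [
        'sudo mkdir -p {}'.format(mount_point),
        'sudo mount -t glusterfs {} {}'.format(volume, mount_point),
    ]

for cmd in gluster_mount_commands('10.0.0.2', 'vol0', '/scratch'):
    print(cmd)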
andreartelt/ceml
ceml/backend/jax/preprocessing/scaler.py
StandardScaler.predict
python
def predict(self, x):
    return (x - self.mu) / self.sigma
Computes the forward pass.
https://github.com/andreartelt/ceml/blob/364d4630d6a01592c2ab86f2d53dbb7feb682381/ceml/backend/jax/preprocessing/scaler.py#L18-L22
import numpy as np from ....model import Model from .affine_preprocessing import AffinePreprocessing class StandardScaler(Model, AffinePreprocessing): def __init__(self, mu, sigma, **kwds): self.mu = mu self.sigma = sigma A = np.diag(1. / self.sigma) super().__init__(A=A, b=-1. * A @ self.mu, **kwds)
MIT License
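The predict method above is the standard z-scoring transform (x - mu) / sigma; the affine form stored in __init__ (A = diag(1/sigma), b = -A @ mu) computes the same thing. A small numpy check of that equivalence, with made-up mu and sigma values:

import numpy as np

mu = np.array([1.0, 10.0])
sigma = np.array([2.0, 5.0])
x = np.array([3.0, 20.0])

# Elementwise form used by predict().
z_elementwise = (x - mu) / sigma

# Equivalent affine form A @ x + b with A = diag(1/sigma), b = -A @ mu.
A = np.diag(1.0 / sigma)
b = -A @ mu
z_affine = A @ x + b

print(z_elementwise, z_affine)  # both [1. 2.]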
rainingcomputers/pykitml
pykitml/decision_tree.py
_Node.decision
python
def decision(self, input_data):
    if input_data.ndim == 1:
        input_data = np.array([input_data])
    cond = condition(input_data[:, self._col], self._split, self._ftype)
    left_output = self.left_node.decision(input_data[cond])
    right_output = self.right_node.decision(input_data[~cond])
    outputs = np.zeros((input_data.shape[0], left_output.shape[1]))
    outputs[cond] = left_output
    outputs[~cond] = right_output
    return outputs
Splits the dataset and passes it to subnodes. The inputs travel till they reach a leaf node and backtrack as outputs.
https://github.com/rainingcomputers/pykitml/blob/1c3e50cebcdb6c4da63979ef9a812b44d23a4857/pykitml/decision_tree.py#L71-L90
from itertools import combinations import numpy as np from graphviz import Digraph import tqdm from ._regressor import Regressor from ._classifier import Classifier from ._exceptions import _valid_list, InvalidFeatureType from . import _functions def condition(column, split, ftype): if(ftype == 'ranked' or ftype == 'continues'): return column < split elif(ftype == 'categorical'): cond = np.full(column.shape, True, dtype=bool) for category in split: cond = np.logical_and(cond, (column==category)) return cond class _Node: def __init__(self, split, col, gini_index, nindex, feature_type): self._split = split self._col = col self._ftype = feature_type self._gini = round(gini_index, 4) self._index = nindex self.right_node = None self.left_node = None @property def leaf(self): return False
MIT License
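The decision method above partitions the rows with a boolean mask, recurses on each half, and then scatters the two results back into the original row order. A standalone numpy sketch of that split-and-recombine step, where the "left" and "right" outputs are dummy constants rather than real subtree predictions:

import numpy as np

def split_and_recombine(input_data, col, split):
    # Boolean mask playing the role of condition(): rows with feature < split go left.
    cond = input_data[:, col] < split
    # Dummy per-row outputs standing in for the left/right subtree predictions.
    left_output = np.zeros((cond.sum(), 1))
    right_output = np.ones(((~cond).sum(), 1))
    # Scatter the two halves back into the original row order.
    outputs = np.zeros((input_data.shape[0], 1))
    outputs[cond] = left_output
    outputs[~cond] = right_output
    return outputs

X = np.array([[0.2], [0.9], [0.4], [0.7]])
print(split_and_recombine(X, col=0, split=0.5).ravel())  # [0. 1. 0. 1.]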
marisj/financials
financials/xbrl.py
XBRL.add_quarter
python
def add_quarter(self, qtr):
    self.recreate_files(qtr)
    c = openurl('{}/full-index/{}/xbrl.idx'.format(self.edgar, qtr))
    lines = c.read().decode().split('\n')
    c.close()
    for line in lines[10:]:
        try:
            (cik, name, form, date, filing) = [x.strip() for x in
                                               line.strip().split('|')]
            if form in self.forms:
                self.parse(cik, name, form, date, filing)
        except ValueError:
            with open(os.path.join(os.path.realpath('.'), 'check.txt'), 'a') as f:
                f.write('{}\n'.format(line))
Pulls XBRL financial statement links from quarterly EDGAR index.

Usage:
    >>> XBRL().add_quarter('2009/QTR1')

:param qtr: 'YYYY/QTR2'
https://github.com/marisj/financials/blob/284294315e5e56ba32f0e0a9940c41ae9079d200/financials/xbrl.py#L105-L126
import os import time import datetime from collections import defaultdict try: from http.client import IncompleteRead except ImportError: from httplib import IncompleteRead import lxml.html from lxml import etree from financials.helper import * class XBRL(object): def __init__(self): self.edgar = 'https://www.sec.gov/Archives/edgar' self.forms = [ '10-K', '10-K/A', '10-KT', '10-KT/A', '10-Q', '10-Q/A', '10-QT', '10-QT/A'] self.context = None self.filepath = os.path.join(os.path.realpath('.'), 'data') self.datapath = None self.history = None self.accession = None self.annual = None self.tree = None self.entity = None def get_index(self, url): c = openurl(url) tree = lxml.html.parse(c) c.close() elem = tree.getroot().xpath('//table[@class="tableFile"]/tr') tmp = [[x.text_content() for x in tr.xpath('.//td')] for tr in elem] return [[x[3], x[1], x[2]] for x in tmp if len(x) == 5 and x[3]] def recreate_files(self, qtr): q = qtr.replace('/QTR', 'Q') self.datapath = '{}/{}'.format(self.filepath, q) try: os.remove(self.datapath) except OSError: pass finally: with open(self.datapath, 'a') as f: f.write('{}\n'.format('|'.join([ 'focus', 'ticker', 'cik', 'zip', 'form', 'formdate', 'filedate', 'acceptance', 'accession', 'name', 'bs_assets', 'bs_cash', 'bs_currentassets', 'bs_ppenet', 'bs_ppegross', 'bs_currentliabilities', 'bs_liabilities', 'bs_longtermdebtnoncurrent', 'bs_longtermdebtcurrent', 'bs_longtermdebt', 'bs_equity', 'is_sales', 'is_cogs', 'is_grossprofit', 'is_research', 'is_sga', 'is_opexpenses', 'is_incometax', 'is_netincome', 'is_opincome', 'cf_operating', 'cf_depreciation', 'cf_depreciationamortization', 'cf_investing', 'cf_financing', 'cf_dividends', 'cf_cashchange']))) self.history = '{}/history/{}'.format(self.filepath, q) try: os.remove(self.history) except OSError: pass finally: with open(self.history, 'a') as f: f.write('{}\n'.format('|'.join([ 'accession', 'field', 'element', 'date', 'value'])))
BSD 3-Clause New or Revised License
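add_quarter above skips the header of the quarterly xbrl.idx file and splits each remaining line on '|' into five fields, keeping only the supported form types. A standalone sketch of that per-line parse, run on an illustrative line in the same format (not real EDGAR data):

FORMS = {'10-K', '10-K/A', '10-KT', '10-KT/A', '10-Q', '10-Q/A', '10-QT', '10-QT/A'}

def parse_index_line(line):
    # Mirrors the loop body in add_quarter: strip, split on '|', keep only known forms.
    try:
        cik, name, form, date, filing = [x.strip() for x in line.strip().split('|')]
    except ValueError:
        return None  # malformed line; the original appends it to check.txt
    return (cik, name, form, date, filing) if form in FORMS else None

sample = '123456|Example Co|10-Q|2009-02-01|edgar/data/123456/0001-09-000001-index.htm'  # illustrative
print(parse_index_line(sample))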
angr/cle
cle/backends/xbe.py
XBESection.only_contains_uninitialized_data
python
def only_contains_uninitialized_data(self):
    return False
We load every section in; they're all initialized
https://github.com/angr/cle/blob/7996cb1789eccc461cb31ab3c6234a74015489fd/cle/backends/xbe.py#L44-L48
import logging try: from xbe import Xbe except ImportError: Xbe = None import archinfo from ..errors import CLEError from . import Backend, register_backend from .region import Segment, Section l = logging.getLogger(name=__name__) class XBESection(Section): def __init__(self, name, file_offset, file_size, virtual_addr, virtual_size, xbe_sec): super().__init__(name, file_offset, virtual_addr, virtual_size) self.filesize = file_size self._xbe_sec = xbe_sec @property def is_readable(self): return True @property def is_writable(self): return (self._xbe_sec.header.flags & self._xbe_sec.header.FLAG_WRITEABLE) != 0 @property def is_executable(self): return (self._xbe_sec.header.flags & self._xbe_sec.header.FLAG_EXECUTABLE) != 0 @property
BSD 2-Clause Simplified License
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/device_tracker/bmw_connected_drive.py
BMWDeviceTracker.__init__
python
def __init__(self, see, vehicle):
    self._see = see
    self.vehicle = vehicle
Initialize the Tracker.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/device_tracker/bmw_connected_drive.py#L33-L36
import logging from homeassistant.components.bmw_connected_drive import DOMAIN as BMW_DOMAIN from homeassistant.util import slugify DEPENDENCIES = ['bmw_connected_drive'] _LOGGER = logging.getLogger(__name__) def setup_scanner(hass, config, see, discovery_info=None): accounts = hass.data[BMW_DOMAIN] _LOGGER.debug('Found BMW accounts: %s', ', '.join([a.name for a in accounts])) for account in accounts: for vehicle in account.account.vehicles: tracker = BMWDeviceTracker(see, vehicle) account.add_update_listener(tracker.update) tracker.update() return True class BMWDeviceTracker(object):
MIT License
elerac/polanalyser
polanalyser/stokes.py
calcLinearStokes
python
def calcLinearStokes(intensities, thetas):
    muellers = [polarizer(theta)[..., :3, :3] for theta in thetas]
    return calcStokes(intensities, muellers)
Calculate only linear polarization stokes vector from observed intensity and linear polarizer angle

Parameters
----------
intensities : np.ndarray
    Intensity of measurements (height, width, n)
theta : np.ndarray
    Linear polarizer angles (n, )

Returns
-------
S : np.ndarray
    Stokes vector (height, width, 3)
https://github.com/elerac/polanalyser/blob/801c6ed71d635dfc20e1514f76408133956c8d9d/polanalyser/stokes.py#L38-L54
import cv2 import numpy as np from .mueller import polarizer from .util import njit_if_available def calcStokes(intensities, muellers): if not isinstance(intensities, np.ndarray): intensities = np.stack(intensities, axis=-1) if not isinstance(muellers, np.ndarray): muellers = np.stack(muellers, axis=-1) if muellers.ndim == 1: thetas = muellers return calcLinearStokes(intensities, thetas) A = muellers[0].T A_pinv = np.linalg.pinv(A) stokes = np.tensordot(A_pinv, intensities, axes=(1, -1)) stokes = np.moveaxis(stokes, 0, -1) return stokes
MIT License
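calcLinearStokes above delegates to calcStokes, which solves the measurement equations with a pseudo-inverse. Under the standard linear-polarizer model, the intensity observed through an analyzer at angle theta is I = 0.5 * (S0 + S1*cos(2*theta) + S2*sin(2*theta)), so stacking one such row per angle and applying np.linalg.pinv recovers the Stokes vector. A self-contained numpy sketch of that recovery with synthetic Stokes values; this illustrates the underlying math, not polanalyser's API:

import numpy as np

def linear_stokes_from_intensities(intensities, thetas):
    # One measurement row per polarizer angle: I = 0.5 * (S0 + S1*cos2t + S2*sin2t).
    A = 0.5 * np.stack([np.ones_like(thetas),
                        np.cos(2 * thetas),
                        np.sin(2 * thetas)], axis=1)
    # Least-squares solution via the pseudo-inverse, as calcStokes does.
    return np.linalg.pinv(A) @ intensities

# Synthetic ground truth and the four canonical angles 0, 45, 90, 135 degrees.
S_true = np.array([1.0, 0.3, -0.2])
thetas = np.deg2rad([0, 45, 90, 135])
A = 0.5 * np.stack([np.ones_like(thetas), np.cos(2 * thetas), np.sin(2 * thetas)], axis=1)
intensities = A @ S_true
print(linear_stokes_from_intensities(intensities, thetas))  # approx [ 1.   0.3 -0.2]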
gitexl/whacked4
src/whacked4/dehacked/entry.py
Entry.parse_field_value
python
def parse_field_value(self, key, value):
    field = self.FIELDS[key]

    if field.type == FieldType.FLAGS:
        return self.table.flags_parse_string(key, value)

    elif field.type == FieldType.INT or field.type == FieldType.AMMO or \
            field.type == FieldType.SOUND or field.type == FieldType.SPRITE or \
            field.type == FieldType.STATE:
        try:
            return int(value)
        except ValueError:
            raise ValueError('Value "{}" for field "{}" is not an integer.'.format(value, key))

    elif field.type == FieldType.FLOAT:
        try:
            return float(value)
        except ValueError:
            raise ValueError('Value "{}" for field "{}" is not a float.'.format(value, key))

    elif field.type == FieldType.STRING or field.type == FieldType.ACTION or \
            field.type == FieldType.ENUM_GAME or field.type == FieldType.ENUM_RENDER_STYLE:
        return str(value)

    raise ValueError('Unknown field value type "{}".'.format(field.type))
Validates a patch field value.

@param key: The field key to validate the value against.
@param value: The value to validate.

@returns: A validated value.
https://github.com/gitexl/whacked4/blob/b0618efdedc2b11bb3e7e994f2c3280b8e53751c/src/whacked4/dehacked/entry.py#L75-L107
import copy from dataclasses import dataclass from typing import Optional from whacked4.dehacked.table import Table from whacked4.enum import WhackedEnum class FieldType(WhackedEnum): INT = 'int' FLOAT = 'float' STRING = 'string' STATE = 'state' SOUND = 'sound' AMMO = 'ammo' SPRITE = 'sprite' FLAGS = 'flags' ACTION = 'action' ENUM_GAME = 'enum_game' ENUM_RENDER_STYLE = 'enum_render_style' @dataclass class Field: patch_key: str type: FieldType class Entry: NAME = None STRUCTURE = None FIELDS = None def __init__(self, table: Table): self.name: Optional[str] = None self.table: Table = table self.values = {} self.extra_values = {} self.unused = False def __getitem__(self, key): return self.values[key] def __setitem__(self, key, value): if key not in self.values: raise KeyError('Cannot find patch key "{}".'.format(key)) self.values[key] = value def __contains__(self, item): return item in self.values
BSD 2-Clause Simplified License
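parse_field_value above dispatches on the field's declared type and converts the raw patch string accordingly, raising a descriptive ValueError when the conversion fails. A small standalone sketch of the same type-driven parsing with plain strings in place of the Field/FieldType machinery:

def parse_value(field_type, key, value):
    # Integer-like types share one branch, mirroring the INT/AMMO/SOUND/SPRITE/STATE group.
    if field_type == 'int':
        try:
            return int(value)
        except ValueError:
            raise ValueError('Value "{}" for field "{}" is not an integer.'.format(value, key))
    elif field_type == 'float':
        try:
            return float(value)
        except ValueError:
            raise ValueError('Value "{}" for field "{}" is not a float.'.format(value, key))
    elif field_type == 'string':
        return str(value)
    raise ValueError('Unknown field value type "{}".'.format(field_type))

print(parse_value('int', 'Speed', '8'))        # 8
print(parse_value('float', 'Radius', '20.0'))  # 20.0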
theislab/scib
scIB/metrics/metrics.py
metrics_fast
python
def metrics_fast(
        adata,
        adata_int,
        batch_key,
        label_key,
        **kwargs
):
    return metrics(
        adata,
        adata_int,
        batch_key,
        label_key,
        isolated_labels_asw_=True,
        silhouette_=True,
        hvg_score_=True,
        graph_conn_=True,
        pcr_=True,
        **kwargs
    )
Only fast metrics:

Biological conservation
    HVG overlap
    Cell type ASW
    Isolated label ASW

Batch conservation
    Graph connectivity
    Batch ASW
    PC regression
https://github.com/theislab/scib/blob/a757bfca2d936abf1454c428896999c341fa7b29/scIB/metrics/metrics.py#L18-L49
import pandas as pd from scIB.utils import * from scIB.clustering import opt_louvain from .ari import ari from .cell_cycle import cell_cycle from .graph_connectivity import graph_connectivity from .highly_variable_genes import hvg_overlap from .isolated_labels import isolated_labels from .kbet import kBET from .lisi import ilisi_graph, clisi_graph from .nmi import nmi from .pcr import pcr_comparison from .silhouette import silhouette, silhouette_batch from .trajectory import trajectory_conservation
MIT License
beeware/briefcase
src/briefcase/commands/dev.py
DevCommand.distribution_path
python
def distribution_path(self, app, packaging_format):
    raise NotImplementedError()
A placeholder; Dev command doesn't have a distribution path
https://github.com/beeware/briefcase/blob/860fc395ff34ef0aa5e32233892ffced798708a8/src/briefcase/commands/dev.py#L36-L38
import os import subprocess import sys from typing import Optional from briefcase.config import BaseConfig from briefcase.exceptions import BriefcaseCommandError from .base import BaseCommand from .create import DependencyInstallError, write_dist_info class DevCommand(BaseCommand): cmd_line = 'briefcase dev' command = 'dev' output_format = None description = 'Run a briefcase project in the dev environment' @property def platform(self): return { 'darwin': 'macOS', 'linux': 'linux', 'win32': 'windows', }[sys.platform] def bundle_path(self, app): raise NotImplementedError() def binary_path(self, app): raise NotImplementedError()
BSD 3-Clause New or Revised License
nastools/homeassistant
homeassistant/components/media_player/gpmdp.py
request_configuration
python
def request_configuration(hass, config, url, add_devices_callback):
    configurator = get_component('configurator')
    if 'gpmdp' in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING['gpmdp'], "Failed to register, please try again.")
        return
    from websocket import create_connection
    websocket = create_connection((url), timeout=1)
    websocket.send(json.dumps({'namespace': 'connect',
                               'method': 'connect',
                               'arguments': ['Home Assistant']}))

    def gpmdp_configuration_callback(callback_data):
        while True:
            from websocket import _exceptions
            try:
                msg = json.loads(websocket.recv())
            except _exceptions.WebSocketConnectionClosedException:
                continue
            if msg['channel'] != 'connect':
                continue
            if msg['payload'] != "CODE_REQUIRED":
                continue
            pin = callback_data.get('pin')
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', pin]}))
            tmpmsg = json.loads(websocket.recv())
            if tmpmsg['channel'] == 'time':
                _LOGGER.error('Error setting up GPMDP. Please pause'
                              ' the desktop player and try again.')
                break
            code = tmpmsg['payload']
            if code == 'CODE_REQUIRED':
                continue
            setup_gpmdp(hass, config, code, add_devices_callback)
            _save_config(hass.config.path(GPMDP_CONFIG_FILE), {"CODE": code})
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', code]}))
            websocket.close()
            break

    _CONFIGURING['gpmdp'] = configurator.request_config(
        hass, DEFAULT_NAME, gpmdp_configuration_callback,
        description=(
            'Enter the pin that is displayed in the '
            'Google Play Music Desktop Player.'),
        submit_caption="Submit",
        fields=[{'id': 'pin', 'name': 'Pin Code', 'type': 'number'}]
    )
Request configuration steps from the user.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/media_player/gpmdp.py#L49-L105
import logging import json import os import socket import time import voluptuous as vol from homeassistant.components.media_player import ( MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_PAUSE, SUPPORT_VOLUME_SET, SUPPORT_SEEK, MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.const import ( STATE_PLAYING, STATE_PAUSED, STATE_OFF, CONF_HOST, CONF_PORT, CONF_NAME) from homeassistant.loader import get_component import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['websocket-client==0.37.0'] _CONFIGURING = {} _LOGGER = logging.getLogger(__name__) DEFAULT_HOST = 'localhost' DEFAULT_NAME = 'GPM Desktop Player' DEFAULT_PORT = 5672 GPMDP_CONFIG_FILE = 'gpmpd.conf' SUPPORT_GPMDP = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | SUPPORT_VOLUME_SET PLAYBACK_DICT = {'0': STATE_PAUSED, '1': STATE_PAUSED, '2': STATE_PLAYING} PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, })
MIT License
product-definition-center/product-definition-center
pdc/apps/release/models.py
AllowedPushTargetsModel._parent_allowed_push_targets
python
def _parent_allowed_push_targets(self):
    raise NotImplementedError()
Returns allowed push targets from parent. This method must be overridden.
https://github.com/product-definition-center/product-definition-center/blob/af79f73c30fa5f5709ba03d584b7a49b83166b81/pdc/apps/release/models.py#L139-L145
import json from django.db import models from django.core.validators import RegexValidator from django.db.models.signals import pre_save from django.dispatch import receiver from django.core.exceptions import ValidationError from productmd.common import RELEASE_SHORT_RE, RELEASE_VERSION_RE from productmd.common import create_release_id from pdc.apps.common.hacks import as_list from . import signals from pdc.apps.common.models import SigKey from pdc.apps.repository.models import PushTarget, Service def validateCPE(cpe): if not cpe.startswith("cpe:"): return 'CPE must start with "cpe:"' return None class ReleaseType(models.Model): short = models.CharField(max_length=255, blank=False, unique=True) name = models.CharField(max_length=255, blank=False, unique=True) suffix = models.CharField(max_length=255, blank=True, unique=True) class Meta: ordering = ('short', 'name', 'suffix') def __unicode__(self): return u"%s" % self.short class BaseProduct(models.Model): base_product_id = models.CharField(max_length=200, blank=False, unique=True) short = models.CharField(max_length=200, validators=[ RegexValidator(regex=RELEASE_SHORT_RE.pattern, message='Only accept lowercase letters, numbers or -')]) version = models.CharField(max_length=200) name = models.CharField(max_length=255) release_type = models.ForeignKey(ReleaseType, blank=False, db_index=True, on_delete=models.CASCADE) class Meta: unique_together = ( ("short", "version", "release_type"), ("name", "version", "release_type"), ) ordering = ("base_product_id", ) def __unicode__(self): return unicode(self.base_product_id) def get_base_product_id(self): return create_release_id(self.short.lower(), self.version, self.release_type.short) def export(self): return { "base_product_id": self.base_product_id, "short": self.short, "version": self.version, "name": self.name, "release_type": self.release_type.short, } @receiver(pre_save, sender=BaseProduct) def populate_base_product_id(sender, instance, **kwargs): instance.base_product_id = instance.get_base_product_id() class Product(models.Model): name = models.CharField(max_length=200) short = models.CharField(max_length=200, unique=True, validators=[ RegexValidator(regex=RELEASE_SHORT_RE.pattern, message='Only accept lowercase letters, numbers or -')]) allowed_push_targets = models.ManyToManyField(PushTarget) class Meta: ordering = ("short", ) def __unicode__(self): return self.short @property def active(self): return self.active_release_count > 0 @property def product_version_count(self): return self.productversion_set.count() @property def active_product_version_count(self): return sum(1 for pv in self.productversion_set.all() if pv.active) @property def release_count(self): return sum(pv.release_count for pv in self.productversion_set.all()) @property def active_release_count(self): return sum(pv.active_release_count for pv in self.productversion_set.all()) def export(self): return { "name": self.name, "short": self.short, "allowed_push_targets": [push_target.name for push_target in self.allowed_push_targets.all()], } class AllowedPushTargetsModel(models.Model): masked_push_targets = models.ManyToManyField(PushTarget) class Meta: abstract = True
MIT License
cfedermann/appraise
appraise/evaluation/models.py
EvaluationResult.readable_duration
python
def readable_duration(self):
    return '{}'.format(self.duration)
Returns a readable version of this EvaluationResult's duration.
https://github.com/cfedermann/appraise/blob/2cce477efd5594699d6e0fa58f6312df60e05394/appraise/evaluation/models.py#L539-L543
import logging import uuid from xml.etree.ElementTree import Element, fromstring, ParseError, tostring from django.dispatch import receiver from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models from django.template import Context from django.template.loader import get_template from appraise.settings import LOG_LEVEL, LOG_HANDLER from appraise.utils import datetime_to_seconds logging.basicConfig(level=LOG_LEVEL) LOGGER = logging.getLogger('appraise.evaluation.models') LOGGER.addHandler(LOG_HANDLER) APPRAISE_TASK_TYPE_CHOICES = ( ('1', 'Quality Checking'), ('2', 'Ranking'), ('3', 'Post-editing'), ('4', 'Error classification'), ('5', '3-Way Ranking'), ) def validate_source_xml_file(value): value.open() try: _tree = fromstring(value.read()) assert(_tree.tag == 'set'), 'expected <set> on top-level' for _attr in ('id', 'source-language', 'target-language'): assert(_attr in _tree.attrib.keys()), 'missing required <set> attribute {0}'.format(_attr) for _child in _tree: validate_item_xml(_child) except (AssertionError, ParseError), msg: raise ValidationError('Invalid XML: "{0}".'.format(msg)) value.close() return value class EvaluationTask(models.Model): task_id = models.CharField( max_length=32, db_index=True, unique=True, editable=False, help_text="Unique task identifier for this evaluation task.", verbose_name="Task identifier" ) task_name = models.CharField( max_length=100, db_index=True, help_text="Unique, descriptive name for this evaluation task.", unique=True, verbose_name="Task name" ) task_type = models.CharField( max_length=1, choices=APPRAISE_TASK_TYPE_CHOICES, db_index=True, help_text="Type choice for this evaluation task.", verbose_name="Task type" ) task_xml = models.FileField( upload_to='source-xml', help_text="XML source file for this evaluation task.", validators=[validate_source_xml_file], verbose_name="Task XML source" ) task_attributes = {} description = models.TextField( blank=True, help_text="(Optional) Text describing this evaluation task.", verbose_name="Description" ) users = models.ManyToManyField( User, blank=True, db_index=True, null=True, help_text="(Optional) Users allowed to work on this evaluation task." ) active = models.BooleanField( db_index=True, default=True, help_text="Indicates that this evaluation task is still in use.", verbose_name="Active?" ) random_order = models.BooleanField( db_index=True, default=False, help_text="Indicates that items from this evaluation task should be " "shown in random order.", verbose_name="Random order?" 
) class Meta: ordering = ('task_name', 'task_type', 'task_id') verbose_name = "EvaluationTask object" verbose_name_plural = "EvaluationTask objects" def __init__(self, *args, **kwargs): super(EvaluationTask, self).__init__(*args, **kwargs) if not self.task_id: self.task_id = self.__class__._create_task_id() def __unicode__(self): return u'<evaluation-task id="{0}">'.format(self.id) @classmethod def _create_task_id(cls): new_id = uuid.uuid4().hex while cls.objects.filter(task_id=new_id): new_id = uuid.uuid4().hex return new_id def save(self, *args, **kwargs): if not self.id: self.full_clean() super(EvaluationTask, self).save(*args, **kwargs) self.task_xml.open() _tree = fromstring(self.task_xml.read()) self.task_xml.close() for _child in _tree: new_item = EvaluationItem(task=self, item_xml=tostring(_child)) new_item.save() super(EvaluationTask, self).save(*args, **kwargs) def get_absolute_url(self): task_handler_view = 'appraise.evaluation.views.task_handler' kwargs = {'task_id': self.task_id} return reverse(task_handler_view, kwargs=kwargs) def get_status_url(self): status_handler_view = 'appraise.evaluation.views.status_view' kwargs = {'task_id': self.task_id} return reverse(status_handler_view, kwargs=kwargs) def reload_dynamic_fields(self): if self.task_xml: try: _task_xml = fromstring(self.task_xml.read()) self.task_attributes = {} for key, value in _task_xml.attrib.items(): self.task_attributes[key] = value except (ParseError, IOError), msg: self.task_attributes = { 'filename': self.task_xml.name, 'note': msg, } def get_status_header(self): _task_type = self.get_task_type_display() _header = ['Overall completion', 'Average duration'] if _task_type == 'Quality Checking': pass elif _task_type == 'Ranking': pass elif _task_type == 'Post-editing': pass elif _task_type == 'Error classification': pass elif _task_type == '3-Way Ranking': pass return _header def get_status_for_user(self, user=None): _task_type = self.get_task_type_display() _status = [] _items = EvaluationItem.objects.filter(task=self).count() _done = EvaluationResult.objects.filter(user=user, item__task=self).count() _status.append('{0}/{1}'.format(_done, _items)) _percentage = 100*_done/float(_items or 1) _status.append(_percentage) if _percentage < 33: _status.append(' progress-danger') elif _percentage < 66: _status.append(' progress-warning') else: _status.append(' progress-success') _results = EvaluationResult.objects.filter(user=user, item__task=self) _durations = _results.values_list('duration', flat=True) _durations = [datetime_to_seconds(d) for d in _durations if d] _average_duration = sum(_durations) / (float(len(_durations)) or 1) _status.append('{:.2f} sec'.format(_average_duration)) if _task_type == 'Quality Checking': pass elif _task_type == 'Ranking': pass elif _task_type == 'Post-editing': pass elif _task_type == 'Error classification': pass elif _task_type == '3-Way Ranking': pass return _status def get_status_for_users(self): _status = [] _items = EvaluationItem.objects.filter(task=self).count() _done = [] for user in self.users.all(): _done.append(EvaluationResult.objects.filter(user=user, item__task=self).count()) _status.append('{0}/{1}'.format(min(_done or [0]), _items)) _percentage = 100*min(_done or [0])/float(_items or 1) _status.append(_percentage) if _percentage < 33: _status.append(' progress-danger') elif _percentage < 66: _status.append(' progress-warning') else: _status.append(' progress-success') _durations = [] for user in self.users.all(): _results = 
EvaluationResult.objects.filter(user=user, item__task=self) _durations.extend(_results.values_list('duration', flat=True)) _durations = [datetime_to_seconds(d) for d in _durations if d] _average_duration = sum(_durations) / (float(len(_durations)) or 1) _status.append('{:.2f} sec'.format(_average_duration)) return _status def is_finished_for_user(self, user=None): _items = EvaluationItem.objects.filter(task=self).count() _done = EvaluationResult.objects.filter(user=user, item__task=self).count() return _items == _done def get_finished_for_user(self, user=None): _items = EvaluationItem.objects.filter(task=self).count() _done = EvaluationResult.objects.filter(user=user, item__task=self).count() return (_done, _items) def export_to_xml(self): template = get_template('evaluation/result_task.xml') _task_type = self.get_task_type_display().lower().replace(' ', '-') self.reload_dynamic_fields() _attr = self.task_attributes.items() attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr]) results = [] for item in EvaluationItem.objects.filter(task=self): for _result in item.evaluationresult_set.all(): results.append(_result.export_to_xml()) context = {'task_type': _task_type, 'attributes': attributes, 'results': results} return template.render(Context(context)) @receiver(models.signals.pre_delete, sender=EvaluationTask) def remove_task_xml_file_on_delete(sender, instance, **kwargs): if len(instance.task_xml.name): instance.task_xml.delete(save=False) def validate_item_xml(value): try: if isinstance(value, Element): _tree = value else: _tree = fromstring(value) if not _tree.tag == 'seg': raise ValidationError('Invalid XML: illegal tag: "{0}".'.format( _tree.tag)) for _attr in ('id', 'doc-id'): assert(_attr in _tree.attrib.keys()), 'missing required <seg> attribute {0}'.format(_attr) assert(len(_tree.findall('source')) == 1), 'exactly one <source> element expected' assert(_tree.find('source').text is not None), 'missing required <source> text value' if _tree.find('reference') is not None: assert(_tree.find('reference').text is not None), 'missing required <reference> text value' assert(len(_tree.findall('translation')) >= 1), 'one or more <translation> elements expected' for _translation in _tree.iterfind('translation'): assert('system' in _translation.attrib.keys()), 'missing required <translation> attribute "system"' assert(_translation.text is not None), 'missing required <translation> text value' except (AssertionError, ParseError), msg: raise ValidationError('Invalid XML: "{0}".'.format(msg)) class EvaluationItem(models.Model): task = models.ForeignKey( EvaluationTask, db_index=True ) item_xml = models.TextField( help_text="XML source for this evaluation item.", validators=[validate_item_xml], verbose_name="Translations XML source" ) attributes = None source = None reference = None translations = None class Meta: ordering = ('id',) verbose_name = "EvaluationItem object" verbose_name_plural = "EvaluationItem objects" def __init__(self, *args, **kwargs): super(EvaluationItem, self).__init__(*args, **kwargs) self.reload_dynamic_fields() def __unicode__(self): return u'<evaluation-item id="{0}">'.format(self.id) def save(self, *args, **kwargs): self.full_clean() super(EvaluationItem, self).save(*args, **kwargs) def reload_dynamic_fields(self): if self.item_xml: try: _item_xml = fromstring(self.item_xml) self.attributes = _item_xml.attrib _source = _item_xml.find('source') if _source is not None: self.source = (_source.text, _source.attrib) _reference = _item_xml.find('reference') if 
_reference is not None: self.reference = (_reference.text, _reference.attrib) self.translations = [] for _translation in _item_xml.iterfind('translation'): self.translations.append((_translation.text, _translation.attrib)) except ParseError: self.source = None self.reference = None self.translations = None class EvaluationResult(models.Model): item = models.ForeignKey( EvaluationItem, db_index=True ) user = models.ForeignKey( User, db_index=True ) duration = models.TimeField(blank=True, null=True, editable=False)
BSD 3-Clause New or Revised License
wildmeorg/wildbook-ia
wbia/algo/hots/chip_match.py
_ChipMatchScorers.score_annot_csum
python
def score_annot_csum(cm, qreq_):
    cm.evaluate_csum_annot_score(qreq_)
    cm.set_cannonical_annot_score(cm.algo_annot_scores['csum'])
CommandLine:
    python -m wbia.algo.hots.chip_match --test-score_annot_csum --show
    python -m wbia.algo.hots.chip_match --test-score_annot_csum --show --qaid 18

Example:
    >>> # xdoctest: +REQUIRES(--slow)
    >>> # ENABLE_DOCTEST
    >>> from wbia.algo.hots.chip_match import *  # NOQA
    >>> ibs, qreq_, cm_list = plh.testdata_post_sver()
    >>> cm = cm_list[0]
    >>> cm.score_annot_csum(qreq_)
    >>> ut.quit_if_noshow()
    >>> cm.show_ranked_matches(qreq_, figtitle='score_annot_csum')
    >>> ut.show_if_requested()
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/algo/hots/chip_match.py#L854-L872
import logging import copy import numpy as np import utool as ut import vtool as vt from os.path import join from operator import xor from wbia.algo.hots import hstypes from wbia.algo.hots import old_chip_match from wbia.algo.hots import scoring from wbia.algo.hots import name_scoring from wbia.algo.hots import _pipeline_helpers as plh print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia') class NeedRecomputeError(Exception): pass DEBUG_CHIPMATCH = False MAX_FNAME_LEN = 80 if ut.WIN32 else 200 TRUNCATE_UUIDS = ut.get_argflag(('--truncate-uuids', '--trunc-uuids')) def safeop(op_, xs, *args, **kwargs): return None if xs is None else op_(xs, *args, **kwargs) def filtnorm_op(filtnorm_, op_, *args, **kwargs): return ( None if filtnorm_ is None else [safeop(op_, xs, *args, **kwargs) for xs in filtnorm_] ) def extend_scores(vals, num): if vals is None: return None return np.append(vals, np.full(num, -np.inf)) def extend_nplists_(x_list, num, shape, dtype): return x_list + ([np.empty(shape, dtype=dtype)] * num) def extend_pylist_(x_list, num, val): return x_list + ([None] * num) def extend_nplists(x_list, num, shape, dtype): return safeop(extend_nplists_, x_list, num, shape, dtype) def extend_pylist(x_list, num, val): return safeop(extend_pylist_, x_list, num, val) def convert_numpy_lists(arr_list, dtype, dims=None): new_arrs = [np.array(arr, dtype=dtype) for arr in arr_list] if dims is not None: new_arrs = [vt.atleast_nd(arr, dims) for arr in new_arrs] return new_arrs def safecast_numpy_lists(arr_list, dtype=None, dims=None): if arr_list is None: new_arrs = None else: new_arrs = [np.array(arr, dtype=dtype) for arr in arr_list] if dims is not None: new_arrs = [vt.ensure_shape(arr, dims) for arr in new_arrs] return new_arrs def aslist(arr): if isinstance(arr, np.ndarray): return arr.tolist() else: return arr def convert_numpy(arr, dtype): return np.array(ut.replace_nones(arr, np.nan), dtype=dtype) def check_arrs_eq(arr1, arr2): if arr1 is None and arr2 is None: return True elif isinstance(arr1, np.ndarray) and isinstance(arr2, np.ndarray): return np.all(arr1 == arr2) elif len(arr1) != len(arr2): return False elif any(len(x) != len(y) for x, y in zip(arr1, arr2)): return False elif all(np.all(x == y) for x, y in zip(arr1, arr2)): return True else: return False def safe_check_lens_eq(arr1, arr2, msg=None): if msg is None: msg = 'outer lengths do not correspond' if arr1 is None or arr2 is None: return True else: assert len(arr1) == len(arr2), msg + '(%r != %r)' % (len(arr1), len(arr2)) def safe_check_nested_lens_eq(arr1, arr2): if arr1 is None or arr2 is None: return True else: safe_check_lens_eq(arr1, arr2, 'outer lengths do not correspond') for count, (x, y) in enumerate(zip(arr1, arr2)): assert len(x) == len( y ), 'inner lengths at position=%r do not correspond (%r != %r)' % ( count, len(x), len(y), ) def _assert_eq_len(list1_, list2_): if list1_ is not None: ut.assert_eq_len(list1_, list2_) def prepare_dict_uuids(class_dict, ibs): class_dict = class_dict.copy() if 'qaid' not in class_dict and 'qannot_uuid' in class_dict: class_dict['qaid'] = ibs.get_annot_aids_from_uuid(class_dict['qannot_uuid']) if 'daid_list' not in class_dict and 'dannot_uuid_list' in class_dict: class_dict['daid_list'] = ibs.get_annot_aids_from_uuid( class_dict['dannot_uuid_list'] ) if 'dnid_list' not in class_dict and 'dannot_uuid_list' in class_dict: daid_list = class_dict['daid_list'] dnid_list = ibs.get_name_rowids_from_text(class_dict['dname_list']) check_set = set([None, 
ibs.const.UNKNOWN_NAME_ROWID]) dnid_list = [ -daid if dnid in check_set else dnid for daid, dnid in zip(daid_list, dnid_list) ] class_dict['dnid_list'] = dnid_list if 'qnid' not in class_dict and 'qname' in class_dict: qnid = ibs.get_name_rowids_from_text(class_dict['qname']) qaid = class_dict['qaid'] qnid = -qaid if qnid == ibs.const.UNKNOWN_NAME_ROWID else qnid class_dict['qnid'] = qnid if 'unique_nids' not in class_dict and 'unique_name_list' in class_dict: dnid_list = class_dict['dnid_list'] unique_nids_, name_groupxs_ = vt.group_indices(np.array(dnid_list)) class_dict['unique_nids'] = unique_nids_ return class_dict class _ChipMatchVisualization(object): def show_single_namematch( cm, qreq_, dnid=None, rank=None, fnum=None, pnum=None, homog=ut.get_argflag('--homog'), **kwargs ): from wbia.viz import viz_matches assert bool(dnid is None) != bool(rank is None), 'must choose one' if dnid is None: dnid = cm.get_rank_name(rank) qaid = cm.qaid if cm.nid2_nidx is None: raise AssertionError('cm.nid2_nidx has not been evaluated yet') try: nidx = cm.nid2_nidx[dnid] except KeyError: logger.info('CHIP HAS NO GROUND TRUTH MATCHES') cm.assert_self(verbose=False) cm2 = cm.extend_results(qreq_) cm2.assert_self(verbose=False) cm = cm2 nidx = cm.nid2_nidx[dnid] groupxs = cm.name_groupxs[nidx] daids = vt.take2(cm.daid_list, groupxs) dnids = vt.take2(cm.dnid_list, groupxs) assert np.all(dnid == dnids), 'inconsistent naming, dnid=%r, dnids=%r' % ( dnid, dnids, ) groupxs = groupxs.compress(daids != cm.qaid) group_sortx = cm.annot_score_list.take(groupxs).argsort()[::-1] sorted_groupxs = groupxs.take(group_sortx) name_fm_list = ut.take(cm.fm_list, sorted_groupxs) REMOVE_EMPTY_MATCHES = len(sorted_groupxs) > 3 REMOVE_EMPTY_MATCHES = True if REMOVE_EMPTY_MATCHES: isvalid_list = np.array([len(fm) > 0 for fm in name_fm_list]) MAX_MATCHES = 3 isvalid_list = ut.make_at_least_n_items_valid(isvalid_list, MAX_MATCHES) name_fm_list = ut.compress(name_fm_list, isvalid_list) sorted_groupxs = sorted_groupxs.compress(isvalid_list) name_H1_list = ( None if not homog or cm.H_list is None else ut.take(cm.H_list, sorted_groupxs) ) name_fsv_list = ( None if cm.fsv_list is None else ut.take(cm.fsv_list, sorted_groupxs) ) name_fs_list = ( None if name_fsv_list is None else [fsv.prod(axis=1) for fsv in name_fsv_list] ) name_daid_list = ut.take(cm.daid_list, sorted_groupxs) featflag_list = name_scoring.get_chipmatch_namescore_nonvoting_feature_flags( cm, qreq_=qreq_ ) name_featflag_list = ut.take(featflag_list, sorted_groupxs) name_score = cm.name_score_list[nidx] name_rank = ut.listfind(aslist(cm.name_score_list.argsort()[::-1]), nidx) name_annot_scores = cm.annot_score_list.take(sorted_groupxs) _ = viz_matches.show_name_matches( qreq_.ibs, qaid, name_daid_list, name_fm_list, name_fs_list, name_H1_list, name_featflag_list, name_score=name_score, name_rank=name_rank, name_annot_scores=name_annot_scores, qreq_=qreq_, fnum=fnum, pnum=pnum, **kwargs ) return _ def show_single_annotmatch( cm, qreq_, daid=None, fnum=None, pnum=None, homog=False, aid2=None, **kwargs ): from wbia.viz import viz_matches if aid2 is not None: assert daid is None, 'use aid2 instead of daid kwarg' daid = aid2 if daid is None: idx = cm.argsort()[0] daid = cm.daid_list[idx] else: try: idx = cm.daid2_idx[daid] except KeyError: cm = cm.extend_results(qreq_) idx = cm.daid2_idx[daid] fm = cm.fm_list[idx] H1 = None if not homog or cm.H_list is None else cm.H_list[idx] fsv = None if cm.fsv_list is None else cm.fsv_list[idx] fs = None if fsv is None else 
fsv.prod(axis=1) showkw = dict(fm=fm, fs=fs, H1=H1, fnum=fnum, pnum=pnum, **kwargs) score = None if cm.score_list is None else cm.score_list[idx] viz_matches.show_matches2( qreq_.ibs, cm.qaid, daid, qreq_=qreq_, score=score, **showkw ) def show_ranked_matches(cm, qreq_, clip_top=6, *args, **kwargs): idx_list = ut.listclip(cm.argsort(), clip_top) cm.show_index_matches(qreq_, idx_list, *args, **kwargs) def show_daids_matches(cm, qreq_, daids, *args, **kwargs): idx_list = ut.dict_take(cm.daid2_idx, daids) cm.show_index_matches(qreq_, idx_list, *args, **kwargs) def show_index_matches( cm, qreq_, idx_list, fnum=None, figtitle=None, plottype='annotmatch', **kwargs ): import wbia.plottool as pt if fnum is None: fnum = pt.next_fnum() nRows, nCols = pt.get_square_row_cols(len(idx_list), fix=False) if ut.get_argflag('--vert'): nRows, nCols = nCols, nRows next_pnum = pt.make_pnum_nextgen(nRows, nCols) for idx in idx_list: daid = cm.daid_list[idx] pnum = next_pnum() if plottype == 'namematch': dnid = qreq_.ibs.get_annot_nids(daid) cm.show_single_namematch(qreq_, dnid, pnum=pnum, fnum=fnum, **kwargs) elif plottype == 'annotmatch': cm.show_single_annotmatch(qreq_, daid, fnum=fnum, pnum=pnum, **kwargs) score = vt.trytake(cm.score_list, idx) annot_score = vt.trytake(cm.annot_score_list, idx) score_str = ( 'score = %.3f' % (score,) if score is not None else 'score = None' ) annot_score_str = ( 'annot_score = %.3f' % (annot_score,) if annot_score is not None else 'annot_score = None' ) title = score_str + '\n' + annot_score_str pt.set_title(title) else: raise NotImplementedError('Unknown plottype=%r' % (plottype,)) if figtitle is not None: pt.set_figtitle(figtitle) show_matches = show_single_annotmatch def ishow_single_annotmatch(cm, qreq_, aid2=None, **kwargs): from wbia.viz.interact import interact_matches kwshow = { 'mode': 1, } if aid2 is None: aid2 = cm.get_top_aids(ntop=1)[0] logger.info('[cm] ishow_single_annotmatch aids(%s, %s)' % (cm.qaid, aid2)) kwshow.update(**kwargs) try: inter = interact_matches.MatchInteraction( qreq_.ibs, cm, aid2, qreq_=qreq_, **kwshow ) inter.start() return inter except Exception as ex: ut.printex(ex, 'failed in cm.ishow_single_annotmatch', keys=['aid', 'qreq_']) raise ishow_match = ishow_single_annotmatch ishow_matches = ishow_single_annotmatch def ishow_analysis(cm, qreq_, **kwargs): from wbia.viz.interact import interact_qres kwshow = { 'show_query': False, 'show_timedelta': True, } kwshow.update(kwargs) return interact_qres.ishow_analysis(qreq_.ibs, cm, qreq_=qreq_, **kwshow) def show_analysis(cm, qreq_, **kwargs): from wbia.viz import viz_qres kwshow = { 'show_query': False, 'show_timedelta': True, } kwshow.update(kwargs) return viz_qres.show_qres_analysis(qreq_.ibs, cm, qreq_=qreq_, **kwshow) def imwrite_single_annotmatch(cm, qreq_, aid, **kwargs): import wbia.plottool as pt import matplotlib as mpl save_keys = ['dpi', 'figsize', 'saveax', 'fpath', 'fpath_strict', 'verbose'] save_vals = ut.dict_take_pop(kwargs, save_keys, None) savekw = dict(zip(save_keys, save_vals)) fpath = savekw.pop('fpath') if fpath is None and 'fpath_strict' not in savekw: savekw['usetitle'] = True was_interactive = mpl.is_interactive() if was_interactive: mpl.interactive(False) fnum = pt.ensure_fnum(kwargs.pop('fnum', None)) fig = pt.plt.figure(fnum) fig.clf() cm.show_single_annotmatch(qreq_, aid, colorbar_=False, fnum=fnum, **kwargs) img_fpath = pt.save_figure(fpath=fpath, fig=fig, **savekw) pt.plt.close(fig) if was_interactive: mpl.interactive(was_interactive) return img_fpath @profile def 
imwrite_single_annotmatch2(cm, qreq_, aid, fpath, **kwargs): import wbia.plottool as pt import matplotlib as mpl save_keys = ['dpi', 'figsize', 'saveax', 'verbose'] save_vals = ut.dict_take_pop(kwargs, save_keys, None) savekw = dict(zip(save_keys, save_vals)) was_interactive = mpl.is_interactive() if was_interactive: mpl.interactive(False) fnum = pt.ensure_fnum(kwargs.pop('fnum', None)) fig = pt.plt.figure(fnum) fig.clf() cm.show_single_annotmatch(qreq_, aid, colorbar_=False, fnum=fnum, **kwargs) axes_extents = pt.extract_axes_extents(fig) assert len(axes_extents) == 1, 'more than one axes' extent = axes_extents[0] fig.savefig(fpath, bbox_inches=extent, **savekw) pt.plt.close(fig) if was_interactive: mpl.interactive(was_interactive) @profile def render_single_annotmatch(cm, qreq_, aid, **kwargs): import io import cv2 import wbia.plottool as pt import matplotlib as mpl save_keys = ['dpi', 'figsize', 'saveax', 'verbose'] save_vals = ut.dict_take_pop(kwargs, save_keys, None) savekw = dict(zip(save_keys, save_vals)) was_interactive = mpl.is_interactive() if was_interactive: mpl.interactive(False) fnum = pt.ensure_fnum(kwargs.pop('fnum', None)) fig = pt.plt.figure(fnum) fig.clf() cm.show_single_annotmatch(qreq_, aid, colorbar_=False, fnum=fnum, **kwargs) axes_extents = pt.extract_axes_extents(fig) assert len(axes_extents) == 1, 'more than one axes' extent = axes_extents[0] with io.BytesIO() as stream: fig.savefig(stream, bbox_inches=extent, **savekw) stream.seek(0) data = np.fromstring(stream.getvalue(), dtype=np.uint8) image = cv2.imdecode(data, 1) pt.plt.close(fig) if was_interactive: mpl.interactive(was_interactive) return image def qt_inspect_gui(cm, ibs, ranks_top=6, qreq_=None, name_scoring=False): logger.info('[cm] qt_inspect_gui') from wbia.gui import inspect_gui from wbia import guitool guitool.ensure_qapp() cm_list = [cm] logger.info('[inspect_matches] make_qres_widget') qres_wgt = inspect_gui.QueryResultsWidget( ibs, cm_list, ranks_top=ranks_top, name_scoring=name_scoring, qreq_=qreq_ ) logger.info('[inspect_matches] show') qres_wgt.show() logger.info('[inspect_matches] raise') qres_wgt.raise_() return qres_wgt class _ChipMatchScorers(object): @profile def evaluate_csum_annot_score(cm, qreq_=None): fs_list = cm.get_fsv_prod_list() csum_scores = np.array([np.sum(fs) for fs in fs_list]) cm.algo_annot_scores['csum'] = csum_scores @profile def evaluate_nsum_name_score(cm, qreq_): cm.evaluate_dnids(qreq_) fmech_scores = name_scoring.compute_fmech_score(cm, qreq_=qreq_) try: normsum = qreq_.qparams.normsum if normsum: assert False, 'depricated' except AttributeError: pass cm.algo_name_scores['nsum'] = fmech_scores def evaluate_maxcsum_name_score(cm, qreq_): grouped_csum = vt.apply_grouping(cm.algo_annot_scores['csum'], cm.name_groupxs) maxcsum_scores = np.array([scores.max() for scores in grouped_csum]) cm.algo_name_scores['maxcsum'] = maxcsum_scores def evaluate_sumamech_name_score(cm, qreq_): grouped_csum = vt.apply_grouping(cm.algo_annot_scores['csum'], cm.name_groupxs) sumamech_score_list = np.array([scores.sum() for scores in grouped_csum]) cm.algo_name_scores['sumamech'] = sumamech_score_list @profile
Apache License 2.0
nerdland-unofficial-fans/nerdlandbot
nerdlandbot/commands/purger.py
purger.add_purger
python
async def add_purger(self, ctx: commands.Context, text_channel: str, max_age: int):
    guild_data = await get_guild_data(ctx.message.guild.id)
    if not guild_data.user_is_admin(ctx.author):
        gif = translate("not_admin_gif", await culture(ctx))
        return await ctx.send(gif)

    text_channel = text_channel.lower()
    channel = get_channel(ctx, text_channel)
    if not channel:
        await ctx.channel.send(translate("channel_nonexistant", await culture(ctx)))
        raise Exception("Invalid text channel provided")
    if isinstance(channel, discord.VoiceChannel):
        await ctx.channel.send(translate("channel_is_voice", await culture(ctx)))
        return

    channel_permissions = channel.permissions_for(ctx.me)
    if not (channel_permissions.manage_messages and channel_permissions.read_message_history):
        return await ctx.send(translate("purger_permissions", await culture(ctx)))

    add_response = await guild_data.add_purger(channel, max_age)
    msg = ""
    if add_response:
        msg = translate("purger_added", await culture(ctx)).format(str(channel.id), max_age)
    else:
        msg = translate("purger_exists", await culture(ctx)).format(str(channel.id))
    info(msg)
    await ctx.send(msg)
Add a channel to be regularly purged. :param ctx: The current context. (discord.ext.commands.Context) :param text_channel: The text channel that will be purged (str) :param max_age: The max age of messages in days (int)
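A hedged sketch of how the command might be wired up and invoked, assuming a discord.py 1.x-style bot and a "!" command prefix; the prefix, channel name, and day count are illustrative assumptions, not values from the source.

# Hypothetical usage sketch -- prefix, channel name, and max_age are assumptions.
from discord.ext import commands
from nerdlandbot.commands.purger import purger

bot = commands.Bot(command_prefix="!")
bot.add_cog(purger())  # registers add_purger alongside the cog's other commands

# An admin could then type, for example:
#   !add_purger bot-spam 7
# to have messages older than 7 days purged from the #bot-spam text channel.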
https://github.com/nerdland-unofficial-fans/nerdlandbot/blob/6271068afa4e1d0d655042f0d7a8a98ee27ffe3f/nerdlandbot/commands/purger.py#L18-L59
import discord import os import requests from discord.ext import commands, tasks from nerdlandbot.commands.GuildData import get_all_guilds_data, get_guild_data, GuildData from nerdlandbot.helpers.log import info, fatal from nerdlandbot.helpers.channel import get_channel from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture from nerdlandbot.translations.Translations import get_text as translate class purger(commands.Cog, name="Purger_lists"): @commands.command( name="add_purger", usage="add_purger_usage", help="add_purger_help", ) @commands.guild_only()
MIT License
ucopacme/aws-orgs
awsorgs/orgs.py
list_policies_in_ou
python
def list_policies_in_ou(org_client, ou_id):
    policies_in_ou = org_client.list_policies_for_target(
        TargetId=ou_id,
        Filter='SERVICE_CONTROL_POLICY',
    )['Policies']
    return sorted([ou['Name'] for ou in policies_in_ou])
Query the deployed AWS organization. Return a sorted list of the names of the policies attached to the OrganizationalUnit referenced by 'ou_id'.
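A minimal, hedged sketch of calling this helper with a boto3 Organizations client; the OU id below is a placeholder, not a real identifier.

# Hypothetical usage sketch -- the OU id is a placeholder.
import boto3
from awsorgs.orgs import list_policies_in_ou

org_client = boto3.client('organizations')
for policy_name in list_policies_in_ou(org_client, 'ou-examplerootid-exampleouid'):
    print(policy_name)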
https://github.com/ucopacme/aws-orgs/blob/441e22de53de7fa462aa5c1be3dd471902942eca/awsorgs/orgs.py#L101-L108
import yaml import json import time import boto3 from docopt import docopt import awsorgs import awsorgs.utils from awsorgs.utils import * from awsorgs.spec import * def validate_accounts_unique_in_org(log, root_spec): def map_accounts(spec, account_map={}): if 'Accounts' in spec and spec['Accounts']: for account in spec['Accounts']: if account in account_map: account_map[account].append(spec['Name']) else: account_map[account] = [(spec['Name'])] if 'Child_OU' in spec and spec['Child_OU']: for child_spec in spec['Child_OU']: map_accounts(child_spec, account_map) return account_map unique = True for account, ou in list(map_accounts(root_spec).items()): if len(ou) > 1: log.error("Account '%s' set in multiple OU: %s" % (account, ou)) unique = False if not unique: log.critical("Invalid org_spec: Do not assign accounts to multiple " "Organizatinal Units") sys.exit(1) def enable_policy_type_in_root(org_client, root_id): p_type = org_client.list_roots()['Roots'][0]['PolicyTypes'] if (not p_type or (p_type[0]['Type'] == 'SERVICE_CONTROL_POLICY' and p_type[0]['Status'] != 'ENABLED')): org_client.enable_policy_type(RootId=root_id, PolicyType='SERVICE_CONTROL_POLICY') def get_parent_id(org_client, account_id): parents = org_client.list_parents(ChildId=account_id)['Parents'] try: len(parents) == 1 return parents[0]['Id'] except: raise RuntimeError("API Error: account '%s' has more than one parent: " % (account_id, parents))
MIT License
tensorflow/moonlight
moonlight/page_processors.py
create_processors
python
def create_processors(structure, staffline_extractor=None):
    yield staff_processor.StaffProcessor(structure, staffline_extractor)
    yield stems.Stems(structure)
    yield beam_processor.BeamProcessor(structure)
    yield note_dots.NoteDots(structure)
    yield CenteredRests()
    yield repeated.FixRepeatedRests()
    yield barlines.Barlines(structure)
    yield section_barlines.SectionBarlines(structure)
    yield section_barlines.MergeStandardAndBeginRepeatBars(structure)
Generator for the processors to be applied to the Page in order. Args: structure: The computed `Structure`. staffline_extractor: The staffline extractor to use for scaling glyph x coordinates. Optional. Yields: Callables which accept a single `Page` as an argument, and return it (either modifying in place or returning a modified copy).
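A short, hedged sketch of driving the generator; obtaining the Structure and Page objects is outside this module, and the wrapper function name is hypothetical.

# Hypothetical usage sketch -- `process_page` is an illustrative wrapper, not part of moonlight.
from moonlight.page_processors import create_processors

def process_page(page, structure, staffline_extractor=None):
    # Each processor accepts a Page and returns it (possibly modified in place).
    for processor in create_processors(structure, staffline_extractor):
        page = processor(page)
    return page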
https://github.com/tensorflow/moonlight/blob/b34e8a0106a7d877e4831555be8972a5254f5d96/moonlight/page_processors.py#L38-L60
from __future__ import absolute_import from __future__ import division from __future__ import print_function from moonlight.glyphs import glyph_types from moonlight.glyphs import note_dots from moonlight.glyphs import repeated from moonlight.staves import staff_processor from moonlight.structure import barlines from moonlight.structure import beam_processor from moonlight.structure import section_barlines from moonlight.structure import stems
Apache License 2.0
devopshq/teamcity
dohq_teamcity/models/vcs_labeling.py
VcsLabeling.__init__
python
def __init__(self, label_name=None, type=None, branch_filter=None, vcs_roots=None, teamcity=None):
    self._label_name = None
    self._type = None
    self._branch_filter = None
    self._vcs_roots = None
    self.discriminator = None

    if label_name is not None:
        self.label_name = label_name
    if type is not None:
        self.type = type
    if branch_filter is not None:
        self.branch_filter = branch_filter
    if vcs_roots is not None:
        self.vcs_roots = vcs_roots
    super(VcsLabeling, self).__init__(teamcity=teamcity)
VcsLabeling - a model defined in Swagger
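A brief, hedged construction example; the label name, type, and branch filter values are illustrative and follow common TeamCity conventions rather than anything stated in the source.

# Hypothetical usage sketch -- all field values are illustrative.
from dohq_teamcity.models.vcs_labeling import VcsLabeling

labeling = VcsLabeling(
    label_name='build-%system.build.number%',
    type='SUCCESSFUL_ONLY',
    branch_filter='+:refs/heads/master',
)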
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/vcs_labeling.py#L36-L53
from dohq_teamcity.custom.base_model import TeamCityObject class VcsLabeling(TeamCityObject): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'label_name': 'str', 'type': 'str', 'branch_filter': 'str', 'vcs_roots': 'VcsRoots' } attribute_map = { 'label_name': 'labelName', 'type': 'type', 'branch_filter': 'branchFilter', 'vcs_roots': 'vcsRoots' }
MIT License
jmurty/xml4h
xml4h/nodes.py
Element.attribute_node
python
def attribute_node(self, name, ns_uri=None):
    attr_impl_node = self.adapter.get_node_attribute_node(
        self.impl_node, name, ns_uri)
    return self.adapter.wrap_node(
        attr_impl_node, self.adapter.impl_document, self.adapter)
:param string name: the name of the attribute to return. :param ns_uri: a URI defining a namespace constraint on the attribute. :type ns_uri: string or None :return: the attribute named ``name`` (and matching ``ns_uri``, if given) as an :class:`Attribute` node.
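A small, hedged usage sketch; the XML snippet is illustrative, and it assumes xml4h.parse accepts a literal XML string.

# Hypothetical usage sketch -- the XML document below is illustrative only.
import xml4h

doc = xml4h.parse('<doc><item id="42" label="answer"/></doc>')
item = doc.find_first('item')
attr = item.attribute_node('id')
if attr is not None:
    print(attr.name, attr.value)  # -> id 42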
https://github.com/jmurty/xml4h/blob/83bc0a91afe5d6e17d6c99ec43dc0aec9593cc06/xml4h/nodes.py#L917-L929
import six import collections import functools import xml4h ELEMENT_NODE = 1 ATTRIBUTE_NODE = 2 TEXT_NODE = 3 CDATA_NODE = 4 ENTITY_REFERENCE_NODE = 5 ENTITY_NODE = 6 PROCESSING_INSTRUCTION_NODE = 7 COMMENT_NODE = 8 DOCUMENT_NODE = 9 DOCUMENT_TYPE_NODE = 10 DOCUMENT_FRAGMENT_NODE = 11 NOTATION_NODE = 12 class Node(object): XMLNS_URI = 'http://www.w3.org/2000/xmlns/' def __init__(self, node, adapter): if node is None: raise xml4h.exceptions.IncorrectArgumentTypeException( node, [object]) if adapter is None: raise xml4h.exceptions.IncorrectArgumentTypeException( adapter, [object]) self._impl_node = node self._adapter = adapter def __eq__(self, other): if self is other: return True elif not isinstance(other, Node): return False return (self.impl_document == other.impl_document and self.impl_node == other.impl_node) def __repr__(self): return '<%s.%s>' % ( self.__class__.__module__, self.__class__.__name__) @property def impl_node(self): return self._impl_node @property def impl_document(self): return self.adapter.impl_document @property def adapter(self): return self._adapter @property def adapter_class(self): return self._adapter.__class__ def has_feature(self, feature_name): return self.adapter.has_feature(feature_name) @property def document(self): if self.is_document: return self return self.adapter.wrap_document(self.adapter.impl_document) @property def root(self): if self.is_root: return self return self.adapter.wrap_node( self.adapter.impl_root_element, self.adapter.impl_document, self.adapter) @property def is_root(self): return self.impl_node == self.adapter.impl_root_element @property def node_type(self): return self._node_type def is_type(self, node_type_constant): return self.node_type == node_type_constant @property def is_element(self): return self.is_type(ELEMENT_NODE) @property def is_attribute(self): return self.is_type(ATTRIBUTE_NODE) @property def is_text(self): return self.is_type(TEXT_NODE) @property def is_cdata(self): return self.is_type(CDATA_NODE) @property def is_entity_reference(self): return self.is_type(ENTITY_REFERENCE_NODE) @property def is_entity(self): return self.is_type(ENTITY_NODE) @property def is_processing_instruction(self): return self.is_type(PROCESSING_INSTRUCTION_NODE) @property def is_comment(self): return self.is_type(COMMENT_NODE) @property def is_document(self): return self.is_type(DOCUMENT_NODE) @property def is_document_type(self): return self.is_type(DOCUMENT_TYPE_NODE) @property def is_document_fragment(self): return self.is_type(DOCUMENT_FRAGMENT_NODE) @property def is_notation(self): return self.is_type(NOTATION_NODE) def _convert_nodelist(self, impl_nodelist): nodelist = [ self.adapter.wrap_node(n, self.adapter.impl_document, self.adapter) for n in impl_nodelist] return NodeList(nodelist) @property def parent(self): parent_impl_node = self.adapter.get_node_parent(self.impl_node) return self.adapter.wrap_node( parent_impl_node, self.adapter.impl_document, self.adapter) @property def ancestors(self): ancestors = [] p = self.parent while p: ancestors.append(p) p = p.parent return NodeList(ancestors) @property def children(self): impl_nodelist = self.adapter.get_node_children(self.impl_node) return self._convert_nodelist(impl_nodelist) def child(self, local_name=None, name=None, ns_uri=None, node_type=None, filter_fn=None): return self.children(name=name, local_name=local_name, ns_uri=ns_uri, node_type=node_type, filter_fn=filter_fn, first_only=True) @property def attributes(self): return None @property def attribute_nodes(self): return None 
@property def siblings(self): impl_nodelist = self.adapter.get_node_children(self.parent.impl_node) return self._convert_nodelist( [n for n in impl_nodelist if n != self.impl_node]) @property def siblings_before(self): impl_nodelist = self.adapter.get_node_children(self.parent.impl_node) before_nodelist = [] for n in impl_nodelist: if n == self.impl_node: break before_nodelist.append(n) return self._convert_nodelist(before_nodelist) @property def siblings_after(self): impl_nodelist = self.adapter.get_node_children(self.parent.impl_node) after_nodelist = [] is_after_myself = False for n in impl_nodelist: if is_after_myself: after_nodelist.append(n) elif n == self.impl_node: is_after_myself = True return self._convert_nodelist(after_nodelist) @property def namespace_uri(self): return self.adapter.get_node_namespace_uri(self.impl_node) ns_uri = namespace_uri def delete(self, destroy=True): removed_child = self.adapter.remove_node_child( self.adapter.get_node_parent(self.impl_node), self.impl_node, destroy_node=destroy) if removed_child is not None: return self.adapter.wrap_node(removed_child, None, self.adapter) else: return None def clone_node(self, node): if isinstance(node, xml4h.nodes.Node): child_impl_node = node.impl_node else: child_impl_node = node self.adapter.import_node(self.impl_node, child_impl_node, clone=True) def transplant_node(self, node): if isinstance(node, xml4h.nodes.Node): child_impl_node = node.impl_node original_parent_impl_node = node.parent.impl_node else: child_impl_node = node original_parent_impl_node = self.adapter.get_node_parent(node) self.adapter.import_node(self.impl_node, child_impl_node, original_parent_impl_node, clone=False) def find(self, name=None, ns_uri=None, first_only=False): if name is None: name = '*' if ns_uri is None: ns_uri = '*' impl_nodelist = self.adapter.find_node_elements( self.impl_node, name=name, ns_uri=ns_uri) if first_only: if impl_nodelist: return self.adapter.wrap_node( impl_nodelist[0], self.adapter.impl_document, self.adapter) else: return None return self._convert_nodelist(impl_nodelist) def find_first(self, name=None, ns_uri=None): return self.find(name=name, ns_uri=ns_uri, first_only=True) def find_doc(self, name=None, ns_uri=None, first_only=False): return self.document.find(name=name, ns_uri=ns_uri, first_only=first_only) def write(self, writer, encoding='utf-8', indent=0, newline='', omit_declaration=False, node_depth=0, quote_char='"'): xml4h.write_node(self, writer, encoding=encoding, indent=indent, newline=newline, omit_declaration=omit_declaration, node_depth=node_depth, quote_char=quote_char) def write_doc(self, writer, *args, **kwargs): self.document.write(writer, *args, **kwargs) def xml(self, encoding='utf-8', indent=4, **kwargs): if encoding is None: writer = six.StringIO() else: writer = six.BytesIO() self.write(writer, encoding=encoding, indent=indent, **kwargs) xml_bytes = writer.getvalue() if encoding: return xml_bytes.decode(encoding) else: return xml_bytes def xml_doc(self, encoding='utf-8', **kwargs): return self.document.xml(encoding=encoding, **kwargs) class NodeAttrAndChildElementLookupsMixin(object): def __getitem__(self, attr_name): result = self.attributes[attr_name] if result is None: raise KeyError(attr_name) else: return result def __getattr__(self, child_name): if child_name.startswith('_'): pass else: if child_name.endswith('_'): child_name = child_name[:-1] results = self.children(local_name=child_name, node_type=Element) if len(results) == 1: return results[0] elif len(results) > 1: return 
results raise AttributeError( "%s object has no attribute '%s'" % (self, child_name)) class XPathMixin(object): def _maybe_wrap_node(self, node): if isinstance(node, (str, int, float)): return node else: return self.adapter.wrap_node( node, self.adapter.impl_document, self.adapter) def xpath(self, xpath, **kwargs): result = self.adapter.xpath_on_node(self.impl_node, xpath, **kwargs) if isinstance(result, (list, tuple)): return [self._maybe_wrap_node(r) for r in result] else: return self._maybe_wrap_node(result) class Document(Node, NodeAttrAndChildElementLookupsMixin, XPathMixin): _node_type = DOCUMENT_NODE class DocumentType(Node): _node_type = DOCUMENT_TYPE_NODE class DocumentFragment(Node): _node_type = DOCUMENT_FRAGMENT_NODE class Notation(Node): _node_type = NOTATION_NODE class Entity(Node): _node_type = ENTITY_NODE @property def notation_name(self): return self.name class EntityReference(Node): _node_type = ENTITY_REFERENCE_NODE class NameValueNodeMixin(Node): def __repr__(self): return '<%s.%s: "%s">' % ( self.__class__.__module__, self.__class__.__name__, self.name) def _tounicode(self, value): if value is None or isinstance(value, six.string_types): return value else: return six.text_type(value) @property def prefix(self): return self._tounicode( self.adapter.get_node_name_prefix(self.impl_node)) @property def local_name(self): return self._tounicode( self.adapter.get_node_local_name(self.impl_node)) @property def name(self): return self._tounicode( self.adapter.get_node_name(self.impl_node)) @property def value(self): return self._tounicode( self.adapter.get_node_value(self.impl_node)) @value.setter def value(self, value): self.adapter.set_node_value(self.impl_node, value) class Text(NameValueNodeMixin): _node_type = TEXT_NODE class CDATA(NameValueNodeMixin): _node_type = CDATA_NODE class Comment(NameValueNodeMixin): _node_type = COMMENT_NODE class Attribute(NameValueNodeMixin): _node_type = ATTRIBUTE_NODE class ProcessingInstruction(NameValueNodeMixin): _node_type = PROCESSING_INSTRUCTION_NODE target = NameValueNodeMixin.name data = NameValueNodeMixin.value class Element(NameValueNodeMixin, NodeAttrAndChildElementLookupsMixin, XPathMixin): _node_type = ELEMENT_NODE @property def builder(self): return xml4h.Builder(self) @property def text(self): return self.adapter.get_node_text(self.impl_node) @text.setter def text(self, text): self.adapter.set_node_text(self.impl_node, text) def _set_element_attributes(self, element, attr_obj=None, ns_uri=None, **attr_dict): if attr_obj is not None: if isinstance(attr_obj, dict): attr_dict.update(attr_obj) elif isinstance(attr_obj, (list, tuple)): for n, v in attr_obj: attr_dict[n] = v else: raise xml4h.exceptions.IncorrectArgumentTypeException( attr_obj, [dict, list, tuple]) def cmp(a, b): return (a > b) - (a < b) def _xmlns_first(x, y): nx, ny = x[0], y[0] if nx.startswith('xmlns') and ny.startswith('xmlns'): return cmp(nx, ny) elif nx.startswith('xmlns'): return -1 elif ny.startswith('xmlns'): return 1 else: return cmp(nx, ny) _xmlns_first = functools.cmp_to_key(_xmlns_first) attr_list = sorted(list(attr_dict.items()), key=_xmlns_first) for attr_name, v in attr_list: prefix, name, my_ns_uri = self.adapter.get_ns_info_from_node_name( attr_name, element) if ' ' in name: raise ValueError("Invalid attribute name value contains space") if not prefix and '}' in attr_name: prefix = self.adapter.get_ns_prefix_for_uri( element, my_ns_uri, auto_generate_prefix=True) self.adapter.set_node_attribute_value(element, 'xmlns:%s' % prefix, my_ns_uri, 
ns_uri=self.XMLNS_URI) if my_ns_uri is None: my_ns_uri = ns_uri if ns_uri is not None: if ns_uri == self.adapter.get_node_namespace_uri(element): my_ns_uri = None if not isinstance(v, six.string_types): v = six.text_type(v) if prefix: qname = '%s:%s' % (prefix, name) else: qname = name self.adapter.set_node_attribute_value( element, qname, v, ns_uri=my_ns_uri) def set_attributes(self, attr_obj=None, ns_uri=None, **attr_dict): self._set_element_attributes(self.impl_node, attr_obj=attr_obj, ns_uri=ns_uri, **attr_dict) @property def attributes(self): attr_impl_nodes = self.adapter.get_node_attributes(self.impl_node) return AttributeDict(attr_impl_nodes, self.impl_node, self.adapter) @attributes.setter def attributes(self, attr_obj): for attr_name in [a for a in self.attributes if 'xmlns' not in a]: self.adapter.remove_node_attribute(self.impl_node, attr_name) for attr_name in self.attributes: self.adapter.remove_node_attribute(self.impl_node, attr_name) self._set_element_attributes(self.impl_node, attr_obj=attr_obj) attrib = attributes attrs = attributes @property def attribute_nodes(self): impl_attr_nodes = self.adapter.get_node_attributes(self.impl_node) wrapped_attr_nodes = [ self.adapter.wrap_node(a, self.adapter.impl_document, self.adapter) for a in impl_attr_nodes] return sorted(wrapped_attr_nodes, key=lambda x: x.name)
MIT License
renatahodovan/fuzzinator
fuzzinator/call/call_decorator.py
CallDecorator.call
python
def call(self, cls, obj, *, test, **kwargs):
    return super(cls, obj).__call__(test=test, **kwargs)
Call ``obj`` of type ``cls``. The default operation is to call the ``__call__`` method of the original version of the SUT call class and return its result. Sub-classes of :class:`CallDecorator` may override this method if customization of calling the SUT is needed. Usually, the overridden method has to call the original ``__call__`` at some point, which can be performed either by ``super().call(cls, obj, test=test, **kwargs)`` (which will call this method, and then transitively the original ``__call__``) or by ``super(cls, obj).__call__(test=test, **kwargs)`` (which calls the original ``__call__`` directly). :param cls: The decorated version of the SUT call class, as returned by :meth:`__call__`. :param obj: The SUT call instance to invoke. :param test: Input or test case for the SUT call, as defined by :meth:`Call.__call__`. :return: The result of the SUT call, as defined by :meth:`Call.__call__`.
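A hedged sketch of the override pattern the docstring describes: a custom decorator that times the SUT invocation. The class name and the `call_time` key are illustrative and not part of fuzzinator.

# Hypothetical decorator sketch -- illustrative only, not part of fuzzinator.
import time

from fuzzinator.call.call_decorator import CallDecorator

class TimingDecorator(CallDecorator):

    def call(self, cls, obj, *, test, **kwargs):
        start = time.time()
        issue = super().call(cls, obj, test=test, **kwargs)  # runs the original SUT call
        if issue is not None:
            issue['call_time'] = time.time() - start  # annotate the issue with the measured duration
        return issue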
https://github.com/renatahodovan/fuzzinator/blob/49e6cf1b5dad59e82f7bed5f14b23dbd7c520ad0/fuzzinator/call/call_decorator.py#L81-L103
from inspect import signature class CallDecorator(object): def init(self, cls, obj, **kwargs): super(cls, obj).__init__(**kwargs) def enter(self, cls, obj): return super(cls, obj).__enter__() def exit(self, cls, obj, *exc): return super(cls, obj).__exit__(*exc)
BSD 3-Clause New or Revised License
berkeleyautomation/meshrender
meshrender/render.py
OpenGLRenderer._depth
python
def _depth(self):
    camera = self.scene.camera
    width = camera.intrinsics.width
    height = camera.intrinsics.height

    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf)
    glViewport(0, 0, width, height)
    glClearColor(0.0, 0.0, 0.0, 1.0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    glUseProgram(self._depth_shader)
    v_id = glGetUniformLocation(self._depth_shader, 'V')
    p_id = glGetUniformLocation(self._depth_shader, 'P')
    m_id = glGetUniformLocation(self._depth_shader, 'M')
    glUniformMatrix4fv(v_id, 1, GL_TRUE, camera.V)
    glUniformMatrix4fv(p_id, 1, GL_TRUE, camera.P)

    for vaid, obj in zip(self._vaids, self.scene.objects.values()):
        if not obj.enabled:
            continue
        material = obj.material
        mesh = obj.mesh
        glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix)
        glBindVertexArray(vaid)
        n_instances = 1
        if isinstance(obj, InstancedSceneObject):
            n_instances = obj.n_instances
        if material.smooth:
            glDrawElementsInstanced(GL_TRIANGLES, 3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances)
        else:
            glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances)
        glBindVertexArray(0)

    glUseProgram(0)
    glFlush()

    glBindFramebuffer(GL_READ_FRAMEBUFFER, self._framebuf)
    depth_buf = (GLfloat * (width * height))(0)
    glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depth_buf)
    depth_im = np.frombuffer(depth_buf, dtype=np.float32).reshape((height, width))
    depth_im = np.flip(depth_im, axis=0)
    inf_inds = (depth_im == 1.0)
    depth_im = 2.0 * depth_im - 1.0
    z_near, z_far = camera.z_near, camera.z_far
    depth_im = 2.0 * z_near * z_far / (z_far + z_near - depth_im * (z_far - z_near))
    depth_im[inf_inds] = 0.0
    return depth_im
Render a depth image of the scene.
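A hedged usage sketch: depth-only rendering goes through the public render method, which dispatches to _depth when render_color is False. Building the Scene (camera, objects, lights) is omitted here.

# Hypothetical usage sketch -- constructing `scene` is outside this excerpt.
from meshrender.render import OpenGLRenderer

def render_depth(scene):
    renderer = OpenGLRenderer(scene)
    depth_im = renderer.render(render_color=False)  # dispatches to _depth() internally
    renderer.close()
    return depth_im  # float array of shape (height, width); 0.0 where nothing was hit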
https://github.com/berkeleyautomation/meshrender/blob/25b6fb711ef7a7871a5908459e6be5c76a04b631/meshrender/render.py#L416-L478
import ctypes import numpy as np import weakref import os _USE_EGL_OFFSCREEN = False if 'MESHRENDER_EGL_OFFSCREEN' in os.environ: os.environ['PYOPENGL_PLATFORM'] = 'egl' _USE_EGL_OFFSCREEN = True try: import OpenGL from OpenGL.GL import * from OpenGL.GL import shaders from OpenGL.arrays import * except Exception: import logging logging.warning('Cannot import OpenGL -- rendering will be broken!') from .constants import MAX_N_LIGHTS from .light import AmbientLight, PointLight, DirectionalLight from .shaders import vertex_shader, fragment_shader, depth_vertex_shader, depth_fragment_shader from .scene_object import InstancedSceneObject C_VOID_PS = [] for i in range(5): C_VOID_PS.append(ctypes.c_void_p(4*4*i)) class OpenGLRenderer(object): def __init__(self, scene): self.scene = scene self._width = self.scene.camera.intrinsics.width self._height = self.scene.camera.intrinsics.height self._vaids = None self._colorbuf, self._depthbuf = None, None self._framebuf = None self._init_gl_context() self._bind_frame_buffer() glEnable(GL_DEPTH_TEST) glDepthMask(GL_TRUE) glDepthFunc(GL_LESS) glDepthRange(0.0, 1.0) self._buffers = None self._vaids = self._load_meshes() glBindVertexArray(self._vaids[0]) self._full_shader = self._load_shaders(vertex_shader, fragment_shader) self._depth_shader = self._load_shaders(depth_vertex_shader, depth_fragment_shader) glBindVertexArray(0) def _init_gl_context(self): if _USE_EGL_OFFSCREEN: self._init_egl() else: self._init_pyglet() def _make_gl_context_current(self): if not _USE_EGL_OFFSCREEN: if self._window: self._window.switch_to() def _init_pyglet(self): import pyglet pyglet.options['shadow_window'] = False self._window = None conf = pyglet.gl.Config( depth_size=24, double_buffer=True, major_version=3, minor_version=2 ) try: self._window = pyglet.window.Window(config=conf, visible=False, resizable=False, width=1, height=1) except Exception as e: raise ValueError('Failed to initialize Pyglet window with an OpenGL >= 3+ context. ' 'If you\'re logged in via SSH, ensure that you\'re running your script ' 'with vglrun (i.e. VirtualGL). 
Otherwise, the internal error message was: ' '"{}"'.format(e.message)) def _init_egl(self): from OpenGL.EGL import EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE, EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, EGL_HEIGHT, EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT, EGL_OPENGL_BIT, EGL_CONFIG_CAVEAT, EGL_NONE, EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, EGL_WIDTH, EGL_OPENGL_API, eglGetDisplay, eglInitialize, eglChooseConfig, eglBindAPI, eglCreatePbufferSurface, eglCreateContext, eglMakeCurrent, EGLConfig self._egl_display = None self._egl_surface = None self._egl_context = None config_attributes = arrays.GLintArray.asArray([ EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, 8, EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_DEPTH_SIZE, 24, EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT, EGL_OPENGL_BIT, EGL_NONE ]) major, minor = ctypes.c_long(), ctypes.c_long() num_configs = ctypes.c_long() configs = (EGLConfig*1)() orig_dpy = None if 'DISPLAY' in os.environ: orig_dpy = os.environ['DISPLAY'] del os.environ['DISPLAY'] self._egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY) if orig_dpy is not None: os.environ['DISPLAY'] = orig_dpy eglInitialize(self._egl_display, major, minor) eglChooseConfig(self._egl_display, config_attributes, configs, 1, num_configs) eglBindAPI(EGL_OPENGL_API) self._egl_surface = eglCreatePbufferSurface(self._egl_display, configs[0], [EGL_WIDTH, self._width, EGL_HEIGHT, self._height, EGL_NONE]) self._egl_context = eglCreateContext(self._egl_display, configs[0], EGL_NO_CONTEXT, None) eglMakeCurrent(self._egl_display, self._egl_surface, self._egl_surface, self._egl_context) @property def scene(self): return self._scene() @scene.setter def scene(self, s): self._scene = weakref.ref(s) def render(self, render_color=True, front_and_back=False): self._make_gl_context_current() width = self.scene.camera.intrinsics.width height = self.scene.camera.intrinsics.height if width != self._width or height != self._height: self._width = width self._height = height self._bind_frame_buffer() if render_color: return self._color_and_depth(front_and_back) else: return self._depth() def close(self): if self._full_shader: glDeleteProgram(self._full_shader) self._full_shader = None if self._depth_shader: glDeleteProgram(self._depth_shader) self._depth_shader = None if self._buffers: glDeleteBuffers(len(self._buffers), self._buffers) self._buffers = None if self._colorbuf and self._depthbuf: glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf]) self._colorbuf = None self._depthbuf = None if self._framebuf: glDeleteFramebuffers(1, [self._framebuf]) self._framebuf = None OpenGL.contextdata.cleanupContext() if _USE_EGL_OFFSCREEN: from OpenGL.EGL import eglDestroySurface, eglDestroyContext, eglTerminate if self._egl_display is not None: if self._egl_context is not None: eglDestroyContext(self._egl_display, self._egl_context) self._egl_context = None if self._egl_surface: eglDestroySurface(self._egl_display, self._egl_surface) self._egl_surface = None eglTerminate(self._egl_display) self._egl_display = None else: if self._window is not None: try: self._window.context.destroy() self._window.close() except: pass self._window = None def _bind_frame_buffer(self): if self._framebuf is not None: glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf]) glDeleteFramebuffers(1, [self._framebuf]) self._colorbuf, self._depthbuf = glGenRenderbuffers(2) glBindRenderbuffer(GL_RENDERBUFFER, self._colorbuf) 
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, self._width, self._height) glBindRenderbuffer(GL_RENDERBUFFER, self._depthbuf) glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, self._width, self._height) self._framebuf = glGenFramebuffers(1) glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf) glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, self._colorbuf) glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self._depthbuf) def _load_shaders(self, vertex_shader, fragment_shader): shader = shaders.compileProgram( shaders.compileShader(vertex_shader, GL_VERTEX_SHADER), shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER) ) return shader def _load_meshes(self): VA_ids = glGenVertexArrays(len(self.scene.objects)) self._buffers = [] if len(self.scene.objects) == 1: VA_ids = [VA_ids] null = C_VOID_PS[0] for VA_id, obj in zip(VA_ids, self.scene.objects.values()): mesh = obj.mesh material = obj.material glBindVertexArray(VA_id) if material.smooth: vertexbuffer = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer) glEnableVertexAttribArray(0) glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null) glBufferData(GL_ARRAY_BUFFER, 4*3*len(mesh.vertices), np.array(mesh.vertices.flatten(), dtype=np.float32), GL_STATIC_DRAW) normalbuffer = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, normalbuffer) glEnableVertexAttribArray(1) glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null) glBufferData(GL_ARRAY_BUFFER, 4*3*len(mesh.vertex_normals), np.array(mesh.vertex_normals.flatten(), dtype=np.float32), GL_STATIC_DRAW) elementbuffer = glGenBuffers(1) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer) glBufferData(GL_ELEMENT_ARRAY_BUFFER, 4*3*len(mesh.faces), np.array(mesh.faces.flatten(), dtype=np.int32), GL_STATIC_DRAW) self._buffers.extend([vertexbuffer, elementbuffer, normalbuffer]) else: vertexbuffer = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer) glEnableVertexAttribArray(0) glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null) glBufferData(GL_ARRAY_BUFFER, 4*3*3*len(mesh.triangles), np.array(mesh.triangles.flatten(), dtype=np.float32), GL_STATIC_DRAW) normalbuffer = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, normalbuffer) glEnableVertexAttribArray(1) glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null) normals = np.repeat(mesh.face_normals, 3, axis=0).astype(np.float32) normals = normals.flatten() glBufferData(GL_ARRAY_BUFFER, 4*len(normals), normals, GL_STATIC_DRAW) self._buffers.extend([vertexbuffer, normalbuffer]) glVertexAttribDivisor(0, 0) glVertexAttribDivisor(1, 0) modelbuf = glGenBuffers(1) self._buffers.extend([modelbuf]) glBindBuffer(GL_ARRAY_BUFFER, modelbuf) for i in range(4): glEnableVertexAttribArray(2 + i) glVertexAttribPointer(2 + i, 4, GL_FLOAT, GL_FALSE, 4*16, C_VOID_PS[i]) glVertexAttribDivisor(2 + i, 1) if isinstance(obj, InstancedSceneObject): glBufferData(GL_ARRAY_BUFFER, 4*16*len(obj.poses), None, GL_STATIC_DRAW) data = obj.raw_pose_data.flatten().astype(np.float32) glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16*len(obj.poses), data) else: glBufferData(GL_ARRAY_BUFFER, 4*16, None, GL_STATIC_DRAW) glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16, np.eye(4).flatten().astype(np.float32)) colorbuf = glGenBuffers(1) self._buffers.extend([colorbuf]) glBindBuffer(GL_ARRAY_BUFFER, colorbuf) glEnableVertexAttribArray(6) glVertexAttribPointer(6, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0]) glVertexAttribDivisor(6, 1) if isinstance(obj, InstancedSceneObject): glBufferData(GL_ARRAY_BUFFER, 
4*3*len(obj.colors), None, GL_STATIC_DRAW) data = obj.colors.flatten().astype(np.float32) glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3*len(obj.colors), data) else: glBufferData(GL_ARRAY_BUFFER, 4*3, None, GL_STATIC_DRAW) glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3, obj.material.color.astype(np.float32)) glBindVertexArray(0) glBindBuffer(GL_ARRAY_BUFFER, 0) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) return VA_ids
Apache License 2.0
azure/autorest.python
docs/samples/specification/basic/generated/azure/basic/sample/operations/_http_success_operations.py
HttpSuccessOperations.head200
python
def head200(
    self,
    **kwargs
):
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_head200_request(
        template_url=self.head200.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})
Return 200 status code if successful. :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
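A hedged sketch of calling the operation through a generated client instance; the operation-group attribute name `http_success` is an assumption about the generated client, which is not shown in this excerpt.

# Hypothetical usage sketch -- `client` is an already-constructed generated service client.
from azure.core.exceptions import HttpResponseError

def probe_http_success(client):
    try:
        client.http_success.head200()  # returns None on 200 or 404
        return True
    except HttpResponseError:
        return False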
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/docs/samples/specification/basic/generated/azure/basic/sample/operations/_http_success_operations.py#L92-L125
import functools from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .._vendor import _convert_request if TYPE_CHECKING: from typing import Any, Callable, Dict, Generic, Optional, TypeVar T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() def build_head200_request( **kwargs ): url = kwargs.pop("template_url", '/http/success/200') return HttpRequest( method="HEAD", url=url, **kwargs ) def build_head204_request( **kwargs ): url = kwargs.pop("template_url", '/http/success/204') return HttpRequest( method="HEAD", url=url, **kwargs ) def build_head404_request( **kwargs ): url = kwargs.pop("template_url", '/http/success/404') return HttpRequest( method="HEAD", url=url, **kwargs ) class HttpSuccessOperations(object): def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace
MIT License
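head200 returns None for both 200 and 404 and raises HttpResponseError otherwise; the cls keyword receives the raw pipeline response, which is the only way for a caller to tell the two accepted status codes apart. A hedged usage sketch in which client and its http_success attribute are hypothetical stand-ins for the generated service client, not names taken from the sample package:

def keep_status(pipeline_response, deserialized, response_headers):
    # cls is invoked as cls(pipeline_response, None, {}) by the operation above.
    return pipeline_response.http_response.status_code

# 'client' is assumed to be an already-constructed generated client object.
status = client.http_success.head200(cls=keep_status)
print("HEAD /http/success/200 ->", status)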
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/dvb_subtitle_input_stream.py
DvbSubtitleInputStream.selection_mode
python
def selection_mode(self, selection_mode): if selection_mode is not None: if not isinstance(selection_mode, StreamSelectionMode): raise TypeError("Invalid type for `selection_mode`, type has to be `StreamSelectionMode`") self._selection_mode = selection_mode
Sets the selection_mode of this DvbSubtitleInputStream. Specifies the algorithm for selecting a stream from the input file. Supported values for VOD encodings: AUTO, POSITION_ABSOLUTE, SUBTITLE_RELATIVE. Supported values for LIVE encodings: POSITION_ABSOLUTE :param selection_mode: The selection_mode of this DvbSubtitleInputStream. :type: StreamSelectionMode
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/dvb_subtitle_input_stream.py#L145-L159
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.input_stream import InputStream from bitmovin_api_sdk.models.stream_selection_mode import StreamSelectionMode import pprint import six class DvbSubtitleInputStream(InputStream): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, input_id=None, input_path=None, selection_mode=None, position=None): super(DvbSubtitleInputStream, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._input_id = None self._input_path = None self._selection_mode = None self._position = None self.discriminator = None if input_id is not None: self.input_id = input_id if input_path is not None: self.input_path = input_path if selection_mode is not None: self.selection_mode = selection_mode if position is not None: self.position = position @property def openapi_types(self): types = {} if hasattr(super(DvbSubtitleInputStream, self), 'openapi_types'): types = getattr(super(DvbSubtitleInputStream, self), 'openapi_types') types.update({ 'input_id': 'string_types', 'input_path': 'string_types', 'selection_mode': 'StreamSelectionMode', 'position': 'int' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(DvbSubtitleInputStream, self), 'attribute_map'): attributes = getattr(super(DvbSubtitleInputStream, self), 'attribute_map') attributes.update({ 'input_id': 'inputId', 'input_path': 'inputPath', 'selection_mode': 'selectionMode', 'position': 'position' }) return attributes @property def input_id(self): return self._input_id @input_id.setter def input_id(self, input_id): if input_id is not None: if not isinstance(input_id, string_types): raise TypeError("Invalid type for `input_id`, type has to be `string_types`") self._input_id = input_id @property def input_path(self): return self._input_path @input_path.setter def input_path(self, input_path): if input_path is not None: if not isinstance(input_path, string_types): raise TypeError("Invalid type for `input_path`, type has to be `string_types`") self._input_path = input_path @property def selection_mode(self): return self._selection_mode @selection_mode.setter
MIT License
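Because the setter type-checks its argument, callers must pass a StreamSelectionMode enum member rather than a bare string. A short sketch using the model and enum imports shown in the context above (the input path is made up for illustration):

from bitmovin_api_sdk.models.dvb_subtitle_input_stream import DvbSubtitleInputStream
from bitmovin_api_sdk.models.stream_selection_mode import StreamSelectionMode

stream = DvbSubtitleInputStream(input_path="subtitles/movie.ts")  # hypothetical path
stream.selection_mode = StreamSelectionMode.AUTO  # enum member: accepted
try:
    stream.selection_mode = "AUTO"  # plain string: rejected by the isinstance check
except TypeError as err:
    print(err)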
berkeleyautomation/autolab_core
autolab_core/points.py
PointCloud.x_coords
python
def x_coords(self): return self._data[0, :]
:obj:`numpy.ndarray` of float : An array containing all x coordinates in the cloud.
https://github.com/berkeleyautomation/autolab_core/blob/cda081d2e07e3fe6cc9f3e8c86eea92330910d20/autolab_core/points.py#L645-L649
from abc import ABCMeta, abstractmethod import numbers import os import numpy as np from .primitives import Box class BagOfPoints(object): __metaclass__ = ABCMeta def __init__(self, data, frame): if not isinstance(data, np.ndarray): raise ValueError( "Must initialize bag of points with a numpy ndarray" ) if not isinstance(frame, str): raise ValueError("Must provide string name of frame of data") self._check_valid_data(data) self._data = self._preprocess_data(data) self._frame = frame @abstractmethod def _check_valid_data(self, data): pass def _preprocess_data(self, data): if len(data.shape) == 1: data = data[:, np.newaxis] return data @property def shape(self): return self._data.shape @property def frame(self): return self._frame @property def data(self): return self._data.squeeze() @property def dim(self): return self._data.shape[0] @property def num_points(self): return self._data.shape[1] def copy(self): return type(self)(self._data.copy(), self._frame) def save(self, filename): _, file_ext = os.path.splitext(filename) if file_ext == ".npy": np.save(filename, self._data) elif file_ext == ".npz": np.savez_compressed(filename, self._data) else: raise ValueError( "Extension %s not supported for point saves." % (file_ext) ) def load_data(filename): _, file_ext = os.path.splitext(filename) data = None if file_ext == ".npy": data = np.load(filename) elif file_ext == ".npz": data = np.load(filename)["arr_0"] else: raise ValueError( "Extension %s not supported for point reads" % (file_ext) ) return data def __getitem__(self, i): if isinstance(i, int): if i >= self.num_points: raise ValueError("Index %d is out of bounds" % (i)) return Point(self._data[:, i], self._frame) if isinstance(i, list): i = np.array(i) if isinstance(i, np.ndarray): if np.max(i) >= self.num_points: raise ValueError("Index %d is out of bounds" % (np.max(i))) return PointCloud(self._data[:, i], self._frame) if isinstance(i, slice): return PointCloud(self._data[:, i], self._frame) raise ValueError("Type %s not supported for indexing" % (type(i))) def __str__(self): return str(self.data) class BagOfVectors(BagOfPoints): pass class Point(BagOfPoints): def __init__(self, data, frame="unspecified"): BagOfPoints.__init__(self, data, frame) def _check_valid_data(self, data): if len(data.shape) == 2 and data.shape[1] != 1: raise ValueError( "Can only initialize Point from a single Nx1 array" ) @property def vector(self): return self._data.squeeze() @property def x(self): return self.vector[0] @property def y(self): return self.vector[1] @property def z(self): return self.vector[2] def __getitem__(self, dim): return self.vector[dim] def __add__(self, other_pt): if isinstance(other_pt, Point) and other_pt.dim == self.dim: if self._frame != other_pt.frame: raise ValueError("Frames must be the same for addition") return Point(self.data + other_pt.data, frame=self._frame) elif ( isinstance(other_pt, np.ndarray) and other_pt.shape == self.data.shape ): return Point(self.data + other_pt, frame=self._frame) raise ValueError( "Can only add to other Point objects or numpy ndarrays " "of the same dim" ) def __sub__(self, other_pt): return self + -1 * other_pt def __mul__(self, mult): if isinstance(mult, numbers.Number): return Point(mult * self._data, self._frame) raise ValueError( "Type %s not supported. 
Only scalar multiplication is supported" % (type(mult)) ) def __rmul__(self, mult): return self.__mul__(mult) def __div__(self, div): return self.__truediv__(div) def __rdiv__(self, div): return self.__rtruediv__(div) def __truediv__(self, div): if not isinstance(div, numbers.Number): raise ValueError( "Type %s not supported. Only scalar division is supported" % (type(div)) ) return self.__mul__(1.0 / div) def __rtruediv__(self, div): if isinstance(div, numbers.Number): return Point(div / self._data, self._frame) raise ValueError( "Type %s not supported. Only scalar division is supported" % (type(div)) ) @staticmethod def open(filename, frame="unspecified"): data = BagOfPoints.load_data(filename) return Point(data, frame) class Direction(BagOfVectors): def __init__(self, data, frame): BagOfPoints.__init__(self, data, frame) def _check_valid_data(self, data): if len(data.shape) == 2 and data.shape[1] != 1: raise ValueError( "Can only initialize Direction from a single Nx1 array" ) if np.abs(np.linalg.norm(data) - 1.0) > 1e-4: raise ValueError("Direction data must have norm=1.0") def orthogonal_basis(self): if self.dim == 3: x_arr = np.array([-self.data[1], self.data[0], 0]) if np.linalg.norm(x_arr) == 0: x_arr = np.array([self.data[2], 0, 0]) x_arr = x_arr / np.linalg.norm(x_arr) y_arr = np.cross(self.data, x_arr) return Direction(x_arr, frame=self.frame), Direction( y_arr, frame=self.frame ) raise NotImplementedError( "Orthogonal basis only supported for 3 dimensions" ) @staticmethod def open(filename, frame="unspecified"): data = BagOfPoints.load_data(filename) return Direction(data, frame) class Plane3D(object): def __init__(self, n, x0): if not isinstance(n, Direction) or n.dim != 3: raise ValueError("Plane normal must be a 3D direction") if not isinstance(x0, Point) or x0.dim != 3: raise ValueError("Plane offset must be a 3D point") self._n = n self._x0 = x0 def split_points(self, point_cloud): if not isinstance(point_cloud, PointCloud): raise ValueError("Can only split point clouds") above_plane = ( point_cloud._data - np.tile(self._x0.data, [1, point_cloud.num_points]).T.dot( self._n ) > 0 ) above_plane = point_cloud.z_coords > 0 & above_plane below_plane = ( point_cloud._data - np.tile(self._x0.data, [1, point_cloud.num_points]).T.dot( self._n ) <= 0 ) below_plane = point_cloud.z_coords > 0 & below_plane above_data = point_cloud.data[:, above_plane] below_data = point_cloud.data[:, below_plane] return PointCloud(above_data, point_cloud.frame), PointCloud( below_data, point_cloud.frame ) class PointCloud(BagOfPoints): def __init__(self, data, frame="unspecified"): BagOfPoints.__init__(self, data, frame) def _check_valid_data(self, data): if data.dtype.type != np.float32 and data.dtype.type != np.float64: raise ValueError( "Must initialize point clouds with a numpy float ndarray" ) if data.shape[0] != 3: raise ValueError( "Illegal data array passed to point cloud. " "Must have 3 coordinates" ) if len(data.shape) > 2: raise ValueError( "Illegal data array passed to point cloud. " "Must have 1 or 2 dimensions" ) @property
Apache License 2.0
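x_coords simply slices the first row of the underlying 3xN data array. A small sketch, assuming PointCloud is re-exported from the package root as in released autolab_core versions:

import numpy as np
from autolab_core import PointCloud

data = np.array([[0.0, 1.0, 2.0],   # x coordinates
                 [0.5, 0.5, 0.5],   # y coordinates
                 [1.0, 1.0, 1.0]])  # z coordinates
cloud = PointCloud(data, frame="world")
print(cloud.x_coords)  # -> [0. 1. 2.]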
cloudant/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Script/Interactive.py
SConsInteractiveCmd.do_clean
python
def do_clean(self, argv): return self.do_build(['build', '--clean'] + argv[1:])
clean [TARGETS] Clean (remove) the specified TARGETS and their dependencies. 'c' is a synonym.
https://github.com/cloudant/bigcouch/blob/8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe/couchjs/scons/scons-local-2.0.1/SCons/Script/Interactive.py#L269-L274
__revision__ = "src/engine/SCons/Script/Interactive.py 5134 2010/08/16 23:02:40 bdeegan" __doc__ = """ SCons interactive mode """ import cmd import copy import os import re import shlex import sys try: import readline except ImportError: pass class SConsInteractiveCmd(cmd.Cmd): synonyms = { 'b' : 'build', 'c' : 'clean', 'h' : 'help', 'scons' : 'build', 'sh' : 'shell', } def __init__(self, **kw): cmd.Cmd.__init__(self) for key, val in kw.items(): setattr(self, key, val) if sys.platform == 'win32': self.shell_variable = 'COMSPEC' else: self.shell_variable = 'SHELL' def default(self, argv): print "*** Unknown command: %s" % argv[0] def onecmd(self, line): line = line.strip() if not line: print self.lastcmd return self.emptyline() self.lastcmd = line if line[0] == '!': line = 'shell ' + line[1:] elif line[0] == '?': line = 'help ' + line[1:] if os.sep == '\\': line = line.replace('\\', '\\\\') argv = shlex.split(line) argv[0] = self.synonyms.get(argv[0], argv[0]) if not argv[0]: return self.default(line) else: try: func = getattr(self, 'do_' + argv[0]) except AttributeError: return self.default(argv) return func(argv) def do_build(self, argv): import SCons.Node import SCons.SConsign import SCons.Script.Main options = copy.deepcopy(self.options) options, targets = self.parser.parse_args(argv[1:], values=options) SCons.Script.COMMAND_LINE_TARGETS = targets if targets: SCons.Script.BUILD_TARGETS = targets else: SCons.Script.BUILD_TARGETS = SCons.Script._build_plus_default nodes = SCons.Script.Main._build_targets(self.fs, options, targets, self.target_top) if not nodes: return x = [] for n in nodes: x.extend(n.alter_targets()[0]) nodes.extend(x) SCons.Script.Main.progress_display("scons: Clearing cached node information ...") seen_nodes = {} def get_unseen_children(node, parent, seen_nodes=seen_nodes): def is_unseen(node, seen_nodes=seen_nodes): return node not in seen_nodes return list(filter(is_unseen, node.children(scan=1))) def add_to_seen_nodes(node, parent, seen_nodes=seen_nodes): seen_nodes[node] = 1 try: rfile_method = node.rfile except AttributeError: return else: rfile = rfile_method() if rfile != node: seen_nodes[rfile] = 1 for node in nodes: walker = SCons.Node.Walker(node, kids_func=get_unseen_children, eval_func=add_to_seen_nodes) n = walker.get_next() while n: n = walker.get_next() for node in seen_nodes.keys(): node.clear() node.set_state(SCons.Node.no_state) node.implicit = None SCons.SConsign.Reset() SCons.Script.Main.progress_display("scons: done clearing node information.")
Apache License 2.0
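do_clean is nothing more than an argv rewrite followed by a delegation to do_build; the list manipulation can be checked in isolation:

# Mirrors the rewrite performed by do_clean(['clean', 'prog.o', 'lib.o']).
argv = ['clean', 'prog.o', 'lib.o']
rewritten = ['build', '--clean'] + argv[1:]
print(rewritten)  # ['build', '--clean', 'prog.o', 'lib.o']

In an interactive session the synonym table makes 'c prog.o' take the same path.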
google-research/tf-slim
tf_slim/summaries.py
add_scalar_summary
python
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False): collections = [] if print_summary else None summary_name = _get_summary_name(tensor, name, prefix) op = summary.scalar( name=summary_name, tensor=tensor, collections=collections) if print_summary: op = logging_ops.Print(op, [tensor], summary_name) ops.add_to_collection(ops.GraphKeys.SUMMARIES, op) return op
Adds a scalar summary for the given tensor. Args: tensor: a variable or op tensor. name: the optional name for the summary. prefix: An optional prefix for the summary names. print_summary: If `True`, the summary is printed to stdout when the summary is computed. Returns: A scalar `Tensor` of type `string` whose contents are the serialized `Summary` protocol buffer.
https://github.com/google-research/tf-slim/blob/77b441267e27359e94a641b906f07afc25f69b13/tf_slim/summaries.py#L119-L143
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import nn_impl as nn from tensorflow.python.summary import summary __all__ = [ 'add_histogram_summary', 'add_image_summary', 'add_scalar_summary', 'add_zero_fraction_summary', 'add_histogram_summaries', 'add_image_summaries', 'add_scalar_summaries', 'add_zero_fraction_summaries' ] def _get_summary_name(tensor, name=None, prefix=None, postfix=None): if not name: name = tensor.op.name if prefix: name = prefix + '/' + name if postfix: name = name + '/' + postfix return name def add_histogram_summary(tensor, name=None, prefix=None): return summary.histogram( _get_summary_name(tensor, name, prefix), tensor) def add_image_summary(tensor, name=None, prefix=None, print_summary=False): summary_name = _get_summary_name(tensor, name, prefix) collections = [] if print_summary else None op = summary.image( name=summary_name, tensor=tensor, collections=collections) if print_summary: op = logging_ops.Print(op, [tensor], summary_name) ops.add_to_collection(ops.GraphKeys.SUMMARIES, op) return op
Apache License 2.0
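add_scalar_summary targets TF1-style graph execution; a sketch assuming tensorflow.compat.v1 is installed alongside tf_slim:

import tensorflow.compat.v1 as tf
from tf_slim import summaries

tf.disable_eager_execution()  # graph mode, as the slim summary helpers expect
loss = tf.placeholder(tf.float32, shape=(), name='total_loss')
summaries.add_scalar_summary(loss, name='loss', prefix='train', print_summary=True)
# The op is added to GraphKeys.SUMMARIES, so a plain merge_all() picks it up
# under the name 'train/loss'.
merged = tf.summary.merge_all()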
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_persistent_volume_claim_spec.py
V1PersistentVolumeClaimSpec.volume_name
python
def volume_name(self): return self._volume_name
Gets the volume_name of this V1PersistentVolumeClaimSpec. VolumeName is the binding reference to the PersistentVolume backing this claim. :return: The volume_name of this V1PersistentVolumeClaimSpec. :rtype: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_persistent_volume_claim_spec.py#L218-L226
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1PersistentVolumeClaimSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'access_modes': 'list[str]', 'data_source': 'V1TypedLocalObjectReference', 'resources': 'V1ResourceRequirements', 'selector': 'V1LabelSelector', 'storage_class_name': 'str', 'volume_mode': 'str', 'volume_name': 'str' } attribute_map = { 'access_modes': 'accessModes', 'data_source': 'dataSource', 'resources': 'resources', 'selector': 'selector', 'storage_class_name': 'storageClassName', 'volume_mode': 'volumeMode', 'volume_name': 'volumeName' } def __init__(self, access_modes=None, data_source=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._access_modes = None self._data_source = None self._resources = None self._selector = None self._storage_class_name = None self._volume_mode = None self._volume_name = None self.discriminator = None if access_modes is not None: self.access_modes = access_modes if data_source is not None: self.data_source = data_source if resources is not None: self.resources = resources if selector is not None: self.selector = selector if storage_class_name is not None: self.storage_class_name = storage_class_name if volume_mode is not None: self.volume_mode = volume_mode if volume_name is not None: self.volume_name = volume_name @property def access_modes(self): return self._access_modes @access_modes.setter def access_modes(self, access_modes): self._access_modes = access_modes @property def data_source(self): return self._data_source @data_source.setter def data_source(self, data_source): self._data_source = data_source @property def resources(self): return self._resources @resources.setter def resources(self, resources): self._resources = resources @property def selector(self): return self._selector @selector.setter def selector(self, selector): self._selector = selector @property def storage_class_name(self): return self._storage_class_name @storage_class_name.setter def storage_class_name(self, storage_class_name): self._storage_class_name = storage_class_name @property def volume_mode(self): return self._volume_mode @volume_mode.setter def volume_mode(self, volume_mode): self._volume_mode = volume_mode @property
Apache License 2.0
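volume_name pre-binds a claim to a specific PersistentVolume instead of letting the provisioner choose one. A brief sketch using the generated models (the PV name is made up):

from kubernetes_asyncio.client import V1PersistentVolumeClaimSpec, V1ResourceRequirements

spec = V1PersistentVolumeClaimSpec(
    access_modes=["ReadWriteOnce"],
    resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
    volume_name="pv-data-0001",  # hypothetical existing PV to bind to
)
print(spec.volume_name)  # -> pv-data-0001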
rsmusllp/king-phisher
king_phisher/client/widget/managers.py
TimeSelectorButtonManager.time
python
def time(self, value): if not isinstance(value, datetime.time): raise TypeError('argument 1 must be a datetime.time instance') self._hour_spin.set_value(value.hour) self._minute_spin.set_value(value.minute) self.button.set_label(self._time_format.format(time=value))
Sets the time displayed by the button and its popover spin buttons. :param value: The new time value, as a datetime.time instance.
https://github.com/rsmusllp/king-phisher/blob/6acbbd856f849d407cc904c075441e0cf13c25cf/king_phisher/client/widget/managers.py#L452-L461
import collections import datetime import functools from king_phisher import utilities from king_phisher.client import gui_utilities from gi.repository import Gdk from gi.repository import Gtk class ButtonGroupManager(object): def __init__(self, glade_gobject, widget_type, group_name): utilities.assert_arg_type(glade_gobject, gui_utilities.GladeGObject) self.group_name = group_name name_prefix = widget_type + '_' + self.group_name + '_' self.buttons = utilities.FreezableDict() for gobj_name in glade_gobject.dependencies.children: if not gobj_name.startswith(name_prefix): continue button_name = gobj_name[len(name_prefix):] self.buttons[button_name] = glade_gobject.gobjects[gobj_name] if not len(self.buttons): raise ValueError('found no ' + widget_type + ' of group: ' + self.group_name) self.buttons.freeze() def __repr__(self): return "<{0} group_name={1!r} active={2!r} >".format(self.__class__.__name__, self.group_name, self.__str__()) class RadioButtonGroupManager(ButtonGroupManager): def __init__(self, glade_gobject, group_name): super(RadioButtonGroupManager, self).__init__(glade_gobject, 'radiobutton', group_name) def __str__(self): return self.get_active() or '' def get_active(self): for name, button in self.buttons.items(): if button.get_active(): return name return def set_active(self, button): button = self.buttons[button] button.set_active(True) button.toggled() class ToggleButtonGroupManager(ButtonGroupManager): def __str__(self): return ', '.join(name for name, active in self.get_active().items() if active) def get_active(self): return {name: button.get_active() for name, button in self.buttons.items()} def set_active(self, buttons): for name, active in buttons.items(): button = self.buttons.get(name) if button is None: raise ValueError('invalid button name: ' + name) button.set_active(active) class MenuManager(object): __slots__ = ('menu', 'items') def __init__(self, menu=None): if menu is None: menu = Gtk.Menu() menu.show() self.menu = menu self.items = collections.OrderedDict() def __getitem__(self, label): return self.items[label] def __setitem__(self, label, menu_item): return self.append_item(menu_item, set_show=False) def append(self, label, activate=None, activate_args=()): if label in self.items: raise RuntimeError('label already exists in menu items') menu_item = Gtk.MenuItem.new_with_label(label) self.items[label] = menu_item self.append_item(menu_item) if activate: menu_item.connect('activate', activate, *activate_args) return menu_item def append_item(self, menu_item, set_show=True): if set_show: menu_item.show() self.menu.append(menu_item) return menu_item def append_submenu(self, label): submenu = self.__class__() submenu_item = Gtk.MenuItem.new_with_label(label) submenu_item.set_submenu(submenu.menu) self.append_item(submenu_item) return submenu class TreeViewManager(object): def __init__(self, treeview, selection_mode=None, cb_delete=None, cb_refresh=None): self.treeview = treeview self.cb_delete = cb_delete self.cb_refresh = cb_refresh self.column_titles = collections.OrderedDict() self.column_views = {} self.treeview.connect('key-press-event', self.signal_key_press_event) if selection_mode is None: selection_mode = Gtk.SelectionMode.SINGLE treeview.get_selection().set_mode(selection_mode) self._menu_items = {} def _call_cb_delete(self): if not self.cb_delete: return selection = self.treeview.get_selection() if not selection.count_selected_rows(): return self.cb_delete(self.treeview, selection) def get_popup_menu(self, handle_button_press=True): 
popup_copy_submenu = self.get_popup_copy_submenu() popup_menu = Gtk.Menu.new() menu_item = Gtk.MenuItem.new_with_label('Copy') menu_item.set_submenu(popup_copy_submenu) popup_menu.append(menu_item) self._menu_items['Copy'] = menu_item if self.cb_delete: menu_item = Gtk.SeparatorMenuItem() popup_menu.append(menu_item) menu_item = Gtk.MenuItem.new_with_label('Delete') menu_item.connect('activate', self.signal_activate_popup_menu_delete) popup_menu.append(menu_item) self._menu_items['Delete'] = menu_item popup_menu.show_all() if handle_button_press: self.treeview.connect('button-press-event', self.signal_button_pressed, popup_menu) return popup_menu def get_popup_copy_submenu(self): copy_menu = Gtk.Menu.new() for column_title, store_id in self.column_titles.items(): menu_item = Gtk.MenuItem.new_with_label(column_title) menu_item.connect('activate', self.signal_activate_popup_menu_copy, store_id) copy_menu.append(menu_item) if len(self.column_titles) > 1: menu_item = Gtk.SeparatorMenuItem() copy_menu.append(menu_item) menu_item = Gtk.MenuItem.new_with_label('All') menu_item.connect('activate', self.signal_activate_popup_menu_copy, self.column_titles.values()) copy_menu.append(menu_item) return copy_menu def set_column_color(self, background=None, foreground=None, column_titles=None): if background is None and foreground is None: raise RuntimeError('either background of foreground must be set') if column_titles is None: column_titles = self.column_titles.keys() elif isinstance(column_titles, str): column_titles = (column_titles,) for column_title in column_titles: column = self.column_views[column_title] renderer = column.get_cells()[0] if background is not None: column.add_attribute(renderer, 'background-rgba', background) column.add_attribute(renderer, 'background-set', True) if foreground is not None: column.add_attribute(renderer, 'foreground-rgba', foreground) column.add_attribute(renderer, 'foreground-set', True) def set_column_titles(self, column_titles, column_offset=0, renderers=None): self.column_titles.update((v, k) for (k, v) in enumerate(column_titles, column_offset)) columns = gui_utilities.gtk_treeview_set_column_titles(self.treeview, column_titles, column_offset=column_offset, renderers=renderers) for store_id, column_title in enumerate(column_titles, column_offset): self.column_views[column_title] = columns[store_id] return columns def signal_button_pressed(self, treeview, event, popup_menu): if not (event.type == Gdk.EventType.BUTTON_PRESS and event.button == Gdk.BUTTON_SECONDARY): return selection = treeview.get_selection() sensitive = bool(selection.count_selected_rows()) for menu_item in self._menu_items.values(): menu_item.set_sensitive(sensitive) popup_menu.popup(None, None, functools.partial(gui_utilities.gtk_menu_position, event), None, event.button, event.time) return True def signal_key_press_event(self, treeview, event): if event.type != Gdk.EventType.KEY_PRESS: return keyval = event.get_keyval()[1] if event.get_state() == Gdk.ModifierType.CONTROL_MASK: if keyval == Gdk.KEY_c and self.column_titles: gui_utilities.gtk_treeview_selection_to_clipboard(treeview, list(self.column_titles.values())[0]) elif keyval == Gdk.KEY_F5 and self.cb_refresh: self.cb_refresh() elif keyval == Gdk.KEY_Delete: self._call_cb_delete() def signal_activate_popup_menu_copy(self, menuitem, column_ids): gui_utilities.gtk_treeview_selection_to_clipboard(self.treeview, column_ids) def signal_activate_popup_menu_delete(self, menuitem): self._call_cb_delete() class 
_TimeSelector(gui_utilities.GladeGObject): dependencies = gui_utilities.GladeDependencies( children=( 'spinbutton_hour', 'spinbutton_minute' ), top_level=( 'ClockHourAdjustment', 'ClockMinuteAdjustment' ), name='TimeSelector' ) top_gobject = 'popover' def signal_spinbutton_output(self, spinbutton): adjustment = spinbutton.get_adjustment() value = adjustment.get_value() spinbutton.set_text("{0:02.0f}".format(value)) return True class TimeSelectorButtonManager(object): def __init__(self, application, button, value=None): self.popover = _TimeSelector(application) self.button = button self.application = application self._time_format = "{time.hour:02}:{time.minute:02}" self._hour_spin = self.popover.gobjects['spinbutton_hour'] self._hour_spin.connect('value-changed', lambda _: self.button.set_label(self._time_format.format(time=self.time))) self._minute_spin = self.popover.gobjects['spinbutton_minute'] self._minute_spin.connect('value-changed', lambda _: self.button.set_label(self._time_format.format(time=self.time))) self.time = value or datetime.time(0, 0) self.popover.popover.set_relative_to(self.button) self.popover.popover.connect('closed', lambda _: self.button.set_active(False)) self.button.connect('toggled', self.signal_button_toggled) def __repr__(self): return "<{0} time='{1:%H:%M}' >".format(self.__class__.__name__, self.time) def signal_button_toggled(self, _): if self.button.get_active(): self.popover.popover.popup() @property def time(self): return datetime.time(self._hour_spin.get_value_as_int(), self._minute_spin.get_value_as_int()) @time.setter
BSD 3-Clause New or Revised License
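The setter copies the value into the hour and minute spin buttons and reformats the button label with the manager's _time_format string. The formatting itself can be checked standalone; exercising the TypeError path needs a constructed manager inside a running king-phisher GTK application, so it is only noted in a comment:

import datetime

# Same format string the manager uses for the button label.
time_format = "{time.hour:02}:{time.minute:02}"
print(time_format.format(time=datetime.time(9, 30)))  # -> 09:30
# Assigning anything other than a datetime.time to manager.time raises TypeError.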
bayespy/bayespy
bayespy/inference/vmp/nodes/deterministic.py
Deterministic._get_id_list
python
def _get_id_list(self): id_list = [] for parent in self.parents: id_list = id_list + parent._get_id_list() return id_list
Returns the stochastic ID list. This method is used to check that same stochastic nodes are not direct parents of a node several times. It is only valid if there are intermediate stochastic nodes. To put it another way: each ID corresponds to one factor q(..) in the posterior approximation. Different IDs mean different factors, thus they mean independence. The parents must have independent factors. Stochastic nodes should return their unique ID. Deterministic nodes should return the IDs of their parents. Constant nodes should return empty list of IDs.
https://github.com/bayespy/bayespy/blob/0e6e6130c888a4295cc9421d61d4ad27b2960ebb/bayespy/inference/vmp/nodes/deterministic.py#L41-L60
import functools import numpy as np from bayespy.utils import misc from .node import Node, Moments class Deterministic(Node): def __init__(self, *args, **kwargs): super().__init__(*args, plates=None, notify_parents=False, **kwargs)
MIT License
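Deterministic nodes forward their parents' stochastic IDs instead of owning one, which is how BayesPy detects a stochastic node wired in twice as a direct parent. A hedged sketch with two Gaussian parents combined through a deterministic dot-product node (the internal _get_id_list is called here purely for illustration):

from bayespy.nodes import GaussianARD, SumMultiply

x = GaussianARD(0, 1, shape=(3,))
y = GaussianARD(0, 1, shape=(3,))
dot = SumMultiply('i,i', x, y)  # deterministic node over two stochastic parents
# Expected to equal the concatenation of the parents' ID lists, in parent order.
print(dot._get_id_list(), x._get_id_list() + y._get_id_list())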