Dataset columns:
repository_name: string (lengths 7 to 107)
function_path: string (lengths 4 to 190)
function_identifier: string (lengths 1 to 236)
language: string (1 value)
function: string (lengths 9 to 647k)
docstring: string (lengths 5 to 488k)
function_url: string (lengths 71 to 285)
context: string (lengths 0 to 2.51M)
license: string (5 values)
derfies/panda3d-editor
src/pandaEditor/selection.py
Selection.StopDragSelect
python
def StopDragSelect(self):
    self.marquee.Stop()
    nps = []
    for np in self.rootNp.findAllMatches('**'):
        pick_np = self.GetPickableNodePath(np)
        if (
            pick_np is not None and
            self.marquee.IsNodePathInside(pick_np) and
            pick_np not in nps
        ):
            nps.append(pick_np)
    np = self.GetNodePathUnderMouse()
    if np is not None and pick_np not in nps:
        nps.append(np)
    comps = [get_base().node_manager.wrap(np) for np in nps]
    if self.append:
        old_comps = self.comps
        for comp in comps:
            if comp in self.comps:
                old_comps.remove(comp)
            else:
                old_comps.append(comp)
        comps = old_comps
    return comps
Stop the marquee and get all the node paths under it with the correct tag. Also append any node which was under the mouse at the end of the operation.
https://github.com/derfies/panda3d-editor/blob/a50939bd4bfa5c22d27a9ddee090717e8d95f404/src/pandaEditor/selection.py#L142-L180
from direct.showbase.PythonUtil import getBase as get_base import panda3d.core as pm from p3d.object import Object from p3d.marquee import Marquee from p3d.mouse import MOUSE_CTRL from p3d.mousePicker import MousePicker from nodes.constants import TAG_IGNORE, TAG_PICKABLE class Selection(Object): BBOX_TAG = 'bbox' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.comps = [] self.marquee = Marquee('marquee', *args, **kwargs) bitMask = pm.GeomNode.getDefaultCollideMask() | pm.CollisionNode.getDefaultCollideMask() self.picker = MousePicker( 'picker', *args, fromCollideMask=bitMask, **kwargs ) def get(self): return self.comps @property def node_paths(self): return [ comp.data for comp in self.comps if isinstance(comp.data, pm.NodePath) ] def clear(self): for comp in self.comps: comp.on_deselect() self.comps = [] def add(self, comps): for comp in comps: if comp in self.comps: continue comp.on_select() self.comps.append(comp) def remove(self, comps): for comp in self.comps: comp.on_deselect() self.comps = [comp for comp in self.comps if comp not in comps] def select_parent(self): comps = [] for comp in self.comps: pcomp = comp.parent if pcomp.data != get_base().scene: comps.append(pcomp) else: comps.append(comp) return comps def select_child(self): comps = [] for comp in self.comps: if comp.children: comps.append(comp.children[0]) else: comps.append(comp) return comps def select_prev(self): comps = [] for comp in self.comps: children = comp.parent.children index = children.index(comp) - 1 if index < 0: index = len(children) - 1 comps.append(children[index]) return comps def select_next(self): comps = [] for comp in self.comps: children = comp.parent.children index = children.index(comp) + 1 if index > len(children) - 1: index = 0 comps.append(children[index]) return comps def StartDragSelect(self, append=False): if self.marquee.mouseWatcherNode.hasMouse(): self.append = append self.marquee.Start()
MIT License
mypal/ha-dsair
custom_components/ds_air/climate.py
DsAir.fan_modes
python
def fan_modes(self) -> Optional[List[str]]: return FAN_LIST
Return the list of available fan modes. Requires SUPPORT_FAN_MODE.
https://github.com/mypal/ha-dsair/blob/73e5a8322dd680b891241274b112987a764a6e3e/custom_components/ds_air/climate.py#L266-L271
import logging from typing import Optional, List import voluptuous as vol from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate import PLATFORM_SCHEMA from homeassistant.components.climate.const import ( SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_HUMIDITY, HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY) from homeassistant.config_entries import ConfigEntry from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, CONF_HOST, CONF_PORT from homeassistant.core import HomeAssistant, Event from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.event import async_track_state_change_event from .const import DOMAIN from .ds_air_service.config import Config from .ds_air_service.ctrl_enum import EnumControl from .ds_air_service.dao import AirCon, AirConStatus from .ds_air_service.display import display SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE | SUPPORT_SWING_MODE | SUPPORT_TARGET_HUMIDITY FAN_LIST = ['最弱', '稍弱', '中等', '稍强', '最强', '自动'] SWING_LIST = ['➡️', '↘️', '⬇️', '↙️', '⬅️', '↔️', '🔄'] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_PORT): cv.port }) _LOGGER = logging.getLogger(__name__) def _log(s: str): s = str(s) for i in s.split("\n"): _LOGGER.debug(i) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: from .ds_air_service.service import Service climates = [] for aircon in Service.get_aircons(): climates.append(DsAir(aircon)) async_add_entities(climates) link = entry.options.get("link") sensor_map = {} if link is not None: for i in link: if i.get("sensor") is not None: climate = None for j in climates: if i.get("climate") == j.name: climate = j break if sensor_map.get(i.get("sensor")) is not None: sensor_map[i.get("sensor")].append(climate) else: sensor_map[i.get("sensor")] = [climate] async def listener(event: Event): for climate in sensor_map[event.data.get("entity_id")]: climate.update_cur_temp(event.data.get("new_state").state) remove_listener = async_track_state_change_event(hass, list(sensor_map.keys()), listener) hass.data[DOMAIN]["listener"] = remove_listener for entity_id in sensor_map.keys(): state = hass.states.get(entity_id) if state is not None: for climate in sensor_map[entity_id]: climate.update_cur_temp(state.state) class DsAir(ClimateEntity): def __init__(self, aircon: AirCon): _log('create aircon:') _log(str(aircon.__dict__)) _log(str(aircon.status.__dict__)) self._name = aircon.alias self._device_info = aircon self._unique_id = aircon.unique_id self._link_cur_temp = False self._cur_temp = None from .ds_air_service.service import Service Service.register_status_hook(aircon, self._status_change_hook) def _status_change_hook(self, **kwargs): _log('hook:') if kwargs.get('aircon') is not None: aircon: AirCon = kwargs['aircon'] aircon.status = self._device_info.status self._device_info = aircon _log(display(self._device_info)) if kwargs.get('status') is not None: status: AirConStatus = self._device_info.status new_status: AirConStatus = kwargs['status'] if new_status.mode is not None: status.mode = new_status.mode if new_status.switch is not None: status.switch = new_status.switch if new_status.humidity is not None: 
status.humidity = new_status.humidity if new_status.air_flow is not None: status.air_flow = new_status.air_flow if new_status.fan_direction1 is not None: status.fan_direction1 = new_status.fan_direction1 if new_status.fan_direction2 is not None: status.fan_direction2 = new_status.fan_direction2 if new_status.setted_temp is not None: status.setted_temp = new_status.setted_temp if new_status.current_temp is not None: status.current_temp = new_status.current_temp if new_status.breathe is not None: status.breathe = new_status.breathe _log(display(self._device_info.status)) self.schedule_update_ha_state() def update_cur_temp(self, value): self._link_cur_temp = value is not None try: self._cur_temp = float(value) except ValueError: self.schedule_update_ha_state() @property def should_poll(self): return False @property def name(self): return self._name @property def temperature_unit(self): return TEMP_CELSIUS @property def target_humidity(self): return self._device_info.status.humidity.value @property def hvac_action(self): return None @property def hvac_mode(self) -> str: if self._device_info.status.switch == EnumControl.Switch.OFF: return HVAC_MODE_OFF else: return EnumControl.get_mode_name(self._device_info.status.mode.value) @property def hvac_modes(self): li = [] aircon = self._device_info if aircon.cool_mode: li.append(HVAC_MODE_COOL) if aircon.heat_mode or aircon.pre_heat_mode: li.append(HVAC_MODE_HEAT) if aircon.auto_dry_mode or aircon.dry_mode or aircon.more_dry_mode: li.append(HVAC_MODE_DRY) if aircon.ventilation_mode: li.append(HVAC_MODE_FAN_ONLY) if aircon.relax_mode or aircon.auto_mode: li.append(HVAC_MODE_AUTO) if aircon.sleep_mode: li.append(HVAC_MODE_HEAT_COOL) li.append(HVAC_MODE_OFF) return li @property def current_temperature(self): if self._link_cur_temp: return self._cur_temp else: if Config.is_c611: return None else: return self._device_info.status.current_temp / 10 @property def target_temperature(self): return self._device_info.status.setted_temp / 10 @property def target_temperature_step(self): return 1 @property def target_temperature_high(self): return None @property def target_temperature_low(self): return None @property def current_humidity(self): return None @property def preset_mode(self) -> Optional[str]: return None @property def preset_modes(self) -> Optional[List[str]]: return None @property def is_aux_heat(self): return None @property def fan_mode(self): return EnumControl.get_air_flow_name(self._device_info.status.air_flow.value) @property
MIT License
virtualabs/btlejack
btlejack/supervisors.py
ConnectionRecovery.on_hopincrement
python
def on_hopincrement(self, increment): print('Increment: %d' % increment)
Hop increment has been recovered.
https://github.com/virtualabs/btlejack/blob/7cd784a5c9ead820bdc1b1745d2d8e0773e6facb/btlejack/supervisors.py#L364-L368
from btlejack.jobs import SingleSnifferInterface, MultiSnifferInterface from btlejack.session import BtlejackSession, BtlejackSessionError from btlejack.packets import * from btlejack.link import DeviceError class Supervisor(object): def stop(self): pass def process_packets(self): packets = self.interface.read_packet() if len(packets) > 0: for pkt in packets: pkt = PacketRegistry.decode(pkt) self.on_packet_received(pkt) def on_packet_received(self, packet): if isinstance(packet, VerbosePacket): self.on_verbose(packet) elif isinstance(packet, DebugPacket): self.on_debug(packet) def send_packet(self, packet): self.interface.send_packet(packet) def on_ll_packet(self, packet): pass def on_verbose(self, packet): print('V:'+str(packet)) def on_debug(self, packet): print('D:'+str(packet)) class AccessAddressSniffer(Supervisor): def __init__(self, devices=None, baudrate=115200): super().__init__() if devices is not None: if len(devices) >= 1: self.interface = SingleSnifferInterface(devices[0], baudrate) else: raise DeviceError('No device provided') else: self.interface = SingleSnifferInterface() self.interface.reset() self.aad = {} self.interface.scan_access_addresses() def on_packet_received(self, packet): if isinstance(packet, VerbosePacket) or isinstance(packet, DebugPacket): super().on_packet_received(packet) else: if isinstance(packet, AccessAddressNotification): if packet.access_address not in self.aad: self.aad[packet.access_address] = 1 else: self.aad[packet.access_address] += 1 self.on_access_address( packet.access_address, packet.rssi, self.aad[packet.access_address] ) def on_access_address(self, address, rssi, nb_packets): print( '[ -%3d dBm] 0x%08x | pkts: %d' % ( rssi, address, nb_packets ) ) class ConnectionRecovery(Supervisor): STATE_IDLE = 0 STATE_RECOVER_CRC = 1 STATE_RECOVER_CHM = 2 STATE_RECOVER_HOPINTER = 3 STATE_RECOVER_HOPINC = 4 STATE_FOLLOWING = 5 STATE_HIJACKING = 6 STATE_HIJACKED = 7 STATE_RECOVER_CCHM = 8 STATE_RECOVER_PRNG = 9 def __init__(self, access_address, channel_map=None, hop_interval=None, crc=None, devices=None, baudrate=115200, timeout=0, v5=False): super().__init__() try: self.session = BtlejackSession.get_instance() except BtlejackSessionError as session_error: self.session = None if devices is not None: self.interface = MultiSnifferInterface(len(devices), baudrate, devices, v5=v5) else: self.interface = MultiSnifferInterface(999, v5=v5) self.state = self.STATE_RECOVER_CRC self.chm_provided = (channel_map is not None) self.crc_provide = (crc is not None) self.hop_provided = (hop_interval is not None) self.access_address = access_address self.hop_interval = hop_interval self.chm = channel_map self.packet_sent = False self.crc = crc self.cchm_notifications = 0 self.cchm = 0 self.timeout = timeout self.v5 = v5 if self.crc is not None: if self.session is not None: self.session.add_connection( self.access_address, {'crcinit': self.crc} ) self.session.save() if self.chm is not None: if self.v5 and self.hop_interval is not None: self.state = self.STATE_RECOVER_PRNG self.interface.recover_prng(self.access_address, self.crc, self.chm, self.hop_interval) else: self.state = self.STATE_RECOVER_HOPINTER self.interface.recover_hop(access_address, self.crc, self.chm) else: self.state = self.STATE_RECOVER_CCHM self.interface.recover_chm(access_address, self.crc, self.timeout) else: self.state = self.STATE_RECOVER_CRC self.interface.recover_crcinit(access_address) def jam(self): if self.state == self.STATE_FOLLOWING: self.interface.enable_jamming(True) def hijack(self): if 
self.state == self.STATE_FOLLOWING: self.state = self.STATE_HIJACKING self.interface.enable_hijacking(True) def on_packet_received(self, packet): if isinstance(packet, VerbosePacket) or isinstance(packet, DebugPacket): super().on_packet_received(packet) elif isinstance(packet, ConnectionLostNotification): self.on_connection_lost() else: if self.state == self.STATE_RECOVER_CRC: if isinstance(packet, CrcNotification): self.on_crc(packet.crc) self.crc = packet.crc if self.session is not None: self.session.add_connection( self.access_address, {'crcinit': self.crc} ) self.session.save() if self.chm_provided: if self.v5: self.state = self.STATE_RECOVER_PRNG else: self.state = self.STATE_RECOVER_HOPINTER else: if self.interface.get_nb_interfaces() >= 1: self.state = self.STATE_RECOVER_CCHM self.interface.reset() self.cchm_notifications = 0 self.cchm = 0 self.interface.recover_chm( self.access_address, self.crc, self.timeout ) else: self.state = self.STATE_RECOVER_CHM elif self.state == self.STATE_RECOVER_CHM: if isinstance(packet, ChannelMapNotification): self.on_chm(packet.channel_map) if not self.v5: if self.hop_provided: self.state = self.STATE_RECOVER_HOPINC else: self.state = self.STATE_RECOVER_HOPINTER else: self.state = self.STATE_RECOVER_HOPINTER elif self.state == self.STATE_RECOVER_PRNG: if isinstance(packet, Csa2PrngNotification): self.state = self.STATE_FOLLOWING self.on_prng_state(packet.prng_state) elif self.state == self.STATE_RECOVER_CCHM: if isinstance(packet, ChannelMapNotification): self.cchm |= packet.channel_map self.cchm_notifications += 1 if self.cchm_notifications == self.interface.get_nb_interfaces(): self.state = self.STATE_RECOVER_HOPINTER self.on_chm(self.cchm) elif self.state == self.STATE_RECOVER_HOPINTER: if isinstance(packet, HopIntervalNotification): self.on_hopinterval(packet.interval) if not self.v5: self.state = self.STATE_RECOVER_HOPINC else: self.state = self.STATE_RECOVER_PRNG elif self.state == self.STATE_RECOVER_HOPINC: if isinstance(packet, HopIncrementNotification): self.state = self.STATE_FOLLOWING self.on_hopincrement(packet.increment) elif self.state == self.STATE_FOLLOWING: self.on_ll_packet(packet) elif self.state == self.STATE_HIJACKING: if isinstance(packet, HijackStatusNotification): if packet.status: self.state = self.STATE_HIJACKED self.on_hijacking_success() else: self.state = self.STATE_IDLE self.on_hijacking_failed() elif self.state == self.STATE_HIJACKED: if isinstance(packet, SendPacketResponse): self.sent_packet = False else: self.on_ll_packet(packet) def on_crc(self, crc): print('CRC: %06x' % crc) def on_chm(self, chm): self.state = self.STATE_RECOVER_HOPINTER self.chm = chm if not self.v5: self.recover_hop( self.access_address, self.crc, self.chm ) else: self.state = self.STATE_RECOVER_PRNG def on_hopinterval(self, interval): print('Interval: %d' % interval)
MIT License
databand-ai/dbnd
modules/dbnd-airflow/src/dbnd_airflow/tracking/airflow_patching.py
add_tracking_to_policy
python
def add_tracking_to_policy():
    try:
        _add_tracking_to_policy()
    except Exception as e:
        logging.exception("Failed to add tracking in policy")
Add tracking to all tasks as part of airflow policy
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd-airflow/src/dbnd_airflow/tracking/airflow_patching.py#L51-L56
import logging from dbnd_airflow.tracking.dbnd_dag_tracking import track_task def _wrap_policy_with_dbnd_track_task(policy): if policy and getattr(policy, "_dbnd_patched", None): return policy def dbnd_track_task_policy(task): policy(task) track_task(task) dbnd_track_task_policy._dbnd_patched = True return dbnd_track_task_policy def _patch_policy(module): if hasattr(module, "policy"): new_policy = _wrap_policy_with_dbnd_track_task(module.policy) module.policy = new_policy if hasattr(module, "task_policy"): new_policy = _wrap_policy_with_dbnd_track_task(module.task_policy) module.task_policy = new_policy def _add_tracking_to_policy(): try: import airflow_local_settings _patch_policy(airflow_local_settings) except ImportError: pass from airflow.models.dagbag import settings _patch_policy(settings)
Apache License 2.0
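A minimal, self-contained sketch of the wrapping pattern used by add_tracking_to_policy above: an existing Airflow-style policy callable is wrapped so that a tracking hook runs after it, and the wrapper carries a flag so repeated patching is a no-op. The policy and track_task functions here are stand-ins for illustration, not the dbnd implementations.

# Illustrative sketch of the policy-wrapping pattern (stand-in functions, not dbnd code).

def track_task(task):
    # Stand-in for dbnd's track_task: here it just records the task name.
    print(f"tracking {task!r}")

def wrap_policy(policy):
    # Avoid double-wrapping if this policy was already patched.
    if policy and getattr(policy, "_tracking_patched", None):
        return policy

    def tracking_policy(task):
        policy(task)       # run the original policy first
        track_task(task)   # then attach tracking

    tracking_policy._tracking_patched = True
    return tracking_policy

def original_policy(task):
    print(f"applying policy to {task!r}")

policy = wrap_policy(original_policy)
policy = wrap_policy(policy)  # second call is a no-op thanks to the flag
policy("example_task")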
opsdroid/opsdroid
opsdroid/loader.py
Loader.pip_install_deps
python
def pip_install_deps(requirements_path):
    process = None
    command = [
        "pip",
        "install",
        "--target={}".format(DEFAULT_MODULE_DEPS_PATH),
        "--ignore-installed",
        "-r",
        requirements_path,
    ]
    try:
        process = subprocess.Popen(
            command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
    except FileNotFoundError:
        _LOGGER.debug(
            _("Couldn't find the command 'pip', trying again with command 'pip3'.")
        )
        try:
            command[0] = "pip3"
            process = subprocess.Popen(
                command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
        except FileNotFoundError:
            _LOGGER.debug(
                _("Couldn't find the command 'pip3', install of %s will be skipped."),
                str(requirements_path),
            )
    if not process:
        raise OSError(_("Pip and pip3 not found, exiting..."))
    Loader._communicate_process(process)
    return True
Pip install a requirements.txt file and wait for finish. Args: requirements_path: string holding the path to the requirements.txt file located in the module's local repository Returns: bool: True if the requirements.txt installs successfully
https://github.com/opsdroid/opsdroid/blob/9a48364869ded7cdd2420b43b0c2c153e846439e/opsdroid/loader.py#L249-L295
import contextlib import importlib import importlib.util import json import logging import os import shutil import subprocess import sys import tempfile import urllib.request from collections.abc import Mapping from pkg_resources import iter_entry_points from opsdroid.helper import ( file_is_ipython_notebook, convert_ipynb_to_script, extract_gist_id, ) from opsdroid.configuration import validate_configuration from opsdroid.const import ( DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH, DEFAULT_MODULE_BRANCH, DEFAULT_MODULE_DEPS_PATH, ) _LOGGER = logging.getLogger(__name__) class Loader: def __init__(self, opsdroid): self.opsdroid = opsdroid self.modules_directory = None self.current_import_config = None _LOGGER.debug(_("Loaded loader.")) @staticmethod def import_module_from_spec(module_spec): module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) return module @staticmethod def import_module(config): if config.get("entrypoint"): _LOGGER.debug( _("Loading entry point-defined module for %s."), config["name"] ) return config["entrypoint"].load() module_spec = None namespaces = [ config["module"], config["module_path"] + "." + config["name"], config["module_path"], ] for namespace in namespaces: try: module_spec = importlib.util.find_spec(namespace) if module_spec: break except (ImportError, AttributeError): continue if module_spec: try: module = Loader.import_module_from_spec(module_spec) except Exception as e: _LOGGER.error( _("The following exception was raised while importing %s %s"), config["type"], config["module_path"], ) _LOGGER.error(str(e)) else: _LOGGER.debug( _("Loaded %s: %s."), config["type"], config["module_path"] ) return module _LOGGER.error( _("Failed to load %s: %s."), config["type"], config["module_path"] ) return None @classmethod def check_cache(cls, config): if "no-cache" in config and config["no-cache"]: _LOGGER.debug(_("'no-cache' set, removing %s."), config["install_path"]) cls.remove_cache(config) if "no-cache" not in config and cls._is_local_module(config): _LOGGER.debug( _( "Removing cache for local module %s, set 'no-cache: false' to disable this." ), config["install_path"], ) cls.remove_cache(config) @staticmethod def remove_cache(config): if os.path.isdir(config["install_path"]): shutil.rmtree(config["install_path"]) if os.path.isfile(config["install_path"] + ".py"): os.remove(config["install_path"] + ".py") @staticmethod def is_builtin_module(config): try: return importlib.util.find_spec( "opsdroid.{module_type}.{module_name}".format( module_type=config["type"], module_name=config["name"].lower() ) ) except ImportError: return False @staticmethod def build_module_import_path(config): if config["is_builtin"]: return "opsdroid" + "." + config["type"] + "." + config["name"].lower() return MODULES_DIRECTORY + "." + config["type"] + "." 
+ config["name"] def build_module_install_path(self, config): return os.path.join(self.modules_directory, config["type"], config["name"]) @staticmethod def git_clone(git_url, install_path, branch, key_path=None): git_env = os.environ.copy() if key_path: git_env[ "GIT_SSH_COMMAND" ] = f"ssh -i {key_path} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" process = subprocess.Popen( ["git", "clone", "-b", branch, git_url, install_path], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=git_env, ) Loader._communicate_process(process) @staticmethod def git_pull(repository_path): process = subprocess.Popen( ["git", "-C", repository_path, "pull", "--ff-only"], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) Loader._communicate_process(process) @staticmethod
Apache License 2.0
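The pip/pip3 fallback in Loader.pip_install_deps above can be exercised on its own. Below is a hedged, standalone sketch of the same pattern; the requirements path and target directory are placeholders, and only the subprocess calls used in the original appear here.

import subprocess

def install_requirements(requirements_path, target_dir):
    # Try 'pip' first, then fall back to 'pip3', mirroring Loader.pip_install_deps.
    command = ["pip", "install", "--target={}".format(target_dir),
               "--ignore-installed", "-r", requirements_path]
    process = None
    try:
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        try:
            command[0] = "pip3"
            process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except FileNotFoundError:
            pass
    if not process:
        raise OSError("Pip and pip3 not found")
    process.communicate()  # wait for the install to finish
    return process.returncode == 0

# Hypothetical paths for illustration only:
# install_requirements("requirements.txt", "./module_deps")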
huailiang/unity_pose3d
python/common/camera.py
project_to_2d
python
def project_to_2d(X, camera_params):
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]

    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)

    f = camera_params[..., :2]
    c = camera_params[..., 2:4]
    k = camera_params[..., 4:7]
    p = camera_params[..., 7:]

    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    r2 = torch.sum(XX[..., :2] ** 2, dim=len(XX.shape) - 1, keepdim=True)

    radial = 1 + torch.sum(k * torch.cat((r2, r2 ** 2, r2 ** 3), dim=len(r2.shape) - 1),
                           dim=len(r2.shape) - 1, keepdim=True)
    tan = torch.sum(p * XX, dim=len(XX.shape) - 1, keepdim=True)

    XXX = XX * (radial + tan) + p * r2
    return f * XXX + c
Project 3D points to 2D using the Human3.6M camera projection function. This is a differentiable and batched reimplementation of the original MATLAB script. Arguments: X -- 3D points in *camera space* to transform (N, *, 3) camera_params -- intrinsic parameteres (N, 2+2+3+2=9) focal length / principal point / radial_distortion / tangential_distortion
https://github.com/huailiang/unity_pose3d/blob/f2295d0515f021dfff42a48d6bc7e9ba36f373ce/python/common/camera.py#L51-L82
import numpy as np import torch from common.quaternion import qrot, qinverse from common.utils import wrap def normalize_screen_coordinates(X, w, h): assert X.shape[-1] == 2 return X / w * 2 - [1, h / w] def normalize_screen_coordinates_new(X, w, h): assert X.shape[-1] == 2 return (X - (w / 2, h / 2)) / (w / 2, h / 2) def image_coordinates_new(X, w, h): assert X.shape[-1] == 2 return (X * (w / 2, h / 2)) + (w / 2, h / 2) def image_coordinates(X, w, h): assert X.shape[-1] == 2 return (X + [1, h / w]) * w / 2 def world_to_camera(X, R, t): Rt = wrap(qinverse, R) return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - t) def camera_to_world(X, R, t): return wrap(qrot, np.tile(R, (*X.shape[:-1], 1)), X) + t
MIT License
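A small usage sketch for project_to_2d above, assuming the module is importable as common.camera from the repository layout shown in the record; the batch and joint counts are arbitrary.

import torch
from common.camera import project_to_2d  # assumes the repo's python/ directory is on sys.path

batch, joints = 4, 17
X = torch.randn(batch, joints, 3)   # 3D joints in camera space
X[..., 2] += 3.0                    # push points in front of the camera
cam = torch.randn(batch, 9)         # fx, fy, cx, cy, k1, k2, k3, p1, p2

points_2d = project_to_2d(X, cam)
print(points_2d.shape)              # torch.Size([4, 17, 2])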
brython-dev/brython
www/src/Lib/faulthandler.py
dump_traceback_later
python
def dump_traceback_later(*args,**kw): pass
dump_traceback_later(timeout, repeat=False, file=sys.stderr, exit=False): dump the traceback of all threads in timeout seconds, or each timeout seconds if repeat is True. If exit is True, call _exit(1) which is not safe.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/faulthandler.py#L149-L152
_EXCEPTION_ACCESS_VIOLATION = -1073741819 _EXCEPTION_INT_DIVIDE_BY_ZERO = -1073741676 _EXCEPTION_NONCONTINUABLE = 1 _EXCEPTION_NONCONTINUABLE_EXCEPTION = -1073741787 _EXCEPTION_STACK_OVERFLOW = -1073741571 class __loader__(object): __delattr__ = "<slot wrapper '__delattr__' of 'object' objects>" __dict__ = "{'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\n\n All methods are either class or static methods to avoid the need to\n instantiate the class.\n\n ', 'module_repr': <staticmethod object at 0x000001F9B17F15F8>, 'find_spec': <classmethod object at 0x000001F9B17F1630>, 'find_module': <classmethod object at 0x000001F9B17F1668>, 'create_module': <classmethod object at 0x000001F9B17F16A0>, 'exec_module': <classmethod object at 0x000001F9B17F16D8>, 'get_code': <classmethod object at 0x000001F9B17F1748>, 'get_source': <classmethod object at 0x000001F9B17F17B8>, 'is_package': <classmethod object at 0x000001F9B17F1828>, 'load_module': <classmethod object at 0x000001F9B17F1860>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>}" __dir__ = "<method '__dir__' of 'object' objects>" __eq__ = "<slot wrapper '__eq__' of 'object' objects>" __format__ = "<method '__format__' of 'object' objects>" __ge__ = "<slot wrapper '__ge__' of 'object' objects>" __getattribute__ = "<slot wrapper '__getattribute__' of 'object' objects>" __gt__ = "<slot wrapper '__gt__' of 'object' objects>" __hash__ = "<slot wrapper '__hash__' of 'object' objects>" __init__ = "<slot wrapper '__init__' of 'object' objects>" def __init_subclass__(*args,**kw): pass __le__ = "<slot wrapper '__le__' of 'object' objects>" __lt__ = "<slot wrapper '__lt__' of 'object' objects>" __module__ = """_frozen_importlib""" __ne__ = "<slot wrapper '__ne__' of 'object' objects>" def __new__(*args,**kw): pass __reduce__ = "<method '__reduce__' of 'object' objects>" __reduce_ex__ = "<method '__reduce_ex__' of 'object' objects>" __repr__ = "<slot wrapper '__repr__' of 'object' objects>" __setattr__ = "<slot wrapper '__setattr__' of 'object' objects>" __sizeof__ = "<method '__sizeof__' of 'object' objects>" __str__ = "<slot wrapper '__str__' of 'object' objects>" def __subclasshook__(*args,**kw): pass __weakref__ = "<attribute '__weakref__' of 'BuiltinImporter' objects>" create_module = "<bound method BuiltinImporter.create_module of <class '_frozen_importlib.BuiltinImporter'>>" exec_module = "<bound method BuiltinImporter.exec_module of <class '_frozen_importlib.BuiltinImporter'>>" find_module = "<bound method BuiltinImporter.find_module of <class '_frozen_importlib.BuiltinImporter'>>" find_spec = "<bound method BuiltinImporter.find_spec of <class '_frozen_importlib.BuiltinImporter'>>" get_code = "<bound method BuiltinImporter.get_code of <class '_frozen_importlib.BuiltinImporter'>>" get_source = "<bound method BuiltinImporter.get_source of <class '_frozen_importlib.BuiltinImporter'>>" is_package = "<bound method BuiltinImporter.is_package of <class '_frozen_importlib.BuiltinImporter'>>" load_module = "<bound method _load_module_shim of <class '_frozen_importlib.BuiltinImporter'>>" def module_repr(*args,**kw): pass __spec__ = "ModuleSpec(name='faulthandler', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')" def _fatal_error(*args,**kw): pass def _fatal_error_c_thread(*args,**kw): pass def _raise_exception(*args,**kw): pass def _read_null(*args,**kw): pass def _sigabrt(*args,**kw): pass def _sigfpe(*args,**kw): 
pass def _sigsegv(*args,**kw): pass def cancel_dump_traceback_later(*args,**kw): pass def disable(*args,**kw): pass def dump_traceback(*args,**kw): pass
BSD 3-Clause New or Revised License
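In Brython the function above is a no-op stub, but its docstring matches CPython's faulthandler API. A short usage sketch against CPython, with an arbitrary timeout:

import faulthandler
import sys
import time

# Dump a traceback of all threads if the program is still running in 5 seconds.
faulthandler.dump_traceback_later(5.0, repeat=False, file=sys.stderr, exit=False)

time.sleep(1)  # stands in for the work being watched

# The work finished in time, so cancel the pending dump.
faulthandler.cancel_dump_traceback_later()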
contentful/contentful-management.py
contentful_management/locales_proxy.py
LocalesProxy.create
python
def create(self, attributes=None, **kwargs): return super(LocalesProxy, self).create(None, attributes)
Creates a locale with given attributes.
https://github.com/contentful/contentful-management.py/blob/658341bc5af529b00fa317362c0b6cca221d76e4/contentful_management/locales_proxy.py#L27-L32
from .client_proxy import ClientProxy from .locale import Locale class LocalesProxy(ClientProxy): @property def _resource_class(self): return Locale
MIT License
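A hedged usage sketch for LocalesProxy.create above, assuming the usual contentful_management client wiring; the token, space and environment IDs, and locale attributes are placeholders.

import contentful_management

client = contentful_management.Client('<management-api-token>')  # placeholder token

# client.locales(...) is expected to return a LocalesProxy like the one above.
new_locale = client.locales('<space-id>', '<environment-id>').create({
    'name': 'German (Germany)',
    'code': 'de-DE',
})
print(new_locale.code)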
lisa-lab/pylearn2
pylearn2/models/mlp.py
MLP.get_monitoring_data_specs
python
def get_monitoring_data_specs(self):
    if not self.monitor_targets:
        return (self.get_input_space(), self.get_input_source())
    space = CompositeSpace((self.get_input_space(),
                            self.get_target_space()))
    source = (self.get_input_source(), self.get_target_source())
    return (space, source)
Returns data specs requiring both inputs and targets. Returns ------- data_specs: TODO The data specifications for both inputs and targets.
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/models/mlp.py#L646-L661
__authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2012-2013, Universite de Montreal" __credits__ = ["Ian Goodfellow", "David Warde-Farley"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" import logging import math import operator import sys import warnings import numpy as np from theano.compat import six from theano.compat.six.moves import reduce, xrange from theano import config from theano.gof.op import get_debug_values from theano.sandbox.cuda import cuda_enabled from theano.sandbox.cuda.dnn import dnn_available, dnn_pool from theano.sandbox.rng_mrg import MRG_RandomStreams from theano.tensor.signal.pool import pool_2d import theano.tensor as T from pylearn2.compat import OrderedDict from pylearn2.costs.mlp import Default from pylearn2.expr.probabilistic_max_pooling import max_pool_channels if cuda_enabled and dnn_available(): try: from pylearn2.linear import cudnn2d as conv2d except ImportError: from pylearn2.linear import conv2d else: from pylearn2.linear import conv2d from pylearn2.linear.matrixmul import MatrixMul from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm from pylearn2.models.model import Model from pylearn2.monitor import get_monitor_doc from pylearn2.expr.nnet import arg_of_softmax from pylearn2.expr.nnet import pseudoinverse_softmax_numpy from pylearn2.space import CompositeSpace from pylearn2.space import Conv2DSpace from pylearn2.space import Space from pylearn2.space import VectorSpace, IndexSpace from pylearn2.utils import function from pylearn2.utils import is_iterable from pylearn2.utils import py_float_types from pylearn2.utils import py_integer_types from pylearn2.utils import safe_union from pylearn2.utils import safe_zip from pylearn2.utils import safe_izip from pylearn2.utils import sharedX from pylearn2.utils import wraps from pylearn2.utils import contains_inf from pylearn2.utils import isfinite from pylearn2.utils.data_specs import DataSpecsMapping from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision, compute_recall, compute_f1) from pylearn2.costs.mlp import L1WeightDecay as _L1WD from pylearn2.costs.mlp import WeightDecay as _WD logger = logging.getLogger(__name__) logger.debug("MLP changing the recursion limit.") sys.setrecursionlimit(40000) class Layer(Model): dropout_input_mask_value = 0. 
def get_mlp(self): if hasattr(self, 'mlp'): return self.mlp return None def set_mlp(self, mlp): assert self.get_mlp() is None self.mlp = mlp def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None): return OrderedDict() def fprop(self, state_below): raise NotImplementedError( str(type(self)) + " does not implement fprop.") def cost(self, Y, Y_hat): raise NotImplementedError( str(type(self)) + " does not implement mlp.Layer.cost.") def cost_from_cost_matrix(self, cost_matrix): raise NotImplementedError( str(type(self)) + " does not implement " "mlp.Layer.cost_from_cost_matrix.") def cost_matrix(self, Y, Y_hat): raise NotImplementedError( str(type(self)) + " does not implement mlp.Layer.cost_matrix") def set_weights(self, weights): raise NotImplementedError( str(type(self)) + " does not implement set_weights.") def get_biases(self): raise NotImplementedError( str(type(self)) + " does not implement " "get_biases (perhaps because the class has no biases).") def set_biases(self, biases): raise NotImplementedError( str(type(self)) + " does not implement " "set_biases (perhaps because the class has no biases).") def get_weights_format(self): raise NotImplementedError def get_weight_decay(self, coeff): raise NotImplementedError( str(type(self)) + " does not implement get_weight_decay.") def get_l1_weight_decay(self, coeff): raise NotImplementedError( str(type(self)) + " does not implement get_l1_weight_decay.") def set_input_space(self, space): raise NotImplementedError( str(type(self)) + " does not implement set_input_space.") class MLP(Layer): def __init__(self, layers, batch_size=None, input_space=None, input_source='features', target_source='targets', nvis=None, seed=None, layer_name=None, monitor_targets=True, **kwargs): super(MLP, self).__init__(**kwargs) self.seed = seed assert isinstance(layers, list) assert all(isinstance(layer, Layer) for layer in layers) assert len(layers) >= 1 self.layer_name = layer_name self.layer_names = set() for layer in layers: assert layer.get_mlp() is None if layer.layer_name in self.layer_names: raise ValueError("MLP.__init__ given two or more layers " "with same name: " + layer.layer_name) layer.set_mlp(self) self.layer_names.add(layer.layer_name) self.layers = layers self.batch_size = batch_size self.force_batch_size = batch_size self._input_source = input_source self._target_source = target_source self.monitor_targets = monitor_targets if input_space is not None or nvis is not None: self._nested = False self.setup_rng() assert layer_name is None if nvis is not None: input_space = VectorSpace(nvis) try: DataSpecsMapping((input_space, input_source)) except ValueError: raise ValueError("The structures of `input_space`, %s, and " "`input_source`, %s do not match. If you " "specified a CompositeSpace as an input, " "be sure to specify the data sources as well." 
% (input_space, input_source)) self.input_space = input_space self._update_layer_input_spaces() else: self._nested = True self.freeze_set = set([]) @property def input_source(self): assert not self._nested, "A nested MLP does not have an input source" return self._input_source @property def target_source(self): assert not self._nested, "A nested MLP does not have a target source" return self._target_source def setup_rng(self): assert not self._nested, "Nested MLPs should use their parent's RNG" if self.seed is None: self.seed = [2013, 1, 4] self.rng = np.random.RandomState(self.seed) @wraps(Layer.get_default_cost) def get_default_cost(self): return Default() @wraps(Layer.get_output_space) def get_output_space(self): return self.layers[-1].get_output_space() @wraps(Layer.get_target_space) def get_target_space(self): return self.layers[-1].get_target_space() @wraps(Layer.set_input_space) def set_input_space(self, space): if hasattr(self, "mlp"): assert self._nested self.rng = self.mlp.rng self.batch_size = self.mlp.batch_size self.input_space = space self._update_layer_input_spaces() def _update_layer_input_spaces(self): layers = self.layers try: layers[0].set_input_space(self.get_input_space()) except BadInputSpaceError as e: raise TypeError("Layer 0 (" + str(layers[0]) + " of type " + str(type(layers[0])) + ") does not support the MLP's " + "specified input space (" + str(self.get_input_space()) + " of type " + str(type(self.get_input_space())) + "). Original exception: " + str(e)) for i in xrange(1, len(layers)): layers[i].set_input_space(layers[i - 1].get_output_space()) def add_layers(self, layers): existing_layers = self.layers assert len(existing_layers) > 0 for layer in layers: assert layer.get_mlp() is None layer.set_mlp(self) if not self._nested or hasattr(self, 'input_space'): layer.set_input_space(existing_layers[-1].get_output_space()) existing_layers.append(layer) assert layer.layer_name not in self.layer_names self.layer_names.add(layer.layer_name) def freeze(self, parameter_set): self.freeze_set = self.freeze_set.union(parameter_set) @wraps(Layer.get_monitoring_channels) def get_monitoring_channels(self, data): if self.monitor_targets: X, Y = data else: X = data Y = None rval = self.get_layer_monitoring_channels(state_below=X, targets=Y) return rval @wraps(Layer.get_layer_monitoring_channels) def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None): rval = OrderedDict() state = state_below for layer in self.layers: state_below = state state = layer.fprop(state) args = [state_below, state] if layer is self.layers[-1] and targets is not None: args.append(targets) ch = layer.get_layer_monitoring_channels(*args) if not isinstance(ch, OrderedDict): raise TypeError(str((type(ch), layer.layer_name))) for key in ch: value = ch[key] doc = get_monitor_doc(value) if doc is None: doc = str(type(layer)) + ".get_monitoring_channels_from_state did" + " not provide any further documentation for" + " this channel." doc = 'This channel came from a layer called "' + layer.layer_name + '" of an MLP.\n' + doc value.__doc__ = doc rval[layer.layer_name + '_' + key] = value return rval
BSD 3-Clause New or Revised License
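To illustrate the (space, source) convention returned by get_monitoring_data_specs above, here is a small sketch built directly from the pylearn2 spaces imported in the context; the dimensions are arbitrary and 'features'/'targets' are the MLP's default source names shown in the constructor.

from pylearn2.space import CompositeSpace, VectorSpace

# Shape of what get_monitoring_data_specs returns when monitor_targets is True:
# a composite of the input and target spaces, paired with their source names.
input_space = VectorSpace(dim=100)   # arbitrary input dimensionality
target_space = VectorSpace(dim=10)   # arbitrary target dimensionality

space = CompositeSpace((input_space, target_space))
source = ('features', 'targets')

data_specs = (space, source)
print(data_specs)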
jambonsw/django-improved-user
setup.py
CustomCheckCommand.check_metadata
python
def check_metadata(self):
    metadata = self.distribution.metadata
    missing = []
    for attr in ('name', 'version', 'url'):
        if not (hasattr(metadata, attr) and getattr(metadata, attr)):
            missing.append(attr)
    if not metadata.author and not metadata.maintainer:
        missing.append('author')
        if self.enforce_email:
            missing.append('author_email')
    else:
        if (metadata.author
                and self.enforce_email
                and not metadata.author_email):
            missing.append('author_email')
        if (metadata.maintainer
                and self.enforce_email
                and not metadata.maintainer_email):
            missing.append('maintainer_email')
    if (metadata.author
            and metadata.maintainer
            and metadata.author == metadata.maintainer):
        self.warn(
            'Maintainer should be omitted if identical to Author.\n'
            'See https://www.python.org/dev/peps/pep-0345/'
            '#maintainer-email-optional')
    if (metadata.author_email
            and metadata.maintainer_email
            and metadata.author_email == metadata.maintainer_email):
        self.warn(
            'Maintainer Email should be omitted if'
            "identical to Author's.\n"
            'See https://www.python.org/dev/peps/pep-0345/'
            '#maintainer-email-optional')
    if missing:
        self.warn('missing required meta-data: %s' % ', '.join(missing))
Ensures that all required elements of meta-data are supplied. Specifically: name, version, URL, author or maintainer Warns if any are missing. If enforce-email option is true, author and/or maintainer must specify an email.
https://github.com/jambonsw/django-improved-user/blob/ae91086adf58aeca81d6e0fe1aeb4c7f3f4c2a77/setup.py#L52-L100
from distutils.command.check import check as CheckCommand from operator import attrgetter from os.path import abspath, dirname, join from setuptools import find_packages, setup from setuptools.command.test import test as TestCommand HERE = abspath(dirname(__file__)) def load_file_contents(file_path, as_list=True): abs_file_path = join(HERE, file_path) with open(abs_file_path, encoding='utf-8') as file_pointer: if as_list: return file_pointer.read().splitlines() return file_pointer.read() LONG_DESCRIPTION = ( load_file_contents('README.rst', as_list=False) .split('.. end-badges')[1] .lstrip() ) class CustomCheckCommand(CheckCommand): user_options = CheckCommand.user_options + [ ('disable-metadata', None, "don't check meta-data"), ('enforce-email', 'e', 'Ensure that all author/maintainer use email'), ] negative_opt = {'disable-metadata': 'metadata'} def initialize_options(self): super().initialize_options() self.enforce_email = 0
BSD 2-Clause Simplified License
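A brief sketch of how a custom check command like the one above is typically wired into setup.py and invoked from the shell; the package metadata below is a placeholder and CustomCheckCommand is assumed to be defined in the same setup.py, as in the record.

from setuptools import setup

setup(
    name='example-package',          # placeholder metadata
    version='0.1.0',
    url='https://example.com',
    author='Example Author',
    author_email='author@example.com',
    cmdclass={'check': CustomCheckCommand},  # replace the default check command
)

# Then, from the shell:
#   python setup.py check --enforce-email
# warns if any author/maintainer entry is missing an email address.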
lisa-lab/pylearn2
pylearn2/datasets/hdf5.py
alias_dict.__setitem__
python
def __setitem__(self, keys, value):
    assert isinstance(keys, (list, tuple, string_types))
    if isinstance(keys, (list, tuple)):
        assert all([el is None or isinstance(el, string_types)
                    for el in keys])
    if isinstance(keys, (list, tuple)):
        if keys[1] is not None:
            if keys[0] in self.__a2k__ or keys[0] in super(alias_dict, self).keys():
                raise Exception('The key is already used in the '
                                'dictionary either as key or alias')
            if keys[1] in self.__a2k__ or keys[1] in super(alias_dict, self).keys():
                raise Exception('The alias is already used in the '
                                'dictionary either as key or alias')
            self.__k2a__[keys[0]] = keys[1]
            self.__a2k__[keys[1]] = keys[0]
        keys = keys[0]
    return super(alias_dict, self).__setitem__(keys, value)
Add an element to the dictionary. Parameters ---------- keys: either a tuple `(key, alias)` or any valid key for a dictionary The key and optionally the alias of the new element. value: any input accepted as value by a dictionary The value of the new element. Notes ----- You can add elements to the dictionary as follows: 1) my_dict[key] = value 2) my_dict[key, alias] = value
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/datasets/hdf5.py#L378-L415
__author__ = "Francesco Visin" __license__ = "3-clause BSD" __credits__ = "Francesco Visin and Steven Kearnes" __maintainer__ = "Francesco Visin" try: import h5py except ImportError: h5py = None try: import tables except ImportError: tables = None import warnings from os.path import isfile from pylearn2.compat import OrderedDict from pylearn2.datasets import cache from pylearn2.datasets.dataset import Dataset from pylearn2.datasets.hdf5_deprecated import HDF5DatasetDeprecated from pylearn2.utils import safe_zip, wraps, py_integer_types from pylearn2.utils.iteration import FiniteDatasetIterator from pylearn2.utils.exc import reraise_as from pylearn2.space import Space, CompositeSpace from theano.compat.six import string_types class HDF5Dataset(Dataset): def __new__(cls, filename, X=None, topo_view=None, y=None, load_all=False, cache_size=None, sources=None, spaces=None, aliases=None, use_h5py='auto', **kwargs): if X is not None or topo_view is not None: warnings.warn( 'A dataset is using the old interface that is now deprecated ' 'and will become officially unsupported as of July 27, 2015. ' 'The dataset should use the new interface that inherits from ' 'the dataset class instead of the DenseDesignMatrix class. ' 'Please refer to pylearn2.datasets.hdf5.py for more details ' 'on arguments and details of the new ' 'interface.', DeprecationWarning) return HDF5DatasetDeprecated(filename, X, topo_view, y, load_all, cache_size, **kwargs) else: return super(HDF5Dataset, cls).__new__( cls, filename, sources, spaces, aliases, load_all, cache_size, use_h5py, **kwargs) def __init__(self, filename, sources, spaces, aliases=None, load_all=False, cache_size=None, use_h5py='auto', **kwargs): assert isinstance(filename, string_types) assert isfile(filename), '%s does not exist.' % filename assert isinstance(sources, list) assert all([isinstance(el, string_types) for el in sources]) assert isinstance(spaces, list) assert all([isinstance(el, Space) for el in spaces]) assert len(sources) == len(spaces) if aliases is not None: assert isinstance(aliases, list) assert all([isinstance(el, string_types) for el in aliases if el is not None]) assert len(aliases) == len(sources) assert isinstance(load_all, bool) assert cache_size is None or isinstance(cache_size, py_integer_types) assert isinstance(use_h5py, bool) or use_h5py == 'auto' self.load_all = load_all self._aliases = aliases if aliases else [None for _ in sources] self._sources = sources self.spaces = alias_dict() for i, (source, alias) in enumerate(safe_zip(self._sources, self._aliases)): self.spaces[source, alias] = spaces[i] del spaces, aliases, sources if load_all: warnings.warn('You can load all the data in memory for speed, but ' 'DO NOT use modify all the dataset at once (e.g., ' 'reshape, transform, etc, ...) because your code ' 'will fail if at some point you won\'t have enough ' 'memory to store the dataset alltogheter. 
Use the ' 'iterator to reshape the data after you load it ' 'from the dataset.') datasetCache = cache.datasetCache filename = datasetCache.cache_file(filename) if use_h5py == 'auto': use_h5py = True if tables is None else False if use_h5py: if h5py is None: raise RuntimeError("Could not import h5py.") if cache_size: propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS) settings = list(propfaid.get_cache()) settings[2] = cache_size propfaid.set_cache(*settings) self._fhandler = h5py.File(h5py.h5f.open(filename, fapl=propfaid), mode='r') else: self._fhandler = h5py.File(filename, mode='r') else: if tables is None: raise RuntimeError("Could not import tables.") self._fhandler = tables.openFile(filename, mode='r') self.data = self._read_hdf5(self._sources, self._aliases, load_all, use_h5py) assert len(self.data) != 0, ( 'No dataset was loaded. Please make sure that sources is a list ' 'with at least one value and that the provided values are keys of ' 'the dataset you are trying to load.') super(HDF5Dataset, self).__init__(**kwargs) def _read_hdf5(self, sources, aliases, load_all=False, use_h5py=True): data = alias_dict() if use_h5py: for s, a in safe_zip(sources, aliases): if load_all: data[s, a] = self._fhandler[s][:] else: data[s, a] = self._fhandler[s] data[s].ndim = len(data[s].shape) else: for s, a in safe_zip(sources, aliases): if load_all: data[s, a](self._fhandler.getNode('/', s)[:]) else: data[s, a] = self._fhandler.getNode('/', s) return data @wraps(Dataset.iterator, assigned=(), updated=(), append=True) def iterator(self, mode=None, data_specs=None, batch_size=None, num_batches=None, rng=None, return_tuple=False, **kwargs): if data_specs is None: data_specs = (self._get_sources, self._get_spaces) [mode, batch_size, num_batches, rng, data_specs] = self._init_iterator( mode, batch_size, num_batches, rng, data_specs) convert = None return FiniteDatasetIterator(self, mode(self.get_num_examples(), batch_size, num_batches, rng), data_specs=data_specs, return_tuple=return_tuple, convert=convert) def _get_sources(self): return tuple([alias if alias else source for alias, source in safe_zip(self._aliases, self._sources)]) def _get_spaces(self): space = [self.spaces[s] for s in self._get_sources] return space[0] if len(space) == 1 else tuple(space) def get_data_specs(self, source_or_alias=None): if source_or_alias is None: source_or_alias = self._get_sources() if isinstance(source_or_alias, (list, tuple)): space = tuple([self.spaces[s] for s in source_or_alias]) space = CompositeSpace(space) else: space = self.spaces[source_or_alias] return (space, source_or_alias) def get_data(self): return tuple([self.data[s] for s in self._get_sources()]) def get(self, sources, indexes): assert isinstance(sources, (tuple, list)) and len(sources) > 0, ( 'sources should be an instance of tuple and not empty') assert all([isinstance(el, string_types) for el in sources]), ( 'sources elements should be strings') assert isinstance(indexes, (tuple, list, slice, py_integer_types)), ( 'indexes should be either an int, a slice or a tuple/list of ints') if isinstance(indexes, (tuple, list)): assert len(indexes) > 0 and all([isinstance(i, py_integer_types) for i in indexes]), ( 'indexes elements should be ints') rval = [] for s in sources: try: sdata = self.data[s] except ValueError as e: reraise_as(ValueError( 'The requested source %s is not part of the dataset' % sources[s], *e.args)) if (isinstance(indexes, (slice, py_integer_types)) or len(indexes) == 1): rval.append(sdata[indexes]) else: warnings.warn('Accessing non 
sequential elements of an ' 'HDF5 file will be at best VERY slow. Avoid ' 'using iteration schemes that access ' 'random/shuffled data with hdf5 datasets!!') val = [] [val.append(sdata[idx]) for idx in indexes] rval.append(val) return tuple(rval) @wraps(Dataset.get_num_examples, assigned=(), updated=()) def get_num_examples(self, source_or_alias=None): assert source_or_alias is None or isinstance(source_or_alias, string_types) if source_or_alias is None: alias = self._get_sources() alias = alias[0] if isinstance(alias, (list, tuple)) else alias data = self.data[alias] else: data = self.data[source_or_alias] return data.shape[0] class alias_dict(OrderedDict): def __init__(self, **kwargs): self.__a2k__ = {} self.__k2a__ = {} super(alias_dict, self).__init__(**kwargs) def __getitem__(self, key_or_alias): assert isinstance(key_or_alias, string_types) try: return super(alias_dict, self).__getitem__(key_or_alias) except KeyError: return super(alias_dict, self).__getitem__( self.__a2k__[key_or_alias])
BSD 3-Clause New or Revised License
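A usage sketch for alias_dict.__setitem__ above, following the two call forms listed in its docstring; it assumes alias_dict can be imported from the module path shown in the record (pylearn2 and its dependencies installed).

from pylearn2.datasets.hdf5 import alias_dict  # module path taken from the record

d = alias_dict()
d['features'] = 'X data'        # plain key, no alias
d['targets', 'y'] = 'y data'    # key plus alias

print(d['targets'])             # 'y data', looked up by key
print(d['y'])                   # 'y data', looked up by alias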
openforcefield/openff-qcsubmit
openff/qcsubmit/utils/utils.py
condense_molecules
python
def condense_molecules(molecules: List[off.Molecule]) -> off.Molecule:
    molecule = molecules.pop()
    for conformer in molecules:
        _, atom_map = off.Molecule.are_isomorphic(
            conformer, molecule, return_atom_map=True
        )
        mapped_mol = conformer.remap(atom_map)
        for geometry in mapped_mol.conformers:
            molecule.add_conformer(geometry)
    return molecule
Take a list of identical molecules in different conformers and collapse them making sure that they are in the same order.
https://github.com/openforcefield/openff-qcsubmit/blob/4a239bfe606b541b4088a0f95da252ed21526197/openff/qcsubmit/utils/utils.py#L77-L89
from typing import Dict, Generator, List, Tuple from openff.toolkit import topology as off from openff.toolkit.utils.toolkits import ( RDKitToolkitWrapper, UndefinedStereochemistryError, ) def get_data(relative_path): import os from pkg_resources import resource_filename fn = resource_filename("openff.qcsubmit", os.path.join("data", relative_path)) if not os.path.exists(fn): raise ValueError( f"Sorry! {fn} does not exist. If you just added it, you'll have to re-install" ) return fn def check_missing_stereo(molecule: off.Molecule) -> bool: try: _ = off.Molecule.from_smiles( smiles=molecule.to_smiles(isomeric=True, explicit_hydrogens=True), hydrogens_are_explicit=True, allow_undefined_stereo=False, toolkit_registry=RDKitToolkitWrapper(), ) return False except UndefinedStereochemistryError: return True def clean_strings(string_list: List[str]) -> List[str]: clean_string = [] for string in string_list: new_string = string.strip() clean_string.append(new_string.strip(",")) return clean_string def remap_list(target_list: List[int], mapping: Dict[int, int]) -> List[int]: return [mapping[x] for x in target_list]
MIT License
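A hedged usage sketch for condense_molecules above. It assumes the function is importable from the module path shown in the record and that the OpenFF toolkit (with a conformer-generation backend such as RDKit) is installed.

from openff.toolkit.topology import Molecule
from openff.qcsubmit.utils.utils import condense_molecules  # path taken from the record

# Two copies of the same molecule, each carrying its own conformer.
molecules = []
for _ in range(2):
    mol = Molecule.from_smiles("CCO")
    mol.generate_conformers(n_conformers=1)
    molecules.append(mol)

combined = condense_molecules(molecules)
print(combined.n_conformers)  # expected: 2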
neurosim-lab/netpyne
netpyne/network/modify.py
modifySynMechs
python
def modifySynMechs(self, params, updateMasterAllCells=False):
    from .. import sim

    sim.timing('start', 'modifySynMechsTime')

    if sim.rank == 0:
        print('Modfying synaptic mech parameters...')

    for cell in self.cells:
        cell.modifySynMechs(params)

    if updateMasterAllCells:
        sim._gatherCells()

    sim.timing('stop', 'modifySynMechsTime')
    if sim.rank == 0 and sim.cfg.timing:
        print(('  Done; syn mechs modification time = %0.2f s.'
               % sim.timingData['modifySynMechsTime']))
Function for/to <short description of `netpyne.network.modify.modifySynMechs`> Parameters ---------- self : <type> <Short description of self> **Default:** *required* params : <type> <Short description of params> **Default:** *required* updateMasterAllCells : bool <Short description of updateMasterAllCells> **Default:** ``False`` **Options:** ``<option>`` <description of option>
https://github.com/neurosim-lab/netpyne/blob/78f8310449330c781558271106a529566bbafe07/netpyne/network/modify.py#L61-L98
from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() def modifyCells(self, params, updateMasterAllCells=False): from .. import sim sim.timing('start', 'modifyCellsTime') if sim.rank==0: print('Modfying cell parameters...') for cell in self.cells: cell.modify(params) if updateMasterAllCells: sim._gatherCells() sim.timing('stop', 'modifyCellsTime') if sim.rank == 0 and sim.cfg.timing: print((' Done; cells modification time = %0.2f s.' % sim.timingData['modifyCellsTime']))
MIT License
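The docstring above is a template, so the following is only a rough, hypothetical sketch of how modifySynMechs might be called on a built network; the 'conds' filter and the attribute being changed are illustrative guesses, not keys documented in the record.

from netpyne import sim

# Hypothetical parameter dict: keys below are assumptions for illustration only.
params = {
    'conds': {'mech': 'exp2syn'},   # which synaptic mechanisms to match (assumed key)
    'tau2': 10.0,                   # new value applied to matching mechanisms (assumed key)
}

# Assumes a network has already been created (e.g. via sim.create(...)).
sim.net.modifySynMechs(params, updateMasterAllCells=False)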
forseti-security/forseti-security
google/cloud/forseti/services/model/service.py
GrpcModeller.DeleteModel
python
def DeleteModel(self, request, _):
    model_name = request.handle
    if not model_name:
        LOGGER.warning('No model name in request: %s', request)
        status = model_pb2.DeleteModelReply.FAIL
        return model_pb2.DeleteModelReply(status=status)
    try:
        self.modeller.delete_model(model_name)
        status = model_pb2.DeleteModelReply.SUCCESS
    except Exception:
        LOGGER.exception('Unable to delete model: %s', model_name)
        status = model_pb2.DeleteModelReply.FAIL
    return model_pb2.DeleteModelReply(status=status)
Deletes a model and all associated data. Args: request (object): pb2 object of DeleteModelRequest _ (object): Not used Returns: object: pb2 object of DeleteModelReply
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/services/model/service.py#L95-L121
from builtins import object from google.cloud.forseti.common.util import string_formats from google.cloud.forseti.services.model import model_pb2 from google.cloud.forseti.services.model import model_pb2_grpc from google.cloud.forseti.services.model import modeller from google.cloud.forseti.common.util import logger LOGGER = logger.get_logger(__name__) class GrpcModeller(model_pb2_grpc.ModellerServicer): HANDLE_KEY = 'handle' def _get_handle(self, context): metadata = context.invocation_metadata() metadata_dict = {} for key, value in metadata: metadata_dict[key] = value return metadata_dict[self.HANDLE_KEY] def __init__(self, modeller_api): super(GrpcModeller, self).__init__() self.modeller = modeller_api def Ping(self, request, _): return model_pb2.PingReply(data=request.data) def CreateModel(self, request, context): LOGGER.debug('Received request to create model: %s', request) model = self.modeller.create_model(request.type, request.name, request.id, request.background) created_at_str = self._get_model_created_at_str(model) LOGGER.debug('Model %s created at: %s', model, created_at_str) reply = model_pb2.CreateModelReply(model=model_pb2.ModelSimplified( name=model.name, handle=model.handle, status=model.state, createdAt=created_at_str, description=model.description)) return reply
Apache License 2.0
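A hedged client-side sketch for DeleteModel above. The stub class name follows the usual gRPC codegen convention for a ModellerServicer, and the channel address and model handle are placeholders.

import grpc

from google.cloud.forseti.services.model import model_pb2, model_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')         # placeholder address
stub = model_pb2_grpc.ModellerStub(channel)                # assumed stub name from codegen

request = model_pb2.DeleteModelRequest(handle='model-handle-123')  # placeholder handle
reply = stub.DeleteModel(request)

if reply.status == model_pb2.DeleteModelReply.SUCCESS:
    print('model deleted')
else:
    print('delete failed')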
byceps/byceps
byceps/services/shop/order/service.py
_build_line_items
python
def _build_line_items(
    cart_items: list[CartItem], order: DbOrder
) -> Iterator[DbLineItem]:
    for cart_item in cart_items:
        article = cart_item.article
        quantity = cart_item.quantity
        line_amount = cart_item.line_amount

        yield DbLineItem(
            order,
            article.item_number,
            article.type_,
            article.description,
            article.price,
            article.tax_rate,
            quantity,
            line_amount,
            article.processing_required,
        )
Build line items from the cart's content.
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/services/shop/order/service.py#L128-L147
from __future__ import annotations from datetime import datetime from typing import Iterator, Mapping, Optional, Sequence from flask import current_app from flask_babel import lazy_gettext from sqlalchemy.exc import IntegrityError from ....database import db, paginate, Pagination from ....events.shop import ShopOrderCanceled, ShopOrderPaid, ShopOrderPlaced from ....typing import UserID from ...user import service as user_service from ..article import service as article_service from ..cart.models import Cart, CartItem from ..shop.dbmodels import Shop as DbShop from ..shop import service as shop_service from ..shop.transfer.models import ShopID from ..storefront import service as storefront_service from ..storefront.transfer.models import StorefrontID from .dbmodels.line_item import LineItem as DbLineItem from .dbmodels.order import Order as DbOrder from .dbmodels.order_event import OrderEvent as DbOrderEvent, OrderEventData from . import action_service, event_service, sequence_service from .transfer.models import ( Address, Order, OrderID, LineItem, Orderer, OrderNumber, OrderState, PaymentState, ) class OrderFailed(Exception): pass def place_order( storefront_id: StorefrontID, orderer: Orderer, cart: Cart, *, created_at: Optional[datetime] = None, ) -> tuple[Order, ShopOrderPlaced]: storefront = storefront_service.get_storefront(storefront_id) shop = shop_service.get_shop(storefront.shop_id) orderer_user = user_service.get_user(orderer.user_id) order_number_sequence = sequence_service.get_order_number_sequence( storefront.order_number_sequence_id ) order_number = sequence_service.generate_order_number( order_number_sequence.id ) cart_items = cart.get_items() order = _build_order(shop.id, order_number, orderer, created_at) line_items = list(_build_line_items(cart_items, order)) order.total_amount = cart.calculate_total_amount() order.processing_required = any( line_item.processing_required for line_item in line_items ) db.session.add(order) db.session.add_all(line_items) _reduce_article_stock(cart_items) try: db.session.commit() except IntegrityError as e: current_app.logger.error('Order %s failed: %s', order_number, e) db.session.rollback() raise OrderFailed() order_dto = _order_to_transfer_object(order) event = ShopOrderPlaced( occurred_at=order.created_at, initiator_id=orderer_user.id, initiator_screen_name=orderer_user.screen_name, order_id=order.id, order_number=order.order_number, orderer_id=orderer_user.id, orderer_screen_name=orderer_user.screen_name, ) return order_dto, event def _build_order( shop_id: ShopID, order_number: OrderNumber, orderer: Orderer, created_at: Optional[datetime], ) -> DbOrder: return DbOrder( shop_id, order_number, orderer.user_id, orderer.first_names, orderer.last_name, orderer.country, orderer.zip_code, orderer.city, orderer.street, created_at=created_at, )
BSD 3-Clause New or Revised License
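A minimal usage sketch for `_build_line_items` above: it is a generator, so the caller materializes it before computing the order totals, mirroring `place_order` from the record's context (`cart` and `order` are assumed to be a populated `Cart` and `DbOrder`):

line_items = list(_build_line_items(cart.get_items(), order))

order.total_amount = cart.calculate_total_amount()
order.processing_required = any(
    line_item.processing_required for line_item in line_items
)

db.session.add(order)
db.session.add_all(line_items)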
unofficial-memsource/memsource-cli-client
memsource_cli/models/segmentation_rule_dto.py
SegmentationRuleDto.created_by
python
def created_by(self, created_by):
    self._created_by = created_by
Sets the created_by of this SegmentationRuleDto.

created by user  # noqa: E501

:param created_by: The created_by of this SegmentationRuleDto.  # noqa: E501
:type: UserReference
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/segmentation_rule_dto.py#L222-L231
import pprint import re import six from memsource_cli.models.user_reference import UserReference class SegmentationRuleDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'name': 'str', 'locale': 'str', 'primary': 'bool', 'filename': 'str', 'date_created': 'datetime', 'created_by': 'UserReference' } attribute_map = { 'id': 'id', 'name': 'name', 'locale': 'locale', 'primary': 'primary', 'filename': 'filename', 'date_created': 'dateCreated', 'created_by': 'createdBy' } def __init__(self, id=None, name=None, locale=None, primary=None, filename=None, date_created=None, created_by=None): self._id = None self._name = None self._locale = None self._primary = None self._filename = None self._date_created = None self._created_by = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name if locale is not None: self.locale = locale if primary is not None: self.primary = primary if filename is not None: self.filename = filename if date_created is not None: self.date_created = date_created if created_by is not None: self.created_by = created_by @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def locale(self): return self._locale @locale.setter def locale(self, locale): self._locale = locale @property def primary(self): return self._primary @primary.setter def primary(self, primary): self._primary = primary @property def filename(self): return self._filename @filename.setter def filename(self, filename): self._filename = filename @property def date_created(self): return self._date_created @date_created.setter def date_created(self, date_created): self._date_created = date_created @property def created_by(self): return self._created_by @created_by.setter
Apache License 2.0
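A minimal usage sketch for the `created_by` setter above: as a generated swagger model property it is driven by plain attribute assignment (the argument-free `UserReference()` call is an assumption about that class's constructor):

from memsource_cli.models.segmentation_rule_dto import SegmentationRuleDto
from memsource_cli.models.user_reference import UserReference

rule = SegmentationRuleDto(name='default.srx', locale='en')
rule.created_by = UserReference()  # assumption: no required constructor arguments
print(rule.created_by)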
shiyuechengineer/meraki-dashboard
meraki_v0/api/wireless_health.py
WirelessHealth.getNetworkClientsConnectionStats
python
def getNetworkClientsConnectionStats(self, networkId: str, **kwargs):
    kwargs.update(locals())

    if 'band' in kwargs:
        options = ['2.4', '5']
        assert kwargs['band'] in options, f'''"band" cannot be "{kwargs['band']}", & must be set to one of: {options}'''

    metadata = {
        'tags': ['Wireless health'],
        'operation': 'getNetworkClientsConnectionStats',
    }
    resource = f'/networks/{networkId}/clients/connectionStats'

    query_params = ['t0', 't1', 'timespan', 'band', 'ssid', 'vlan', 'apTag']
    params = {k.strip(): v for (k, v) in kwargs.items() if k.strip() in query_params}

    return self._session.get(metadata, resource, params)
**Aggregated connectivity info for this network, grouped by clients**
https://developer.cisco.com/meraki/api/#!get-network-clients-connection-stats

- networkId (string)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 180 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 7 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 7 days.
- band (string): Filter results by band (either '2.4' or '5'). Note that data prior to February 2020 will not have band information.
- ssid (integer): Filter results by SSID
- vlan (integer): Filter results by VLAN
- apTag (string): Filter results by AP Tag
https://github.com/shiyuechengineer/meraki-dashboard/blob/f00442acf762a94e7e446f80a2485d120e7090d5/meraki_v0/api/wireless_health.py#L6-L36
class WirelessHealth(object):
    def __init__(self, session):
        super(WirelessHealth, self).__init__()
        self._session = session
MIT License
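A minimal usage sketch for `getNetworkClientsConnectionStats` above, assuming a `session` object of the kind the dashboard client normally injects (it must expose `get(metadata, resource, params)`); the network id and timespan are placeholders:

wireless_health = WirelessHealth(session)  # `session` assumed to expose get(metadata, resource, params)
stats = wireless_health.getNetworkClientsConnectionStats(
    'N_123456', timespan=3600, band='2.4'
)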
pranjaldatta/pyvision
pyvision/misc/mtcnn/model.py
MTCNN.download_weights
python
def download_weights(self, wtspath):
    if os.path.exists(wtspath) and len(os.listdir(wtspath)) > 0:
        return 1
    elif os.path.exists(__PREFIX__ + "/weights/") and len(os.listdir(__PREFIX__+"/weights/")) == 0:
        os.rmdir(__PREFIX__+"/weights/")
    elif not os.path.exists(wtspath):
        os.mkdir(__PREFIX__ + "/weights/")

    with open(__PREFIX__+"/config/weights_download.json") as fp:
        json_file = json.load(fp)

    try:
        for net in ["pnet", "rnet", "onet"]:
            print("downloading {}.npy".format(net))
            url = 'https://drive.google.com/uc?id={}'.format(json_file[net])
            gdown.download(url, wtspath+"/{}.npy".format(net), quiet=False)
    except Exception as exp:
        print("Error at weights download. ", exp)
        exit()

    return 0
download_weights

Parameters
----------
wtspath : [str; path]
    [the full path to the ./weights/ folder]

Returns
-------
[int]
    [1 : weights already exist
     0 : weights have been downloaded successfully]
https://github.com/pranjaldatta/pyvision/blob/ad57b27cf790c267772402e47bd9e140ba6f549e/pyvision/misc/mtcnn/model.py#L58-L92
import numpy as np from PIL import Image import os import json import gdown import cv2 import torch from .nets import PNet, RNet, ONet from .stage_one import first_stage from .stage_two import get_image_boxes from .utils.visualize import show_boxes from .utils.utils import nms, convert_to_square, calibrate_boxes __PREFIX__ = os.path.dirname(os.path.realpath(__file__)) class MTCNN: def __init__(self, device="cpu", min_face_size = 20.0, conf_thresh=[0.7, 0.7, 0.8], nms_thresh=[0.7, .7, .7], pretrained=True): if device is not "cpu": raise NotImplementedError("gpu support not implemented. cpu only.") if len(conf_thresh) != 3 or len(nms_thresh) != 3: raise AssertionError("conf_thresh or nms_thresh of len :{},{} while expected size: 3".format(len(conf_thresh), len(nms_thresh))) if min_face_size <= 0.0 or min_face_size is None: raise ValueError("min_face_size expected > 0.0 . Found {}".format(min_face_size)) if not pretrained: raise NotImplementedError("Only Inference supported. Found pretrained=", pretrained) self.min_face_size = min_face_size self.conf_thresh = conf_thresh self.nms_thresh = nms_thresh self.pretrained = pretrained self.weights_path = __PREFIX__ + "/weights/" if pretrained: resp = self.download_weights(self.weights_path) if resp == 1: print("Weight files exist.") elif resp == 0: print("Weight files downloaded.") self.pnet = PNet() self.rnet = RNet() self.onet = ONet()
BSD 3-Clause New or Revised License
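A minimal usage sketch for `download_weights` above: the method is invoked by the `MTCNN` constructor itself, which interprets the return codes exactly as in the record's context (the import path mirrors the repository layout and is an assumption about how the package is installed):

from pyvision.misc.mtcnn.model import MTCNN

# The constructor calls download_weights() and reports:
# 1 -> weight files already cached, 0 -> weight files just downloaded.
detector = MTCNN(min_face_size=20.0, pretrained=True)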
azure/azure-linux-extensions
DSC/azure/servicemanagement/servicebusmanagementservice.py
ServiceBusManagementService.list_topics
python
def list_topics(self, name):
    response = self._perform_get(
        self._get_list_topics_path(name),
        None)
    return _convert_response_to_feeds(response, TopicDescription)
Retrieves the topics in the service namespace.

name: Name of the service bus namespace.
https://github.com/azure/azure-linux-extensions/blob/128c1c5babfe5f74ad3c00be95abce3c54311e91/DSC/azure/servicemanagement/servicebusmanagementservice.py#L133-L143
from azure import ( MANAGEMENT_HOST, _convert_response_to_feeds, _str, _validate_not_none, ) from azure.servicemanagement import ( _ServiceBusManagementXmlSerializer, QueueDescription, TopicDescription, NotificationHubDescription, RelayDescription, ) from azure.servicemanagement.servicemanagementclient import ( _ServiceManagementClient, ) class ServiceBusManagementService(_ServiceManagementClient): def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST): super(ServiceBusManagementService, self).__init__( subscription_id, cert_file, host) def get_regions(self): response = self._perform_get( self._get_path('services/serviceBus/Regions/', None), None) return _convert_response_to_feeds( response, _ServiceBusManagementXmlSerializer.xml_to_region) def list_namespaces(self): response = self._perform_get( self._get_path('services/serviceBus/Namespaces/', None), None) return _convert_response_to_feeds( response, _ServiceBusManagementXmlSerializer.xml_to_namespace) def get_namespace(self, name): response = self._perform_get( self._get_path('services/serviceBus/Namespaces', name), None) return _ServiceBusManagementXmlSerializer.xml_to_namespace( response.body) def create_namespace(self, name, region): _validate_not_none('name', name) return self._perform_put( self._get_path('services/serviceBus/Namespaces', name), _ServiceBusManagementXmlSerializer.namespace_to_xml(region)) def delete_namespace(self, name): _validate_not_none('name', name) return self._perform_delete( self._get_path('services/serviceBus/Namespaces', name), None) def check_namespace_availability(self, name): _validate_not_none('name', name) response = self._perform_get( self._get_path('services/serviceBus/CheckNamespaceAvailability', None) + '/?namespace=' + _str(name), None) return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability( response.body) def list_queues(self, name): _validate_not_none('name', name) response = self._perform_get( self._get_list_queues_path(name), None) return _convert_response_to_feeds(response, QueueDescription)
Apache License 2.0
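A minimal usage sketch for `list_topics` above; the subscription id and certificate path are placeholders, and the `.name` attribute on the returned `TopicDescription` feed entries is an assumption:

from azure.servicemanagement import ServiceBusManagementService

sbs = ServiceBusManagementService('<subscription-id>', cert_file='mycert.pem')
for topic in sbs.list_topics('mynamespace'):
    print(topic.name)  # assumption: feed entries expose a name attribute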
pag-crypto/zkmbs
tls_testing/tlslite-ng-0.8.0-alpha40/tlslite/mathtls.py
calc_key
python
def calc_key(version, secret, cipher_suite, label, handshake_hashes=None,
             client_random=None, server_random=None, output_length=None):
    if version == (3, 0):
        if label == b"client finished":
            senderStr = b"\x43\x4C\x4E\x54"
            return handshake_hashes.digestSSL(secret, senderStr)
        elif label == b"server finished":
            senderStr = b"\x53\x52\x56\x52"
            return handshake_hashes.digestSSL(secret, senderStr)
        else:
            assert label in [b"key expansion", b"master secret"]
            func = PRF_SSL
    elif version in ((3, 1), (3, 2)):
        func = PRF
        if label == b"extended master secret":
            seed = handshake_hashes.digest('md5') + handshake_hashes.digest('sha1')
        elif label in [b"server finished", b"client finished"]:
            seed = handshake_hashes.digest()
        else:
            assert label in [b"key expansion", b"master secret"]
    else:
        assert version == (3, 3)
        if cipher_suite in CipherSuite.sha384PrfSuites:
            func = PRF_1_2_SHA384
            if label in [b"extended master secret", b"server finished",
                         b"client finished"]:
                seed = handshake_hashes.digest('sha384')
            else:
                assert label in [b"key expansion", b"master secret"]
        else:
            func = PRF_1_2
            if label in [b"extended master secret", b"server finished",
                         b"client finished"]:
                seed = handshake_hashes.digest('sha256')
            else:
                assert label in [b"key expansion", b"master secret"]

    if label == b"key expansion":
        seed = server_random + client_random
    if label == b"master secret":
        seed = client_random + server_random

    if func == PRF_SSL:
        return func(secret, seed, output_length)
    return func(secret, label, seed, output_length)
Method for calculating different keys depending on input.

It can be used to calculate finished value, master secret,
extended master secret or key expansion.

:param version: TLS protocol version
:type version: tuple(int, int)
:param bytearray secret: master secret or premasterSecret which will be
    used in the PRF.
:param int cipher_suite: Negotiated cipher suite of the connection.
:param bytes label: label for the key you want to calculate
    (ex. 'master secret', 'extended master secret', etc).
:param handshake_hashes: running hash of the handshake messages
    needed for calculating extended master secret or finished value.
:type handshake_hashes: ~tlslite.handshakehashes.HandshakeHashes
:param bytearray client_random: client random needed for calculating
    master secret or key expansion.
:param bytearray server_random: server random needed for calculating
    master secret or key expansion.
:param int output_length: Number of bytes to output.
https://github.com/pag-crypto/zkmbs/blob/895cf22144df1967b5582b3721d4a6967a5fea9c/tls_testing/tlslite-ng-0.8.0-alpha40/tlslite/mathtls.py#L827-L908
from .utils.compat import * from .utils.cryptomath import * from .constants import CipherSuite from .utils import tlshashlib as hashlib from .utils import tlshmac as hmac from .utils.deprecations import deprecated_method FFDHE_PARAMETERS = {} RFC2409_GROUP1 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A63A3620 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC2409 group 1"] = RFC2409_GROUP1 RFC2409_GROUP2 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE65381 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC2409 group 2"] = RFC2409_GROUP2 RFC3526_GROUP5 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 5"] = RFC3526_GROUP5 RFC3526_GROUP14 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 14"] = RFC3526_GROUP14 RFC3526_GROUP15 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 15"] = RFC3526_GROUP15 RFC3526_GROUP16 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 16"] = RFC3526_GROUP16 RFC3526_GROUP17 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 17"] = RFC3526_GROUP17 RFC3526_GROUP18 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 
7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF"""), 16)) FFDHE_PARAMETERS["RFC3526 group 18"] = RFC3526_GROUP18 goodGroupParameters = [ (2, int(remove_whitespace( """ EEAF0AB9 ADB38DD6 9C33F80A FA8FC5E8 60726187 75FF3C0B 9EA2314C 9C256576 D674DF74 96EA81D3 383B4813 D692C6E0 E0D5D8E2 50B98BE4 8E495C1D 6089DAD1 5DC7D7B4 6154D6B6 CE8EF4AD 69B15D49 82559B29 7BCF1885 C529F566 660E57EC 68EDBC3C 05726CC0 2FD4CBF4 976EAA9A FD5138FE 8376435B 9FC61D2F C0EB06E3"""), 16)), (2, int(remove_whitespace( """ 9DEF3CAF B939277A B1F12A86 17A47BBB DBA51DF4 99AC4C80 BEEEA961 4B19CC4D 5F4F5F55 6E27CBDE 51C6A94B E4607A29 1558903B A0D0F843 80B655BB 9A22E8DC DF028A7C EC67F0D0 8134B1C8 B9798914 9B609E0B E3BAB63D 47548381 DBC5B1FC 764E3F4B 53DD9DA1 158BFD3E 2B9C8CF5 6EDF0195 39349627 DB2FD53D 24B7C486 65772E43 7D6C7F8C E442734A F7CCB7AE 837C264A E3A9BEB8 7F8A2FE9 B8B5292E 5A021FFF 5E91479E 8CE7A28C 2442C6F3 15180F93 499A234D CF76E3FE D135F9BB"""), 16)), (2, int(remove_whitespace( """ AC6BDB41 324A9A9B F166DE5E 1389582F AF72B665 1987EE07 FC319294 3DB56050 A37329CB B4A099ED 8193E075 7767A13D D52312AB 4B03310D CD7F48A9 DA04FD50 E8083969 EDB767B0 CF609517 9A163AB3 661A05FB D5FAAAE8 2918A996 2F0B93B8 55F97993 EC975EEA A80D740A DBF4FF74 7359D041 D5C33EA7 1D281E44 6B14773B CA97B43A 23FB8016 76BD207A 436C6481 F1D2B907 8717461A 5B9D32E6 88F87748 544523B5 24B0D57D 5EA77A27 75D2ECFA 032CFBDB F52FB378 61602790 04E57AE6 AF874E73 03CE5329 9CCC041C 7BC308D8 2A5698F3 A8D0C382 71AE35F8 E9DBFBB6 94B5C803 D89F7AE4 35DE236D 525F5475 9B65E372 FCD68EF2 0FA7111F 9E4AFF73"""), 16)), (5, int(remove_whitespace( """ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F 
B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"""), 16)), (5, int(remove_whitespace( """ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF"""), 16)), (5, int(remove_whitespace( """ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF"""), 16)), (19, int(remove_whitespace( """ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 
4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF"""), 16))] for num, group in enumerate(goodGroupParameters, 1): FFDHE_PARAMETERS["RFC5054 group {0}".format(num)] = group goodGroupParameters.append((2, goodGroupParameters[3][1])) goodGroupParameters.append((5, goodGroupParameters[6][1])) RFC5114_GROUP22 = ( int(remove_whitespace(""" A4D1CBD5 C3FD3412 6765A442 EFB99905 F8104DD2 58AC507F D6406CFF 14266D31 266FEA1E 5C41564B 777E690F 5504F213 160217B4 B01B886A 5E91547F 9E2749F4 D7FBD7D3 B9A92EE1 909D0D22 63F80A76 A6A24C08 7A091F53 1DBF0A01 69B6A28A D662A4D1 8E73AFA3 2D779D59 18D08BC8 858F4DCE F97C2A24 855E6EEB 22B3B2E5"""), 16), int(remove_whitespace(""" B10B8F96 A080E01D DE92DE5E AE5D54EC 52C99FBC FB06A3C6 9A6A9DCA 52D23B61 6073E286 75A23D18 9838EF1E 2EE652C0 13ECB4AE A9061123 24975C3C D49B83BF ACCBDD7D 90C4BD70 98488E9C 219A7372 4EFFD6FA E5644738 FAA31A4F F55BCCC0 A151AF5F 0DC8B4BD 45BF37DF 365C1A65 E68CFDA7 6D4DA708 DF1FB2BC 2E4A4371"""), 16)) FFDHE_PARAMETERS["RFC5114 group 22"] = RFC5114_GROUP22 RFC5114_GROUP23 = ( int(remove_whitespace(""" AC4032EF 4F2D9AE3 9DF30B5C 8FFDAC50 6CDEBE7B 89998CAF 74866A08 CFE4FFE3 A6824A4E 10B9A6F0 DD921F01 A70C4AFA AB739D77 00C29F52 C57DB17C 620A8652 BE5E9001 A8D66AD7 C1766910 1999024A F4D02727 5AC1348B B8A762D0 521BC98A E2471504 22EA1ED4 09939D54 DA7460CD B5F6C6B2 50717CBE F180EB34 118E98D1 19529A45 D6F83456 6E3025E3 16A330EF BB77A86F 0C1AB15B 051AE3D4 28C8F8AC B70A8137 150B8EEB 
10E183ED D19963DD D9E263E4 770589EF 6AA21E7F 5F2FF381 B539CCE3 409D13CD 566AFBB4 8D6C0191 81E1BCFE 94B30269 EDFE72FE 9B6AA4BD 7B5A0F1C 71CFFF4C 19C418E1 F6EC0179 81BC087F 2A7065B3 84B890D3 191F2BFA"""), 16), int(remove_whitespace(""" AD107E1E 9123A9D0 D660FAA7 9559C51F A20D64E5 683B9FD1 B54B1597 B61D0A75 E6FA141D F95A56DB AF9A3C40 7BA1DF15 EB3D688A 309C180E 1DE6B85A 1274A0A6 6D3F8152 AD6AC212 9037C9ED EFDA4DF8 D91E8FEF 55B7394B 7AD5B7D0 B6C12207 C9F98D11 ED34DBF6 C6BA0B2C 8BBC27BE 6A00E0A0 B9C49708 B3BF8A31 70918836 81286130 BC8985DB 1602E714 415D9330 278273C7 DE31EFDC 7310F712 1FD5A074 15987D9A DC0A486D CDF93ACC 44328387 315D75E1 98C641A4 80CD86A1 B9E587E8 BE60E69C C928B2B9 C52172E4 13042E9B 23F10B0E 16E79763 C9B53DCF 4BA80A29 E3FB73C1 6B8E75B9 7EF363E2 FFA31F71 CF9DE538 4E71B81C 0AC4DFFE 0C10E64F"""), 16)) FFDHE_PARAMETERS["RFC5114 group 23"] = RFC5114_GROUP23 RFC5114_GROUP24 = ( int(remove_whitespace(""" 3FB32C9B 73134D0B 2E775066 60EDBD48 4CA7B18F 21EF2054 07F4793A 1A0BA125 10DBC150 77BE463F FF4FED4A AC0BB555 BE3A6C1B 0C6B47B1 BC3773BF 7E8C6F62 901228F8 C28CBB18 A55AE313 41000A65 0196F931 C77A57F2 DDF463E5 E9EC144B 777DE62A AAB8A862 8AC376D2 82D6ED38 64E67982 428EBC83 1D14348F 6F2F9193 B5045AF2 767164E1 DFC967C1 FB3F2E55 A4BD1BFF E83B9C80 D052B985 D182EA0A DB2A3B73 13D3FE14 C8484B1E 052588B9 B7D2BBD2 DF016199 ECD06E15 57CD0915 B3353BBB 64E0EC37 7FD02837 0DF92B52 C7891428 CDC67EB6 184B523D 1DB246C3 2F630784 90F00EF8 D647D148 D4795451 5E2327CF EF98C582 664B4C0F 6CC41659"""), 16), int(remove_whitespace(""" 87A8E61D B4B6663C FFBBD19C 65195999 8CEEF608 660DD0F2 5D2CEED4 435E3B00 E00DF8F1 D61957D4 FAF7DF45 61B2AA30 16C3D911 34096FAA 3BF4296D 830E9A7C 209E0C64 97517ABD 5A8A9D30 6BCF67ED 91F9E672 5B4758C0 22E0B1EF 4275BF7B 6C5BFC11 D45F9088 B941F54E B1E59BB8 BC39A0BF 12307F5C 4FDB70C5 81B23F76 B63ACAE1 CAA6B790 2D525267 35488A0E F13C6D9A 51BFA4AB 3AD83477 96524D8E F6A167B5 A41825D9 67E144E5 14056425 1CCACB83 E6B486F6 B3CA3F79 71506026 C0B857F6 89962856 DED4010A BD0BE621 C3A3960A 54E710C3 75F26375 D7014103 A4B54330 C198AF12 6116D227 6E11715F 693877FA D7EF09CA DB094AE9 1E1A1597"""), 16)) FFDHE_PARAMETERS["RFC5114 group 24"] = RFC5114_GROUP24 RFC7919_GROUPS = [] FFDHE2048 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1 D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9 7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561 2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935 984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735 30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19 0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61 9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73 3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA 886B4238 61285C97 FFFFFFFF FFFFFFFF"""), 16)) goodGroupParameters.append(FFDHE2048) RFC7919_GROUPS.append(FFDHE2048) FFDHE_PARAMETERS["RFC7919 ffdhe2048"] = FFDHE2048 FFDHE3072 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1 D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9 7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561 2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935 984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735 30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19 0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61 9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73 3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA 886B4238 611FCFDC DE355B3B 
6519035B BC34F4DE F99C0238 61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3 64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF 3C1B20EE 3FD59D7C 25E41D2B 66C62E37 FFFFFFFF FFFFFFFF"""), 16)) goodGroupParameters.append(FFDHE3072) RFC7919_GROUPS.append(FFDHE3072) FFDHE_PARAMETERS["RFC7919 ffdhe3072"] = FFDHE3072 FFDHE4096 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1 D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9 7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561 2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935 984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735 30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19 0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61 9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73 3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA 886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238 61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3 64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF 3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB 7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004 87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832 A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A 1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF 8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E655F6A FFFFFFFF FFFFFFFF"""), 16)) goodGroupParameters.append(FFDHE4096) RFC7919_GROUPS.append(FFDHE4096) FFDHE_PARAMETERS["RFC7919 ffdhe4096"] = FFDHE4096 FFDHE6144 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1 D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9 7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561 2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935 984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735 30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19 0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61 9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73 3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA 886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238 61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3 64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF 3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB 7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004 87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832 A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A 1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF 8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902 0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6 3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477 A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3 0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4 763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6 B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04 5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1 A41D570D 7938DAD4 A40E329C D0E40E65 FFFFFFFF FFFFFFFF"""), 16)) 
goodGroupParameters.append(FFDHE6144) RFC7919_GROUPS.append(FFDHE6144) FFDHE_PARAMETERS["RFC7919 ffdhe6144"] = FFDHE6144 FFDHE8192 = ( 2, int(remove_whitespace(""" FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1 D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9 7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561 2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935 984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735 30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19 0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61 9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73 3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA 886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238 61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3 64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF 3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB 7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004 87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832 A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A 1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF 8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902 0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6 3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477 A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3 0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4 763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6 B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04 5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1 A41D570D 7938DAD4 A40E329C CFF46AAA 36AD004C F600C838 1E425A31 D951AE64 FDB23FCE C9509D43 687FEB69 EDD1CC5E 0B8CC3BD F64B10EF 86B63142 A3AB8829 555B2F74 7C932665 CB2C0F1C C01BD702 29388839 D2AF05E4 54504AC7 8B758282 2846C0BA 35C35F5C 59160CC0 46FD8251 541FC68C 9C86B022 BB709987 6A460E74 51A8A931 09703FEE 1C217E6C 3826E52C 51AA691E 0E423CFC 99E9E316 50C1217B 624816CD AD9A95F9 D5B80194 88D9C0A0 A1FE3075 A577E231 83F81D4A 3F2FA457 1EFC8CE0 BA8A4FE8 B6855DFE 72B0A66E DED2FBAB FBE58A30 FAFABE1C 5D71A87E 2F741EF8 C1FE86FE A6BBFDE5 30677F0D 97D11D49 F7A8443D 0822E506 A9F4614E 011E2A94 838FF88C D68C8BB7 C5C6424C FFFFFFFF FFFFFFFF"""), 16)) goodGroupParameters.append(FFDHE8192) RFC7919_GROUPS.append(FFDHE8192) FFDHE_PARAMETERS["RFC7919 ffdhe8192"] = FFDHE8192 def paramStrength(param): size = numBits(param) if size < 512: return 48 elif size < 768: return 56 elif size < 816: return 64 elif size < 1023: return 72 elif size < 1535: return 80 elif size < 2047: return 88 elif size < 3071: return 112 elif size < 4095: return 128 elif size < 6144: return 152 elif size < 7679: return 168 elif size < 15359: return 192 else: return 256 def P_hash(mac_name, secret, seed, length): ret = bytearray(length) seed = compatHMAC(seed) A = seed index = 0 mac = hmac.HMAC(compatHMAC(secret), digestmod=mac_name) while index < length: a_fun = mac.copy() a_fun.update(A) A = a_fun.digest() out_fun = mac.copy() out_fun.update(A) out_fun.update(seed) output = out_fun.digest() how_many = min(length - index, len(output)) ret[index:index+how_many] = output[:how_many] index += how_many return ret def PRF(secret, label, seed, length): S1 = secret[ : int(math.ceil(len(secret)/2.0))] S2 = secret[ int(math.floor(len(secret)/2.0)) : 
] p_md5 = P_hash("md5", S1, label + seed, length) p_sha1 = P_hash("sha1", S2, label + seed, length) for x in range(length): p_md5[x] ^= p_sha1[x] return p_md5 def PRF_1_2(secret, label, seed, length): return P_hash("sha256", secret, label + seed, length) def PRF_1_2_SHA384(secret, label, seed, length): return P_hash("sha384", secret, label + seed, length) def PRF_SSL(secret, seed, length): bytes = bytearray(length) index = 0 for x in range(26): A = bytearray([ord('A')+x] * (x+1)) input = secret + SHA1(A + secret + seed) output = MD5(input) for c in output: if index >= length: return bytes bytes[index] = c index += 1 return bytes @deprecated_method("Please use calcKey method instead.") def calcExtendedMasterSecret(version, cipherSuite, premasterSecret, handshakeHashes): assert version in ((3, 1), (3, 2), (3, 3)) if version in ((3, 1), (3, 2)): masterSecret = PRF(premasterSecret, b"extended master secret", handshakeHashes.digest('md5') + handshakeHashes.digest('sha1'), 48) else: if cipherSuite in CipherSuite.sha384PrfSuites: masterSecret = PRF_1_2_SHA384(premasterSecret, b"extended master secret", handshakeHashes.digest('sha384'), 48) else: masterSecret = PRF_1_2(premasterSecret, b"extended master secret", handshakeHashes.digest('sha256'), 48) return masterSecret @deprecated_method("Please use calcKey method instead.") def calcMasterSecret(version, cipherSuite, premasterSecret, clientRandom, serverRandom): if version == (3,0): masterSecret = PRF_SSL(premasterSecret, clientRandom + serverRandom, 48) elif version in ((3,1), (3,2)): masterSecret = PRF(premasterSecret, b"master secret", clientRandom + serverRandom, 48) elif version == (3,3): if cipherSuite in CipherSuite.sha384PrfSuites: masterSecret = PRF_1_2_SHA384(premasterSecret, b"master secret", clientRandom + serverRandom, 48) else: masterSecret = PRF_1_2(premasterSecret, b"master secret", clientRandom + serverRandom, 48) else: raise AssertionError() return masterSecret @deprecated_method("Please use calcKey method instead.") def calcFinished(version, masterSecret, cipherSuite, handshakeHashes, isClient): assert version in ((3, 0), (3, 1), (3, 2), (3, 3)) if version == (3,0): if isClient: senderStr = b"\x43\x4C\x4E\x54" else: senderStr = b"\x53\x52\x56\x52" verifyData = handshakeHashes.digestSSL(masterSecret, senderStr) else: if isClient: label = b"client finished" else: label = b"server finished" if version in ((3,1), (3,2)): handshakeHash = handshakeHashes.digest() verifyData = PRF(masterSecret, label, handshakeHash, 12) else: if cipherSuite in CipherSuite.sha384PrfSuites: handshakeHash = handshakeHashes.digest('sha384') verifyData = PRF_1_2_SHA384(masterSecret, label, handshakeHash, 12) else: handshakeHash = handshakeHashes.digest('sha256') verifyData = PRF_1_2(masterSecret, label, handshakeHash, 12) return verifyData
MIT License
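A minimal usage sketch for `calc_key` above, deriving a TLS 1.2 master secret and key block in place of the deprecated `calcMasterSecret` shown in the context; the random values are placeholders and the cipher-suite constant name is an assumption:

import os

client_random = bytearray(os.urandom(32))
server_random = bytearray(os.urandom(32))
premaster_secret = bytearray(os.urandom(48))
cipher_suite = CipherSuite.TLS_RSA_WITH_AES_128_GCM_SHA256  # assumed constant name

master_secret = calc_key((3, 3), premaster_secret, cipher_suite,
                         b"master secret",
                         client_random=client_random,
                         server_random=server_random,
                         output_length=48)

key_block = calc_key((3, 3), master_secret, cipher_suite,
                     b"key expansion",
                     client_random=client_random,
                     server_random=server_random,
                     output_length=40)  # 2 x 16-byte keys + 2 x 4-byte IV fragments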
skoda091/alfred-deepl
lib/requests/api.py
get
python
def get(url, params=None, **kwargs):
    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)
r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response
https://github.com/skoda091/alfred-deepl/blob/8c7d7a572011bdee3888211a90a161ea50bb81af/lib/requests/api.py#L61-L72
from . import sessions


def request(method, url, **kwargs):
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
MIT License
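A minimal usage sketch for `get` above, using the public `requests` API:

import requests

resp = requests.get('https://api.github.com/search/repositories',
                    params={'q': 'requests'})
resp.raise_for_status()
print(resp.status_code, resp.headers.get('content-type'))
payload = resp.json()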
tensorflow/ranking
tensorflow_ranking/python/model.py
_rolling_window_indices
python
def _rolling_window_indices(size, rw_size, num_valid_entries):
    with tf.compat.v1.name_scope(name='rolling_window_indices'):
        rw_indices = tf.expand_dims(tf.range(rw_size), 0) + tf.expand_dims(
            tf.range(size), 1)
        batch_size = tf.shape(input=num_valid_entries)[0]
        batch_rw_indices = tf.tile(
            tf.expand_dims(rw_indices, 0), multiples=[batch_size, 1, 1])
        batch_indices_mask = tf.less(
            tf.reduce_min(input_tensor=batch_rw_indices, axis=2),
            tf.reshape(num_valid_entries, [-1, 1]))
        num_valid_entries = tf.compat.v1.where(
            tf.less(num_valid_entries, 1), tf.ones_like(num_valid_entries),
            num_valid_entries)
        batch_rw_indices = tf.math.mod(batch_rw_indices,
                                       tf.reshape(num_valid_entries, [-1, 1, 1]))
        return batch_rw_indices, batch_indices_mask
Returns the rolling windows indices and mask for valid ones.

When size = 3, rw_size = 2, returns [[0, 1], [1, 2], [2, 0]]. When size = 2,
rw_size = 3, returns [[0, 1, 0], [1, 0, 1]]. When num_valid_entries = 2, the
first returns [[0, 1], [1, 0], [0, 1]] and the first 2 are valid with mask as
[True, True, False].

Args:
  size: A scalar int `Tensor` for the size.
  rw_size: A scalar int `Tensor` for the rw_size.
  num_valid_entries: A 1-D `Tensor` with shape [batch_size] representing the
    number of valid entries for each instance in a batch.

Returns:
  A tuple of Tensors (batch_rw_indices, batch_indices_mask). The first has
  shape [batch_size, size, rw_size] and the second has shape
  [batch_size, size].
https://github.com/tensorflow/ranking/blob/6cf8f70a8533ba15abbfb5f50db17cb01fc56410/tensorflow_ranking/python/model.py#L163-L201
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow as tf from tensorflow.python.util import function_utils from tensorflow_ranking.python import feature from tensorflow_ranking.python import utils _NUM_SHUFFLES_TRAIN = 'num_shuffles_train' _NUM_SHUFFLES_EVAL = 'num_shuffles_eval' _NUM_SHUFFLES_PREDICT = 'num_shuffles_predict' def _get_params(mode, params): params = params or {} if mode == tf.estimator.ModeKeys.TRAIN: num_shuffles = params.get(_NUM_SHUFFLES_TRAIN, None) elif mode == tf.estimator.ModeKeys.EVAL: num_shuffles = params.get(_NUM_SHUFFLES_EVAL, None) elif mode == tf.estimator.ModeKeys.PREDICT: num_shuffles = params.get(_NUM_SHUFFLES_PREDICT, None) else: raise ValueError('Invalid mode: {}.'.format(mode)) return num_shuffles class _RankingModel(object): __metaclass__ = abc.ABCMeta def __init__(self, transform_fn=None): if transform_fn is None: self._transform_fn = feature.make_identity_transform_fn({}) else: self._transform_fn = transform_fn def _call_transform_fn(self, features, mode, params): transform_fn_args = function_utils.fn_args(self._transform_fn) if 'mode' in transform_fn_args and 'params' in transform_fn_args: return self._transform_fn(features, mode=mode, params=params) elif 'mode' in transform_fn_args: return self._transform_fn(features, mode=mode) elif 'params' in transform_fn_args: return self._transform_fn(features, params=params) else: return self._transform_fn(features) def compute_logits(self, features, labels, mode, params, config): with tf.compat.v1.name_scope('transform'): context_features, example_features = self._call_transform_fn( features, mode, params) for name, value in six.iteritems(example_features): tensor_shape = tf.convert_to_tensor(value=value).shape if (tensor_shape is not None and tensor_shape.rank is not None and tensor_shape.rank < 3): tf.compat.v1.logging.warning( 'Feature \"{}\" has invalid feature tensor shape {}. ' 'Expected shape has at least 3 dims: ' '(batch_size, list_size, feature_size).'.format( name, tensor_shape)) logits = self._compute_logits_impl(context_features, example_features, labels, mode, params, config) if mode == tf.estimator.ModeKeys.PREDICT: return logits else: features.update(context_features) features.update(example_features) return logits @abc.abstractmethod def _compute_logits_impl(self, context_features, example_features, labels, mode, params, config): raise NotImplementedError('Calling an abstract method.')
Apache License 2.0
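A minimal usage sketch for `_rolling_window_indices` above, reproducing the shapes from its docstring with a batch of two lists:

import tensorflow as tf

indices, mask = _rolling_window_indices(3, 2, tf.constant([3, 2]))
# indices[0] -> [[0, 1], [1, 2], [2, 0]]   (all 3 entries valid)
# indices[1] -> [[0, 1], [1, 0], [0, 1]]   (only the first 2 entries valid)
# mask[1]    -> [True, True, False]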
fawaz-dabbaghieh/bubble_gun
BubbleGun/BubbleChain.py
BubbleChain.sort
python
def sort(self):
    all_ends = dict()
    for b in self.bubbles:
        source = str(b.source.id)
        sink = str(b.sink.id)
        if source > sink:
            all_ends[(source, sink)] = b
        else:
            all_ends[(sink, source)] = b

    start = self.ends[0]
    all_keys = list(all_ends.keys())
    while len(self.sorted) < len(self.bubbles):
        for idx, key in enumerate(all_keys):
            if start in key:
                rm_key = idx
                start = key[1 - key.index(start)]
                self.sorted.append(all_ends[key])
                break
        del all_keys[rm_key]
sorts the bubbles in the chain
https://github.com/fawaz-dabbaghieh/bubble_gun/blob/285c51b43b4a9c6770c7fc7e64fc1ae7fdddc643/BubbleGun/BubbleChain.py#L102-L144
from collections import Counter import pdb class BubbleChain: __slots__ = ['bubbles', 'sorted', 'ends', 'key', 'id', 'parent_chain', 'parent_sb'] def __init__(self): self.bubbles = set() self.sorted = [] self.ends = [] self.id = 0 self.parent_sb = 0 self.parent_chain = 0 def __key(self): if self.ends[0] > self.ends[1]: return (self.ends[0], self.ends[1]) return (self.ends[1], self.ends[0]) def __hash__(self): return hash(self.__key()) def __eq__(self, other): return self.__key() == other.__key() def __ne__(self, other): return not self.__eq__(other) def __len__(self): return len(self.bubbles) def __contains__(self, item): return item in self.bubbles def add_bubble(self, bubble): self.bubbles.add(bubble) def list_chain(self, ids=True): c_list = [] for b in self.bubbles: c_list += [b.source, b.sink] + b.inside if ids: return list(set([x.id for x in c_list])) return list(set(c_list)) def length_node(self): return len(self.list_chain()) def length_seq(self): total_seq = 0 counted_overlaps = set() for n in self.list_chain(ids=False): total_seq += n.seq_len if n.id not in counted_overlaps: for nn in n.end: counted_overlaps.add(nn[0]) total_seq -= nn[2] for nn in n.start: counted_overlaps.add(nn[0]) total_seq -= nn[2] return total_seq def find_ends(self): self.ends = [k for k, v in Counter([b.source.id for b in self.bubbles] + [b.sink.id for b in self.bubbles]).items() if v == 1]
MIT License
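A minimal usage sketch for `sort` above: it assumes `find_ends()` has already run, since the walk starts from `self.ends[0]` (the `detected_bubbles` iterable is a placeholder for Bubble objects produced by the detection step elsewhere in the tool):

chain = BubbleChain()
for b in detected_bubbles:   # placeholder: Bubble objects from the detection step
    chain.add_bubble(b)

chain.find_ends()            # populates chain.ends, which sort() reads
chain.sort()
ordered_bubbles = chain.sorted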
xrplf/xrpl-py
xrpl/core/addresscodec/codec.py
encode_node_public_key
python
def encode_node_public_key(bytestring: bytes) -> str:
    return _encode(bytestring, _NODE_PUBLIC_KEY_PREFIX, _NODE_PUBLIC_KEY_LENGTH)
Returns the node public key encoding of these bytes as a base58 string.

Args:
    bytestring: Bytes to be encoded.

Returns:
    The node public key encoding of these bytes as a base58 string.
https://github.com/xrplf/xrpl-py/blob/3635339bfb579353e56f126bbcf303d931b26d65/xrpl/core/addresscodec/codec.py#L143-L153
from typing import Dict, List, Tuple import base58 from typing_extensions import Final from xrpl.constants import CryptoAlgorithm from xrpl.core.addresscodec.exceptions import XRPLAddressCodecException from xrpl.core.addresscodec.utils import XRPL_ALPHABET _CLASSIC_ADDRESS_PREFIX: Final[List[int]] = [0x0] _ACCOUNT_PUBLIC_KEY_PREFIX: Final[List[int]] = [0x23] _FAMILY_SEED_PREFIX: Final[List[int]] = [0x21] _NODE_PUBLIC_KEY_PREFIX: Final[List[int]] = [0x1C] _ED25519_SEED_PREFIX: Final[List[int]] = [0x01, 0xE1, 0x4B] SEED_LENGTH: Final[int] = 16 _CLASSIC_ADDRESS_LENGTH: Final[int] = 20 _NODE_PUBLIC_KEY_LENGTH: Final[int] = 33 _ACCOUNT_PUBLIC_KEY_LENGTH: Final[int] = 33 _ALGORITHM_TO_PREFIX_MAP: Final[Dict[CryptoAlgorithm, List[int]]] = { CryptoAlgorithm.ED25519: _ED25519_SEED_PREFIX, CryptoAlgorithm.SECP256K1: _FAMILY_SEED_PREFIX, } def _encode(bytestring: bytes, prefix: List[int], expected_length: int) -> str: if expected_length and len(bytestring) != expected_length: error_message = """unexpected_payload_length: len(bytestring) does not match expected_length. Ensure that the bytes are a bytestring.""" raise XRPLAddressCodecException(error_message) encoded_prefix = bytes(prefix) payload = encoded_prefix + bytestring return base58.b58encode_check(payload, alphabet=XRPL_ALPHABET).decode("utf-8") def _decode(b58_string: str, prefix: bytes) -> bytes: prefix_length = len(prefix) decoded = base58.b58decode_check(b58_string, alphabet=XRPL_ALPHABET) if decoded[:prefix_length] != prefix: raise XRPLAddressCodecException("Provided prefix is incorrect") return decoded[prefix_length:] def encode_seed(entropy: bytes, encoding_type: CryptoAlgorithm) -> str: if len(entropy) != SEED_LENGTH: raise XRPLAddressCodecException(f"Entropy must have length {SEED_LENGTH}") if encoding_type not in CryptoAlgorithm: raise XRPLAddressCodecException( f"Encoding type must be one of {CryptoAlgorithm}" ) prefix = _ALGORITHM_TO_PREFIX_MAP[encoding_type] return _encode(entropy, prefix, SEED_LENGTH) def decode_seed(seed: str) -> Tuple[bytes, CryptoAlgorithm]: for algorithm in CryptoAlgorithm: prefix = _ALGORITHM_TO_PREFIX_MAP[algorithm] try: decoded_result = _decode(seed, bytes(prefix)) return decoded_result, algorithm except XRPLAddressCodecException: continue raise XRPLAddressCodecException( "Invalid seed; could not determine encoding algorithm" ) def encode_classic_address(bytestring: bytes) -> str: return _encode(bytestring, _CLASSIC_ADDRESS_PREFIX, _CLASSIC_ADDRESS_LENGTH) def decode_classic_address(classic_address: str) -> bytes: return _decode(classic_address, bytes(_CLASSIC_ADDRESS_PREFIX))
ISC License
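A minimal usage sketch for `encode_node_public_key` above; the 33-byte hex value is a placeholder, not a real key:

from xrpl.core.addresscodec.codec import encode_node_public_key

raw_key = bytes.fromhex('03' + 'ab' * 32)   # 33 bytes, placeholder value
print(encode_node_public_key(raw_key))      # base58 string for the node public key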
adalca/neurite
neurite/tf/utils/seg.py
next_pred_label
python
def next_pred_label(model, data_generator, verbose=False):
    sample = next(data_generator)
    with timer.Timer('prediction', verbose):
        pred = model.predict(sample[0])
    sample_input = sample[0] if not isinstance(sample[0], (list, tuple)) else sample[0][0]
    max_labels = pred_to_label(sample_input, pred)
    return (sample, pred) + max_labels
predict the next sample batch from the generator, and compute max labels

return sample, prediction, max_labels
https://github.com/adalca/neurite/blob/c7bb05d5dae47d2a79e0fe5a8284f30b2304d335/neurite/tf/utils/seg.py#L263-L273
import itertools import numpy as np from tqdm import tqdm_notebook as tqdm from pprint import pformat import tensorflow as tf from tensorflow import keras import tensorflow.keras.backend as K import neurite as ne import neurite.py.utils import pystrum.pynd.ndutils as nd import pystrum.pynd.patchlib as pl import pystrum.pytools.timer as timer def predict_volumes(models, data_generator, batch_size, patch_size, patch_stride, grid_size, nan_func=np.nanmedian, do_extra_vol=False, do_prob_of_true=False, verbose=False): if not isinstance(models, (list, tuple)): models = (models,) with timer.Timer('predict_volume_stack', verbose): vol_stack = predict_volume_stack(models, data_generator, batch_size, grid_size, verbose) if len(models) == 1: do_prior = len(vol_stack) == 4 else: do_prior = len(vol_stack[0]) == 4 ret = () for midx, _ in enumerate(models): stack = vol_stack if len(models) == 1 else vol_stack[midx] if do_prior: all_true, all_pred, all_vol, all_prior = stack else: all_true, all_pred, all_vol = stack all_true_label, all_pred_label = pred_to_label(all_true, all_pred) args = [patch_size, grid_size, patch_stride] label_kwargs = {'nan_func_layers': nan_func, 'nan_func_K': nan_func, 'verbose': verbose} vol_true_label = _quilt(all_true_label, *args, **label_kwargs).astype('int') vol_pred_label = _quilt(all_pred_label, *args, **label_kwargs).astype('int') ret_set = (vol_true_label, vol_pred_label) if do_extra_vol: vol_input = _quilt(all_vol, *args) ret_set += (vol_input, ) if do_prior: all_prior_label, = pred_to_label(all_prior) vol_prior_label = _quilt(all_prior_label, *args, **label_kwargs).astype('int') ret_set += (vol_prior_label, ) if do_extra_vol and do_prob_of_true: all_pp = prob_of_label(all_pred, all_true_label) pred_prob_of_true = _quilt(all_pp, *args, **label_kwargs) ret_set += (pred_prob_of_true, ) if do_prior: all_pp = prob_of_label(all_prior, all_true_label) prior_prob_of_true = _quilt(all_pp, *args, **label_kwargs) ret_set += (prior_prob_of_true, ) ret += (ret_set, ) if len(models) == 1: ret = ret[0] return ret def predict_volume_stack(models, data_generator, batch_size, grid_size, verbose=False): if not isinstance(models, (list, tuple)): models = (models,) nb_patches = np.prod(grid_size) nb_batches = ((nb_patches - 1) // batch_size) + 1 batch_gen = tqdm(range(nb_batches)) if verbose else range(nb_batches) for batch_idx in batch_gen: sample = next(data_generator) nb_vox = np.prod(sample[1].shape[1:-1]) do_prior = isinstance(sample[0], (list, tuple)) if batch_idx == 0: nb_labels = sample[1].shape[-1] all_vol = [np.zeros((nb_patches, nb_vox)) for f in models] all_true = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] all_pred = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] all_prior = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] for idx, model in enumerate(models): pred = model.predict(sample[0]) assert pred.shape[0] == batch_size, "batch size mismatch. 
sample has batch size %d, given batch size is %d" % ( pred.shape[0], batch_size) input_batch = sample[0] if not do_prior else sample[0][0] batch_start = batch_idx * batch_size batch_end = np.minimum(batch_start + batch_size, nb_patches) batch_range = np.arange(batch_start, batch_end) batch_vox_idx = batch_end - batch_start all_vol[idx][batch_range, :] = K.batch_flatten(input_batch)[0:batch_vox_idx, :] all_true[idx][batch_range, :] = K.batch_flatten(sample[1])[0:batch_vox_idx, :] all_pred[idx][batch_range, :] = K.batch_flatten(pred)[0:batch_vox_idx, :] if do_prior: all_prior[idx][batch_range, :] = K.batch_flatten(sample[0][1])[0:batch_vox_idx, :] for idx, _ in enumerate(models): all_true[idx] = np.reshape(all_true[idx], [nb_patches, nb_vox, nb_labels]) all_pred[idx] = np.reshape(all_pred[idx], [nb_patches, nb_vox, nb_labels]) if do_prior: all_prior[idx] = np.reshape(all_prior[idx], [nb_patches, nb_vox, nb_labels]) ret = () for midx, _ in enumerate(models): if do_prior: ret += ((all_true[midx], all_pred[midx], all_vol[midx], all_prior[midx]), ) else: ret += ((all_true[midx], all_pred[midx], all_vol[midx]), ) if len(models) == 1: ret = ret[0] return ret def prob_of_label(vol, labelvol): nb_dims = np.ndim(labelvol) assert np.ndim(vol) == nb_dims + 1, "vol dimensions do not match [%d] vs [%d]" % (np.ndim(vol) - 1, nb_dims) shp = vol.shape nb_voxels = np.prod(shp[0:nb_dims]) nb_labels = shp[-1] flat_vol = np.reshape(vol, (nb_voxels, nb_labels)) rows_sums = flat_vol.sum(axis=1) flat_vol_norm = flat_vol / rows_sums[:, np.newaxis] idx = list(range(nb_voxels)) v = flat_vol_norm[idx, labelvol.flat] return np.reshape(v, labelvol.shape)
Apache License 2.0
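A brief usage sketch for next_pred_label, not part of the neurite codebase: it assumes the module is importable as neurite.tf.utils.seg, and the toy Keras model, shapes, and batch generator below are illustrative placeholders only.

import numpy as np
import tensorflow as tf
from neurite.tf.utils.seg import next_pred_label  # assumed import path for this module

# toy 1-D "segmentation" model with 4 output labels (shapes chosen only for illustration)
model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(4, 3, padding='same', activation='softmax', input_shape=(16, 1)),
])

def batch_generator(batch_size=2):
    # yield (inputs, one-hot targets) batches forever, as the utility expects
    while True:
        x = np.random.rand(batch_size, 16, 1).astype('float32')
        y = tf.keras.utils.to_categorical(np.random.randint(0, 4, (batch_size, 16)), 4)
        yield x, y

outputs = next_pred_label(model, batch_generator(), verbose=True)
sample, pred = outputs[0], outputs[1]   # remaining elements are the max-label maps
print(pred.shape)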
lanceliang2018/wenku8toepub-online
ebooklib/epub.py
EpubBook.get_items
python
def get_items(self):
    return (item for item in self.items)
Returns all items attached to this book. :Returns: Returns all items as a generator.
https://github.com/lanceliang2018/wenku8toepub-online/blob/000230533eb2dab63746677b6682babf2d28f82c/ebooklib/epub.py#L781-L788
import zipfile import six import logging import uuid import posixpath as zip_path import os.path from collections import OrderedDict try: from urllib.parse import unquote except ImportError: from urllib import unquote from lxml import etree import ebooklib from ebooklib.utils import parse_string, parse_html_string, guess_type, get_pages_for_items VERSION = (0, 17, 1) NAMESPACES = {'XML': 'http://www.w3.org/XML/1998/namespace', 'EPUB': 'http://www.idpf.org/2007/ops', 'DAISY': 'http://www.daisy.org/z3986/2005/ncx/', 'OPF': 'http://www.idpf.org/2007/opf', 'CONTAINERNS': 'urn:oasis:names:tc:opendocument:xmlns:container', 'DC': 'http://purl.org/dc/elements/1.1/', 'XHTML': 'http://www.w3.org/1999/xhtml'} CONTAINER_PATH = 'META-INF/container.xml' CONTAINER_XML = '''<?xml version='1.0' encoding='utf-8'?> <container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0"> <rootfiles> <rootfile media-type="application/oebps-package+xml" full-path="%(folder_name)s/content.opf"/> </rootfiles> </container> ''' NCX_XML = six.b('''<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd"> <ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" />''') NAV_XML = six.b('''<?xml version="1.0" encoding="utf-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops"/>''') CHAPTER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" epub:prefix="z3998: http://www.daisy.org/z3998/2012/vocab/structure/#"></html>''') COVER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" lang="en" xml:lang="en"> <head> <style> body { margin: 0em; padding: 0em; } img { max-width: 100%; max-height: 100%; } </style> </head> <body> <img src="" alt="" /> </body> </html>''') IMAGE_MEDIA_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/svg+xml'] class Section(object): def __init__(self, title, href=''): self.title = title self.href = href class Link(object): def __init__(self, href, title, uid=None): self.href = href self.title = title self.uid = uid class EpubException(Exception): def __init__(self, code, msg): self.code = code self.msg = msg def __str__(self): return repr(self.msg) class EpubItem(object): def __init__(self, uid=None, file_name='', media_type='', content=six.b(''), manifest=True): self.id = uid self.file_name = file_name self.media_type = media_type self.content = content self.is_linear = True self.manifest = manifest self.book = None def get_id(self): return self.id def get_name(self): return self.file_name def get_type(self): _, ext = zip_path.splitext(self.get_name()) ext = ext.lower() for uid, ext_list in six.iteritems(ebooklib.EXTENSIONS): if ext in ext_list: return uid return ebooklib.ITEM_UNKNOWN def get_content(self, default=six.b('')): return self.content or default def set_content(self, content): self.content = content def __str__(self): return '<EpubItem:%s>' % self.id class EpubNcx(EpubItem): def __init__(self, uid='ncx', file_name='toc.ncx'): super(EpubNcx, self).__init__(uid=uid, file_name=file_name, media_type='application/x-dtbncx+xml') def __str__(self): return '<EpubNcx:%s>' % self.id class EpubCover(EpubItem): def __init__(self, uid='cover-img', file_name=''): super(EpubCover, self).__init__(uid=uid, file_name=file_name) def get_type(self): return ebooklib.ITEM_COVER def 
__str__(self): return '<EpubCover:%s:%s>' % (self.id, self.file_name) class EpubHtml(EpubItem): _template_name = 'chapter' def __init__(self, uid=None, file_name='', media_type='', content=None, title='', lang=None, direction=None, media_overlay=None, media_duration=None): super(EpubHtml, self).__init__(uid, file_name, media_type, content) self.title = title self.lang = lang self.direction = direction self.media_overlay = media_overlay self.media_duration = media_duration self.links = [] self.properties = [] self.pages = [] def is_chapter(self): return True def get_type(self): return ebooklib.ITEM_DOCUMENT def set_language(self, lang): self.lang = lang def get_language(self): return self.lang def add_link(self, **kwgs): self.links.append(kwgs) if kwgs.get('type') == 'text/javascript': if 'scripted' not in self.properties: self.properties.append('scripted') def get_links(self): return (link for link in self.links) def get_links_of_type(self, link_type): return (link for link in self.links if link.get('type', '') == link_type) def add_item(self, item): if item.get_type() == ebooklib.ITEM_STYLE: self.add_link(href=item.get_name(), rel='stylesheet', type='text/css') if item.get_type() == ebooklib.ITEM_SCRIPT: self.add_link(src=item.get_name(), type='text/javascript') def get_body_content(self): try: html_tree = parse_html_string(self.content) except: return '' html_root = html_tree.getroottree() if len(html_root.find('body')) != 0: body = html_tree.find('body') tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8', xml_declaration=False) if tree_str.startswith(six.b('<body>')): n = tree_str.rindex(six.b('</body>')) return tree_str[6:n] return tree_str return '' def get_content(self, default=None): tree = parse_string(self.book.get_template(self._template_name)) tree_root = tree.getroot() tree_root.set('lang', self.lang or self.book.language) tree_root.attrib['{%s}lang' % NAMESPACES['XML']] = self.lang or self.book.language try: html_tree = parse_html_string(self.content) except: return '' html_root = html_tree.getroottree() _head = etree.SubElement(tree_root, 'head') if self.title != '': _title = etree.SubElement(_head, 'title') _title.text = self.title for lnk in self.links: if lnk.get('type') == 'text/javascript': _lnk = etree.SubElement(_head, 'script', lnk) _lnk.text = '' else: _lnk = etree.SubElement(_head, 'link', lnk) _body = etree.SubElement(tree_root, 'body') if self.direction: _body.set('dir', self.direction) tree_root.set('dir', self.direction) body = html_tree.find('body') if body is not None: for i in body.getchildren(): _body.append(i) tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def __str__(self): return '<EpubHtml:%s:%s>' % (self.id, self.file_name) class EpubCoverHtml(EpubHtml): def __init__(self, uid='cover', file_name='cover.xhtml', image_name='', title='Cover'): super(EpubCoverHtml, self).__init__(uid=uid, file_name=file_name, title=title) self.image_name = image_name self.is_linear = False def is_chapter(self): return False def get_content(self): self.content = self.book.get_template('cover') tree = parse_string(super(EpubCoverHtml, self).get_content()) tree_root = tree.getroot() images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']}) images[0].set('src', self.image_name) images[0].set('alt', self.title) tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def __str__(self): return '<EpubCoverHtml:%s:%s>' % (self.id, 
self.file_name) class EpubNav(EpubHtml): def __init__(self, uid='nav', file_name='nav.xhtml', media_type='application/xhtml+xml'): super(EpubNav, self).__init__(uid=uid, file_name=file_name, media_type=media_type) def is_chapter(self): return False def __str__(self): return '<EpubNav:%s:%s>' % (self.id, self.file_name) class EpubImage(EpubItem): def __init__(self): super(EpubImage, self).__init__() def get_type(self): return ebooklib.ITEM_IMAGE def __str__(self): return '<EpubImage:%s:%s>' % (self.id, self.file_name) class EpubSMIL(EpubItem): def __init__(self, uid=None, file_name='', content=None): super(EpubSMIL, self).__init__(uid=uid, file_name=file_name, media_type='application/smil+xml', content=content) def get_type(self): return ebooklib.ITEM_SMIL def __str__(self): return '<EpubSMIL:%s:%s>' % (self.id, self.file_name) class EpubBook(object): def __init__(self): self.EPUB_VERSION = None self.reset() def reset(self): self.metadata = {} self.items = [] self.spine = [] self.guide = [] self.pages = [] self.toc = [] self.bindings = [] self.IDENTIFIER_ID = 'id' self.FOLDER_NAME = 'EPUB' self._id_html = 0 self._id_image = 0 self._id_static = 0 self.title = '' self.language = 'en' self.direction = None self.templates = { 'ncx': NCX_XML, 'nav': NAV_XML, 'chapter': CHAPTER_XML, 'cover': COVER_XML } self.add_metadata('OPF', 'generator', '', { 'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION]) }) self.set_identifier(str(uuid.uuid4())) self.prefixes = [] self.namespaces = {} def set_identifier(self, uid): self.uid = uid self.set_unique_metadata('DC', 'identifier', self.uid, {'id': self.IDENTIFIER_ID}) def set_title(self, title): self.title = title self.add_metadata('DC', 'title', self.title) def set_language(self, lang): self.language = lang self.add_metadata('DC', 'language', lang) def set_direction(self, direction): self.direction = direction def set_cover(self, file_name, content, create_page=True): c0 = EpubCover(file_name=file_name) c0.content = content self.add_item(c0) if create_page: c1 = EpubCoverHtml(image_name=file_name) self.add_item(c1) self.add_metadata(None, 'meta', '', OrderedDict([('name', 'cover'), ('content', 'cover-img')])) def add_author(self, author, file_as=None, role=None, uid='creator'): self.add_metadata('DC', 'creator', author, {'id': uid}) if file_as: self.add_metadata(None, 'meta', file_as, {'refines': '#' + uid, 'property': 'file-as', 'scheme': 'marc:relators'}) if role: self.add_metadata(None, 'meta', role, {'refines': '#' + uid, 'property': 'role', 'scheme': 'marc:relators'}) def add_metadata(self, namespace, name, value, others=None): if namespace in NAMESPACES: namespace = NAMESPACES[namespace] if namespace not in self.metadata: self.metadata[namespace] = {} if name not in self.metadata[namespace]: self.metadata[namespace][name] = [] self.metadata[namespace][name].append((value, others)) def get_metadata(self, namespace, name): if namespace in NAMESPACES: namespace = NAMESPACES[namespace] return self.metadata[namespace].get(name, []) def set_unique_metadata(self, namespace, name, value, others=None): if namespace in NAMESPACES: namespace = NAMESPACES[namespace] if namespace in self.metadata and name in self.metadata[namespace]: self.metadata[namespace][name] = [(value, others)] else: self.add_metadata(namespace, name, value, others) def add_item(self, item): if item.media_type == '': (has_guessed, media_type) = guess_type(item.get_name().lower()) if has_guessed: if media_type is not None: item.media_type = media_type else: 
item.media_type = has_guessed else: item.media_type = 'application/octet-stream' if not item.get_id(): if isinstance(item, EpubHtml): item.id = 'chapter_%d' % self._id_html self._id_html += 1 self.pages += item.pages elif isinstance(item, EpubImage): item.id = 'image_%d' % self._id_image self._id_image += 1 else: item.id = 'static_%d' % self._id_image self._id_image += 1 item.book = self self.items.append(item) return item def get_item_with_id(self, uid): for item in self.get_items(): if item.id == uid: return item return None def get_item_with_href(self, href): for item in self.get_items(): if item.get_name() == href: return item return None
MIT License
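A short, hedged example of get_items in use, assuming the standard ebooklib package layout (import ebooklib / from ebooklib import epub) that this vendored copy mirrors; titles, file names, and content are placeholders.

import ebooklib
from ebooklib import epub

book = epub.EpubBook()
book.set_title('Demo Book')

# attach a chapter and a stylesheet
book.add_item(epub.EpubHtml(title='Chapter 1', file_name='ch1.xhtml', content='<h1>Hello</h1>'))
book.add_item(epub.EpubItem(uid='style', file_name='style/main.css',
                            media_type='text/css', content=b'body { margin: 0 }'))

# get_items() yields every attached item lazily, one at a time
for item in book.get_items():
    print(item.get_id(), item.get_name(), item.get_type() == ebooklib.ITEM_DOCUMENT)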
virtuesecurity/aws-extender
BappModules/boto/__init__.py
connect_redshift
python
def connect_redshift(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    from boto.redshift.layer1 import RedshiftConnection
    return RedshiftConnection(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        **kwargs
    )
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.redshift.layer1.RedshiftConnection` :return: A connection to Amazon's Redshift service
https://github.com/virtuesecurity/aws-extender/blob/3029dd26bd7bdf7f4148e1e92adf9f8c547cafbe/BappModules/boto/__init__.py#L754-L772
from boto.pyami.config import Config, BotoConfigLocations from boto.storage_uri import BucketStorageUri, FileStorageUri import boto.plugin import datetime import os import platform import re import sys import logging import logging.config from boto.compat import urlparse from boto.exception import InvalidUriError __version__ = '2.48.0' Version = __version__ datetime.datetime.strptime('', '') UserAgent = 'Boto/%s Python/%s %s/%s' % ( __version__, platform.python_version(), platform.system(), platform.release() ) config = Config() BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$') TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}') GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)' r'#(?P<generation>[0-9]+)$') VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$') ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json') def init_logging(): for file in BotoConfigLocations: try: logging.config.fileConfig(os.path.expanduser(file)) except: pass class NullHandler(logging.Handler): def emit(self, record): pass log = logging.getLogger('boto') perflog = logging.getLogger('boto.perf') log.addHandler(NullHandler()) perflog.addHandler(NullHandler()) init_logging() def set_file_logger(name, filepath, level=logging.INFO, format_string=None): global log if not format_string: format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" logger = logging.getLogger(name) logger.setLevel(level) fh = logging.FileHandler(filepath) fh.setLevel(level) formatter = logging.Formatter(format_string) fh.setFormatter(formatter) logger.addHandler(fh) log = logger def set_stream_logger(name, level=logging.DEBUG, format_string=None): global log if not format_string: format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" logger = logging.getLogger(name) logger.setLevel(level) fh = logging.StreamHandler() fh.setLevel(level) formatter = logging.Formatter(format_string) fh.setFormatter(formatter) logger.addHandler(fh) log = logger def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sqs.connection import SQSConnection return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.s3.connection import S3Connection return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs): from boto.gs.connection import GSConnection return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs) def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.connection import EC2Connection return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.elb import ELBConnection return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.autoscale import AutoScaleConnection return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.cloudwatch import CloudWatchConnection return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sdb.connection import SDBConnection 
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.fps.connection import FPSConnection return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.mturk.connection import MTurkConnection return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudfront import CloudFrontConnection return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.vpc import VPCConnection return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.rds import RDSConnection return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.rds2.layer1 import RDSConnection return RDSConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs ) def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.emr import EmrConnection return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sns import SNSConnection return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.iam import IAMConnection return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.route53 import Route53Connection return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudformation import CloudFormationConnection return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Eucalyptus', is_secure=False, **kwargs): from boto.ec2 import EC2Connection from boto.ec2.regioninfo import RegionInfo if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'eucalyptus_host', None) reg = RegionInfo(name='eucalyptus', endpoint=host) return EC2Connection(aws_access_key_id, aws_secret_access_key, region=reg, port=port, path=path, is_secure=is_secure, **kwargs) def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.glacier.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.regioninfo import RegionInfo purl = urlparse(url) kwargs['port'] = purl.port kwargs['host'] = purl.hostname kwargs['path'] = purl.path if not 'is_secure' in kwargs: kwargs['is_secure'] = (purl.scheme == "https") kwargs['region'] = RegionInfo(name=purl.hostname, 
endpoint=purl.hostname) kwargs['aws_access_key_id'] = aws_access_key_id kwargs['aws_secret_access_key'] = aws_secret_access_key return(connect_ec2(**kwargs)) def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Walrus', is_secure=False, **kwargs): from boto.s3.connection import S3Connection from boto.s3.connection import OrdinaryCallingFormat if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'walrus_host', None) return S3Connection(aws_access_key_id, aws_secret_access_key, host=host, port=port, path=path, calling_format=OrdinaryCallingFormat(), is_secure=is_secure, **kwargs) def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ses import SESConnection return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sts import STSConnection return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, is_secure=False, **kwargs): from boto.s3.connection import S3Connection from boto.s3.connection import OrdinaryCallingFormat access_key = config.get('Credentials', 'ia_access_key_id', ia_access_key_id) secret_key = config.get('Credentials', 'ia_secret_access_key', ia_secret_access_key) return S3Connection(access_key, secret_key, host='s3.us.archive.org', calling_format=OrdinaryCallingFormat(), is_secure=is_secure, **kwargs) def connect_dynamodb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.dynamodb.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_swf(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.swf.layer1 import Layer1 return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudsearch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudsearch.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudsearch2(aws_access_key_id=None, aws_secret_access_key=None, sign_request=False, **kwargs): from boto.cloudsearch2.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, sign_request=sign_request, **kwargs) def connect_cloudsearchdomain(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection return CloudSearchDomainConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_beanstalk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.beanstalk.layer1 import Layer1 return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_elastictranscoder(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.elastictranscoder.layer1 import ElasticTranscoderConnection return ElasticTranscoderConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs) def connect_opsworks(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.opsworks.layer1 import OpsWorksConnection return OpsWorksConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs)
MIT License
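A usage sketch for connect_redshift; the credentials are placeholders, and describe_clusters is shown only to illustrate that the returned object is a regular boto 2.x RedshiftConnection.

import boto

# placeholder credentials -- in practice these come from your environment or boto config
conn = boto.connect_redshift(
    aws_access_key_id='AKIAEXAMPLEKEY',
    aws_secret_access_key='example-secret-key',
)

# the connection is a boto.redshift.layer1.RedshiftConnection; any extra keyword
# arguments to connect_redshift are passed straight through to that class
response = conn.describe_clusters()
print(response)  # parsed JSON describing any Redshift clusters in the account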
aerospike/aerospike-client-python
aerospike_helpers/operations/map_operations.py
map_get_by_key_list
python
def map_get_by_key_list(bin_name, key_list, return_type, inverted=False, ctx=None):
    op_dict = {
        OP_KEY: aerospike.OP_MAP_GET_BY_KEY_LIST,
        BIN_KEY: bin_name,
        VALUE_KEY: key_list,
        RETURN_TYPE_KEY: return_type,
        INVERTED_KEY: inverted
    }

    if ctx:
        op_dict[CTX_KEY] = ctx

    return op_dict
Creates a map_get_by_key_list operation to be used with operate or operate_ordered. The operation returns the items specified by the keys in key_list from the map stored in the specified bin. Args: bin_name (str): The name of the bin containing the map. key_list (list): A list of keys to be returned from the map. return_type (int): Value specifying what should be returned from the operation. This should be one of the :ref:`map_return_types` values. inverted (bool): If true, map entries with keys not specified in the key_list will be returned, and those specified in the key_list will be ignored. Default: False ctx (list): An optional list of nested CDT context operations (:mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` objects) for use on nested CDTs. Returns: A dictionary usable in operate or operate_ordered. The format of the dictionary should be considered an internal detail, and subject to change.
https://github.com/aerospike/aerospike-client-python/blob/59fa0d36aa899a164282643fe49b27d12aaf323f/aerospike_helpers/operations/map_operations.py#L612-L640
import aerospike OP_KEY = "op" BIN_KEY = "bin" POLICY_KEY = "map_policy" VALUE_KEY = "val" KEY_KEY = "key" INDEX_KEY = "index" RETURN_TYPE_KEY = "return_type" INVERTED_KEY = "inverted" RANGE_KEY = "range" COUNT_KEY = "count" RANK_KEY = "rank" CTX_KEY = "ctx" def map_set_policy(bin_name, policy, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_SET_POLICY, BIN_KEY: bin_name, POLICY_KEY: policy } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_put(bin_name, key, value, map_policy=None, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_PUT, BIN_KEY: bin_name, KEY_KEY: key, VALUE_KEY: value } if map_policy is not None: op_dict[POLICY_KEY] = map_policy if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_put_items(bin_name, item_dict, map_policy=None, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_PUT_ITEMS, BIN_KEY: bin_name, VALUE_KEY: item_dict } if map_policy is not None: op_dict[POLICY_KEY] = map_policy if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_increment(bin_name, key, amount, map_policy=None, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_INCREMENT, BIN_KEY: bin_name, KEY_KEY: key, VALUE_KEY: amount } if map_policy is not None: op_dict[POLICY_KEY] = map_policy if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_decrement(bin_name, key, amount, map_policy=None, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_DECREMENT, BIN_KEY: bin_name, KEY_KEY: key, VALUE_KEY: amount } if map_policy is not None: op_dict[POLICY_KEY] = map_policy if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_size(bin_name, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_SIZE, BIN_KEY: bin_name } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_clear(bin_name, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_CLEAR, BIN_KEY: bin_name } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_key(bin_name, key, return_type, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_KEY, BIN_KEY: bin_name, KEY_KEY: key, RETURN_TYPE_KEY: return_type } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_key_list(bin_name, key_list, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_KEY_LIST, BIN_KEY: bin_name, VALUE_KEY: key_list, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_key_range(bin_name, key_range_start, key_range_end, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_KEY_RANGE, BIN_KEY: bin_name, KEY_KEY: key_range_start, VALUE_KEY: key_range_end, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_value(bin_name, value, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE, BIN_KEY: bin_name, VALUE_KEY: value, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_value_list(bin_name, value_list, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE_LIST, BIN_KEY: bin_name, VALUE_KEY: value_list, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_value_range(bin_name, value_start, value_end, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE, BIN_KEY: bin_name, 
VALUE_KEY: value_start, RANGE_KEY: value_end, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_index(bin_name, index, return_type, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_INDEX, BIN_KEY: bin_name, INDEX_KEY: index, RETURN_TYPE_KEY: return_type } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_index_range(bin_name, index_start, remove_amt, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE, BIN_KEY: bin_name, INDEX_KEY: index_start, VALUE_KEY: remove_amt, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_rank(bin_name, rank, return_type, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_RANK, BIN_KEY: bin_name, INDEX_KEY: rank, RETURN_TYPE_KEY: return_type } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_remove_by_rank_range(bin_name, rank_start, remove_amt, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_RANK_RANGE, BIN_KEY: bin_name, INDEX_KEY: rank_start, VALUE_KEY: remove_amt, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx is not None: op_dict[CTX_KEY] = ctx return op_dict def map_get_by_key(bin_name, key, return_type, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_GET_BY_KEY, BIN_KEY: bin_name, KEY_KEY: key, RETURN_TYPE_KEY: return_type } if ctx: op_dict[CTX_KEY] = ctx return op_dict def map_get_by_key_range(bin_name, key_range_start, key_range_end, return_type, inverted=False, ctx=None): op_dict = { OP_KEY: aerospike.OP_MAP_GET_BY_KEY_RANGE, BIN_KEY: bin_name, KEY_KEY: key_range_start, RANGE_KEY: key_range_end, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } if ctx: op_dict[CTX_KEY] = ctx return op_dict
Apache License 2.0
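A hedged example of map_get_by_key_list inside an operate call; the host, namespace, set, bin, and map keys are placeholders, and MAP_RETURN_KEY_VALUE is one of the client's standard map return types.

import aerospike
from aerospike_helpers.operations import map_operations

client = aerospike.client({'hosts': [('127.0.0.1', 3000)]}).connect()
key = ('test', 'demo', 'user1')

# fetch only the 'alice' and 'bob' entries from the map stored in bin 'scores'
ops = [
    map_operations.map_get_by_key_list(
        'scores',
        ['alice', 'bob'],
        aerospike.MAP_RETURN_KEY_VALUE,
    )
]
_, _, bins = client.operate(key, ops)
print(bins['scores'])
client.close()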
derwenai/pytextrank
pytextrank/base.py
BaseTextRank.segment_paragraphs
python
def segment_paragraphs (
    self,
    sent_dist: typing.List[Sentence],
    ) -> typing.List[Paragraph]:
    para_elem: typing.List[int] = []
    para_bounds: typing.List[typing.List[int]] = []

    for sent_id, s in enumerate(self.doc.sents):
        toke_0 = str(s.__getitem__(0))
        ret_count = sum(map(lambda c: 1 if c == "\n" else 0, toke_0))

        if ret_count > 1:
            if len(para_elem) > 0:
                para_bounds.append(para_elem)
            para_elem = []

        para_elem.append(sent_id)

    if len(para_elem) > 0:
        para_bounds.append(para_elem)

    para_list: typing.List[Paragraph] = []

    for para_id, para_elem in enumerate(para_bounds):
        sum_dist = [
            sent_dist[sent_id].distance
            for sent_id in para_elem
        ]

        para_list.append(Paragraph(
            para_id = para_id,
            start = para_elem[0],
            end = para_elem[-1],
            distance = sum(sum_dist) / float(len(sum_dist)),
        ))

    return para_list
Segment a ranked document into paragraphs. sent_dist: a list of ranked Sentence data objects. Returns: a list of Paragraph data objects.
https://github.com/derwenai/pytextrank/blob/bda595d8a504e8d4ec9b47c1d909b7c1d10e5300/pytextrank/base.py#L731-L783
from collections import Counter, defaultdict, OrderedDict from dataclasses import dataclass import json import math import pathlib import time import typing from icecream import ic from spacy.tokens import Doc, Span, Token import graphviz import networkx as nx from .util import groupby_apply, default_scrubber try: import altair as alt import pandas as pd except ImportError: _has_altair_and_pandas = False else: _has_altair_and_pandas = True StopWordsLike = typing.Union[ str, pathlib.Path, typing.Dict[str, typing.List[str]] ] @dataclass(order=True, frozen=True) class Lemma: lemma: str pos: str def label ( self ) -> str: return str((self.lemma, self.pos,)) @dataclass class Phrase: text: str chunks: typing.List[Span] count: int rank: float @dataclass class Sentence: start: int end: int sent_id: int phrases: typing.Set[int] distance: float def empty ( self ) -> bool: return len(self.phrases) == 0 def text ( self, doc: Doc, ) -> str: return doc[self.start:self.end] @dataclass class VectorElem: phrase: Phrase phrase_id: int coord: float @dataclass class Paragraph: start: int end: int para_id: int distance: float class BaseTextRankFactory: _EDGE_WEIGHT: float = 1.0 _POS_KEPT: typing.List[str] = ["ADJ", "NOUN", "PROPN", "VERB"] _TOKEN_LOOKBACK: int = 3 def __init__ ( self, *, edge_weight: float = _EDGE_WEIGHT, pos_kept: typing.List[str] = None, token_lookback: int = _TOKEN_LOOKBACK, scrubber: typing.Optional[typing.Callable] = None, stopwords: typing.Optional[StopWordsLike] = None, ) -> None: self.edge_weight: float = edge_weight self.token_lookback: int = token_lookback if pos_kept: self.pos_kept: typing.List[str] = pos_kept else: self.pos_kept = self._POS_KEPT if scrubber: self.scrubber: typing.Callable = scrubber else: self.scrubber = default_scrubber if stopwords: self.stopwords: typing.Dict[str, typing.List[str]] = self._load_stopwords(stopwords) else: self.stopwords = defaultdict(list) @classmethod def _load_stopwords ( cls, stopwords: typing.Optional[StopWordsLike] = None, ) -> typing.Dict[str, typing.List[str]]: if isinstance(stopwords, dict): return stopwords if isinstance(stopwords, pathlib.Path): path: pathlib.Path = stopwords else: path = pathlib.Path(str) if path.exists(): with open(path, "r") as f: data = json.load(f) if data: return data.items() raise TypeError("cannot parse the stopwords source as a dictionary") def __call__ ( self, doc: Doc, ) -> Doc: Doc.set_extension("textrank", force=True, default=None) Doc.set_extension("phrases", force=True, default=[]) doc._.textrank = BaseTextRank( doc, edge_weight = self.edge_weight, pos_kept = self.pos_kept, token_lookback = self.token_lookback, scrubber = self.scrubber, stopwords = self.stopwords, ) doc._.phrases = doc._.textrank.calc_textrank() return doc class BaseTextRank: def __init__ ( self, doc: Doc, edge_weight: float, pos_kept: typing.List[str], token_lookback: int, scrubber: typing.Callable, stopwords: typing.Dict[str, typing.List[str]], ) -> None: self.doc: Doc = doc self.edge_weight: float = edge_weight self.token_lookback: int = token_lookback self.pos_kept: typing.List[str] = pos_kept self.scrubber: typing.Callable = scrubber self.stopwords: typing.Dict[str, typing.List[str]] = stopwords self.focus_tokens: typing.Set[str] = set() self.node_bias = 1.0 self.default_bias = 1.0 self.elapsed_time: float = 0.0 self.lemma_graph: nx.DiGraph = nx.DiGraph() self.phrases: typing.List[Phrase] = [] self.ranks: typing.Dict[Lemma, float] = {} self.seen_lemma: typing.Dict[Lemma, typing.Set[int]] = OrderedDict() def reset ( self ) -> None: 
self.elapsed_time = 0.0 self.lemma_graph = nx.DiGraph() self.phrases = [] self.ranks = {} self.seen_lemma = OrderedDict() def calc_textrank ( self ) -> typing.List[Phrase]: t0 = time.time() self.reset() self.lemma_graph = self._construct_graph() self.ranks = nx.pagerank_scipy( self.lemma_graph, personalization = self.get_personalization(), ) nc_phrases: typing.Dict[Span, float] = self._collect_phrases(self.doc.noun_chunks, self.ranks) ent_phrases: typing.Dict[Span, float] = self._collect_phrases(self.doc.ents, self.ranks) all_phrases: typing.Dict[Span, float] = { **nc_phrases, **ent_phrases } raw_phrase_list: typing.List[Phrase] = self._get_min_phrases(all_phrases) phrase_list: typing.List[Phrase] = sorted(raw_phrase_list, key=lambda p: p.rank, reverse=True) t1 = time.time() self.elapsed_time = (t1 - t0) * 1000.0 return phrase_list def get_personalization ( self ) -> typing.Optional[typing.Dict[Lemma, float]]: return None def _construct_graph ( self ) -> nx.DiGraph: g = nx.DiGraph() g.add_nodes_from(self.node_list) g.add_edges_from(self.edge_list) return g def _keep_token ( self, token: Token, ) -> bool: lemma = token.lemma_.lower().strip() if self._is_stopword(lemma, token): return False if token.pos_ not in self.pos_kept: return False key = Lemma(lemma, token.pos_,) if key not in self.seen_lemma: self.seen_lemma[key] = set([token.i]) else: self.seen_lemma[key].add(token.i) return True def _is_stopword ( self, lemma: str, token: Token, ) -> bool: return lemma in self.stopwords and token.pos_ in self.stopwords[lemma] @property def node_list ( self ) -> typing.List[Lemma]: nodes: typing.List[Lemma] = [ Lemma(token.lemma_, token.pos_) for token in self.doc if self._keep_token(token) ] return nodes @property def edge_list ( self ) -> typing.List[typing.Tuple[Lemma, Lemma, typing.Dict[str, float]]]: edges: typing.List[typing.Tuple[Lemma, Lemma]] = [] for sent in self.doc.sents: h = [ Lemma(token.lemma_, token.pos_) for token in sent if self._keep_token(token) ] for hop in range(self.token_lookback): for idx, node in enumerate(h[: -1 - hop]): nbor = h[hop + idx + 1] edges.append((node, nbor)) weighted_edges: typing.List[typing.Tuple[Lemma, Lemma, typing.Dict[str, float]]] = [ (*n, {"weight": w * self.edge_weight}) for n, w in Counter(edges).items() ] return weighted_edges def _collect_phrases ( self, spans: typing.Iterable[Span], ranks: typing.Dict[Lemma, float] ) -> typing.Dict[Span, float]: phrases: typing.Dict[Span, float] = { span: sum( ranks[Lemma(token.lemma_, token.pos_)] for token in span if self._keep_token(token) ) for span in spans } return { span: self._calc_discounted_normalised_rank(span, sum_rank) for span, sum_rank in phrases.items() } def _calc_discounted_normalised_rank ( self, span: Span, sum_rank: float ) -> float: non_lemma = len([tok for tok in span if tok.pos_ not in self.pos_kept]) non_lemma_discount = len(span) / (len(span) + (2.0 * non_lemma) + 1.0) phrase_rank = math.sqrt(sum_rank / (len(span) + non_lemma)) return phrase_rank * non_lemma_discount def _get_min_phrases ( self, all_phrases: typing.Dict[Span, float] ) -> typing.List[Phrase]: try: data: typing.List[typing.Tuple[Span, float, Span]] = [ (self.scrubber(span), rank, span) for span, rank in all_phrases.items() ] except AttributeError: raise FutureWarning("Text-based scrubbers are deprecated. 
Use a `Span` instead.") keyfunc = lambda x: x[0] applyfunc = lambda g: list((rank, spans) for text, rank, spans in g) phrases: typing.List[typing.Tuple[str, typing.List[typing.Tuple[float, Span]]]] = groupby_apply( data, keyfunc, applyfunc, ) phrase_list: typing.List[Phrase] = [ Phrase( text = p[0], rank = max(rank for rank, span in p[1]), count = len(p[1]), chunks = list(span for rank, span in p[1]), ) for p in phrases ] return phrase_list def get_unit_vector ( self, limit_phrases: int, ) -> typing.List[VectorElem]: unit_vector: typing.List[VectorElem] = [ VectorElem( phrase = p, phrase_id = phrase_id, coord = p.rank, ) for phrase_id, p in enumerate(self.doc._.phrases) ] limit = min(limit_phrases, len(unit_vector)) unit_vector = unit_vector[:limit] sum_length = sum([ elem.coord for elem in unit_vector ]) for elem in unit_vector: if sum_length > 0.0: elem.coord = elem.coord / sum_length else: elem.coord = 0.0 return unit_vector def calc_sent_dist ( self, limit_phrases: int, ) -> typing.List[Sentence]: unit_vector = self.get_unit_vector(limit_phrases) sent_dist: typing.List[Sentence] = [ Sentence( start = s.start, end = s.end, sent_id = sent_id, phrases = set(), distance = 0.0, ) for sent_id, s in enumerate(self.doc.sents) ] for elem in unit_vector: for chunk in elem.phrase.chunks: for sent in sent_dist: if chunk.start >= sent.start and chunk.end <= sent.end: sent.phrases.add(elem.phrase_id) break for sent in sent_dist: sum_sq = 0.0 for elem in unit_vector: if elem.phrase_id not in sent.phrases: sum_sq += elem.coord**2.0 sent.distance = math.sqrt(sum_sq) return sent_dist
MIT License
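A sketch of segment_paragraphs inside a spaCy pipeline, assuming a recent pytextrank release that registers the "textrank" factory; the limit_phrases value and the sample text (with a blank line as the paragraph break that the first-token newline check above looks for) are arbitrary.

import spacy
import pytextrank  # noqa: F401 -- registers the "textrank" pipeline factory

nlp = spacy.load('en_core_web_sm')
nlp.add_pipe('textrank')

text = (
    "Machine learning systems rank phrases by importance. "
    "Graph algorithms such as PageRank make this possible.\n\n"
    "A second paragraph discusses summarization. "
    "Sentence distances drive the extractive summary."
)
doc = nlp(text)

tr = doc._.textrank
sent_dist = tr.calc_sent_dist(limit_phrases=10)   # per-sentence distance scores
for para in tr.segment_paragraphs(sent_dist):     # group sentences into paragraphs
    print(para.para_id, para.start, para.end, round(para.distance, 4))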
drug2ways/drug2ways
src/drug2ways/bel_helper.py
remove_contradictory_edges
python
def remove_contradictory_edges(
    increases: List[Any],
    decreases: List[Any],
    debug: bool = False,
) -> Tuple[List[Any], List[Any], List[Any]]:
    removed = []
    for node in increases:
        if debug:
            logger.warning(f'Node: {node}')
        if node in decreases:
            if debug:
                logger.warning(f'{node} is contradictory')
            removed.append(node)
        else:
            if debug:
                logger.warning(f'{node} not contradictory')
    increases = [
        i for i in increases
        if i not in removed
    ]
    decreases = [
        i for i in decreases
        if i not in removed
    ]
    return increases, decreases, removed
Remove contradictory edges: drop any node that appears in both the increases and decreases lists, and return the filtered increases list, the filtered decreases list, and the removed nodes.
https://github.com/drug2ways/drug2ways/blob/c27f4d94ae8cf32b52186395f90d399889b84214/src/drug2ways/bel_helper.py#L101-L130
import logging from typing import Dict, Tuple, List, Any from networkx import DiGraph from pybel import BELGraph from pybel.constants import ( RELATION, INCREASES, DIRECTLY_DECREASES, DIRECTLY_INCREASES, DECREASES, REGULATES ) from pybel.struct.filters import is_causal_relation from pybel.struct.mutation import get_subgraph_by_edge_filter logger = logging.getLogger(__name__) RELATION_MAPPING_BEL = { INCREASES: 1, DIRECTLY_INCREASES: 1, DECREASES: -1, DIRECTLY_DECREASES: -1, REGULATES: 1, } def _change_relationships(edge: Dict) -> Tuple[bool, bool]: if 'increases' in edge[1]['relation'] or edge[1]['relation'] == 'positive_correlation': return True, True elif 'decreases' in edge[1]['relation'] or edge[1]['relation'] == 'negative_correlation': return True, False return False, False def process_bel_graph(bel_graph: BELGraph) -> DiGraph: bel_graph = get_subgraph_by_edge_filter(bel_graph, is_causal_relation) directed_graph = DiGraph() for source, target, data in bel_graph.edges(data=True): if data[RELATION] not in RELATION_MAPPING_BEL: logger.warning(f"Unknown relation {data[RELATION]}") continue directed_graph.add_edge(source.as_bel(), target.as_bel(), relation=RELATION_MAPPING_BEL[data[RELATION]]) return directed_graph def _is_target_node(node: str) -> bool: if node.startswith('bp') or node.startswith('path'): return True return False def _valid_source_node(node: str) -> bool: if not node.startswith('a'): return False if 'CHEBI' in node or 'PUBCHEM' in node: return True return False def get_candidate_drugs(graph): return [ node for node in graph.nodes() if _valid_source_node(node) ] def get_candidate_targets(graph): return [ node for node in graph.nodes() if _is_target_node(node) ]
Apache License 2.0
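A small illustrative call, assuming the module is importable as drug2ways.bel_helper; the BEL-style node names are made up.

from drug2ways.bel_helper import remove_contradictory_edges  # assumed import path

increases = ['a(CHEBI:1234)', 'p(HGNC:AKT1)', 'p(HGNC:TP53)']
decreases = ['p(HGNC:AKT1)', 'bp(GO:apoptosis)']

inc, dec, removed = remove_contradictory_edges(increases, decreases)

# 'p(HGNC:AKT1)' appears in both lists, so it is dropped from each and reported separately
print(inc)      # ['a(CHEBI:1234)', 'p(HGNC:TP53)']
print(dec)      # ['bp(GO:apoptosis)']
print(removed)  # ['p(HGNC:AKT1)']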
clericpy/ichrome
ichrome/sync_utils.py
Tab.set_url
python
def set_url(self, url=None, referrer=None, timeout=5):
    self.enable('Page')
    start_load_ts = self.now
    if url:
        self._url = url
        if referrer is None:
            data = self.send("Page.navigate", url=url, timeout=timeout)
        else:
            data = self.send("Page.navigate", url=url, referrer=referrer, timeout=timeout)
    else:
        data = self.send("Page.reload", timeout=timeout)
    time_passed = self.now - start_load_ts
    real_timeout = max((timeout - time_passed, 0))
    if self.wait_loading(timeout=real_timeout) is None:
        self.send("Page.stopLoading", timeout=0)
    return data
Navigate the tab to the URL
https://github.com/clericpy/ichrome/blob/052519822bea48de0c7c574d042d75f0e457a8b7/ichrome/sync_utils.py#L388-L409
import json import threading import time import traceback from concurrent.futures._base import Error from weakref import WeakValueDictionary import websocket from torequests import NewFuture, tPool from torequests.utils import quote_plus from .daemon import ChromeDaemon from .exceptions import ChromeRuntimeError, ChromeTypeError from .logs import logger class Chrome(object): def __init__(self, host="127.0.0.1", port=9222, timeout=2, retry=1): self.req = tPool() self.host = host self.port = port self.timeout = timeout self.retry = retry if not self.ok: raise ChromeRuntimeError(f"Can not connect to {self.server}") @property def ok(self): r = self.req.get(self.server, timeout=self.timeout, retry=self.retry) if r.ok: return True return False def _get_tabs(self): try: r = self.req.get(self.server + "/json", timeout=self.timeout, retry=self.retry) return [ Tab( tab["id"], tab["title"], tab["url"], tab["webSocketDebuggerUrl"], self, ) for tab in r.json() if tab["type"] == "page" ] except Exception as error: logger.error(f'_get_tabs crashed for: {error!r}') raise error @property def server(self): return f"http://{self.host}:{self.port}" @property def tabs(self): return self._get_tabs() def new_tab(self, url=""): r = self.req.get( f"{self.server}/json/new?{quote_plus(url)}", retry=self.retry, timeout=self.timeout, ) if r.x and r.ok: rjson = r.json() tab_id, title, _url, webSocketDebuggerUrl = ( rjson["id"], rjson["title"], rjson["url"], rjson["webSocketDebuggerUrl"], ) tab = Tab(tab_id, title, _url, webSocketDebuggerUrl, self) tab._create_time = tab.now logger.info(f"new tab {tab}") return tab def activate_tab(self, tab_id): ok = False if isinstance(tab_id, Tab): tab_id = tab_id.tab_id r = self.req.get( f"{self.server}/json/activate/{tab_id}", retry=self.retry, timeout=self.timeout, ) if r.x and r.ok: if r.text == "Target activated": ok = True logger.info(f"activate_tab {tab_id}: {ok}") def close_tab(self, tab_id=None): ok = False tab_id = tab_id or self.tabs if isinstance(tab_id, Tab): tab_id = tab_id.tab_id r = self.req.get( f"{self.server}/json/close/{tab_id}", retry=self.retry, timeout=self.timeout, ) if r.x and r.ok: if r.text == "Target is closing": ok = True logger.info(f"close tab {tab_id}: {ok}") def close_tabs(self, tab_ids): return [self.close_tab(tab_id) for tab_id in tab_ids] def kill(self, timeout=None, max_deaths=1): ChromeDaemon.clear_chrome_process(self.port, timeout=timeout, max_deaths=max_deaths) @property def meta(self): r = self.req.get(f"{self.server}/json/version", retry=self.retry, timeout=self.timeout) if r.x and r.ok: return r.json() def __str__(self): return "[Chromote(tabs=%d)]" % len(self.tabs) def __repr__(self): return f"Chromote({self.server})" def __getitem__(self, index): tabs = self.tabs if isinstance(index, int): if len(tabs) > index: return tabs[index] elif isinstance(index, slice): return tabs.__getitem__(index) class Tab(object): def __init__(self, tab_id, title, url, webSocketDebuggerUrl, chrome, timeout=5): self.tab_id = tab_id self._title = title self._url = url self.webSocketDebuggerUrl = webSocketDebuggerUrl self.chrome = chrome self.timeout = timeout self.req = tPool() self._create_time = time.time() self._message_id = 0 self._listener = Listener() self.lock = threading.Lock() self.ws = websocket.WebSocket() self._connect() for target in [self._recv_daemon]: t = threading.Thread(target=target, daemon=True) t.start() def close_browser(self): return self.send('Browser.close') @property def url(self): return self.current_url def _connect(self): 
self.ws.connect(self.webSocketDebuggerUrl, timeout=self.timeout) def activate_tab(self): return self.chrome.activate_tab(self.tab_id) def close_tab(self): return self.chrome.close_tab(self.tab_id) def activate(self): return self.send("Page.bringToFront") def close(self): return self.send("Page.close") def crash(self): return self.send("Page.crash") def _recv_daemon(self): while self.ws.connected: try: data_str = self.ws.recv() logger.debug(data_str) if not data_str: continue try: data_dict = json.loads(data_str) if not isinstance(data_dict, dict): continue except (TypeError, json.decoder.JSONDecodeError): continue f = self._listener.find_future(data_dict) if f: f.set_result(data_str) except ( websocket._exceptions.WebSocketConnectionClosedException, websocket._exceptions.WebSocketTimeoutException, ConnectionResetError, ): break def send(self, method, timeout=None, callback_function=None, mute_log=False, **kwargs): try: timeout = self.timeout if timeout is None else timeout request = {"method": method, "params": kwargs} self._message_id += 1 request["id"] = self._message_id if not mute_log: logger.info(f"<{self}> send: {request}") with self.lock: self.ws.send(json.dumps(request)) request = {"id": request["id"]} res = self.recv(request, timeout=timeout, callback_function=callback_function) return res except ( websocket._exceptions.WebSocketTimeoutException, websocket._exceptions.WebSocketConnectionClosedException, ): self.refresh_ws() def recv(self, arg, timeout=None, callback_function=None): result = None timeout = self.timeout if timeout is None else timeout if timeout == 0: return result f = self._listener.register(arg, timeout=timeout) try: result = f.x except Error: result = None finally: self._listener.find_future(arg) return callback_function(result) if callable( callback_function) else result def refresh_ws(self): self.ws.close() self._connect() @property def now(self): return int(time.time()) def clear_cookies(self, timeout=0): return self.send("Network.clearBrowserCookies", timeout=timeout) def delete_cookies(self, name, url=None, domain=None, path=None, timeout=0): return self.send( "Network.deleteCookies", name=name, url=url, domain=domain, path=path, timeout=timeout, ) def get_cookies(self, urls=None, timeout=None): if urls: if isinstance(urls, str): urls = [urls] urls = list(urls) result = self.send("Network.getCookies", urls=urls, timeout=timeout) else: result = self.send("Network.getCookies", timeout=timeout) try: return json.loads(result)["result"]["cookies"] except Exception as error: logger.error(f'get_cookies crashed for: {error!r}') raise error @property def current_url(self): return json.loads( self.js("window.location.href"))["result"]["result"]["value"] @property def title(self): return json.loads( self.js("document.title"))["result"]["result"]["value"] @property def html(self): response = None try: response = self.js("document.documentElement.outerHTML") if not response: return "" result = json.loads(response) value = result["result"]["result"]["value"] return value except (KeyError, json.decoder.JSONDecodeError): logger.error( f"tab.content error {response}:\n{traceback.format_exc()}") return "" def enable(self, name: str): return self.send(f'{name}.enable', timeout=0) def disable(self, name: str): return self.send(f'{name}.disable', timeout=0) def wait_loading(self, wait_seconds=None, timeout=1, callback_function=None): self.enable('Page') data = self.wait_event("Page.loadEventFired", timeout=timeout, wait_seconds=wait_seconds, callback_function=callback_function) 
return data def wait_event( self, event="", timeout=None, callback_function=None, filter_function=None, wait_seconds=None, ): timeout = self.timeout if timeout is None else timeout start_time = time.time() while 1: request = {"method": event} result = self.recv(request, timeout=timeout) timeout_break = wait_seconds and time.time( ) - start_time > wait_seconds if timeout_break: break if result or timeout == 0: if callable(filter_function): if filter_function(result): break else: break return callback_function(result) if callable( callback_function) else result def reload(self, timeout=5): return self.set_url(timeout=timeout)
MIT License
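A hedged sketch of Tab.set_url with the synchronous API; it assumes a local Chrome/Chromium that ChromeDaemon can launch on port 9222, and the target URL is a placeholder.

from ichrome import ChromeDaemon
from ichrome.sync_utils import Chrome

# launch a local headless Chrome, then drive one tab synchronously
with ChromeDaemon(port=9222, headless=True):
    chrome = Chrome(port=9222)
    tab = chrome.new_tab()
    tab.set_url('https://example.com', timeout=10)  # navigate and wait for the load event
    print(tab.title, tab.current_url)
    chrome.close_tab(tab)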
demisto/demisto-sdk
demisto_sdk/commands/common/hook_validations/mapper.py
MapperValidator.is_id_equals_name
python
def is_id_equals_name(self):
    return super()._is_id_equals_name('mapper')
Check whether the mapper ID is equal to its name. Returns: bool. Whether the file ID equals its name.
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/common/hook_validations/mapper.py#L201-L207
from distutils.version import LooseVersion import click from demisto_sdk.commands.common.constants import LAYOUT_AND_MAPPER_BUILT_IN_FIELDS from demisto_sdk.commands.common.errors import Errors from demisto_sdk.commands.common.hook_validations.content_entity_validator import ContentEntityValidator from demisto_sdk.commands.common.tools import get_all_incident_and_indicator_fields_from_id_set from demisto_sdk.commands.common.update_id_set import BUILT_IN_FIELDS FROM_VERSION = '6.0.0' VALID_TYPE_INCOMING = 'mapping-incoming' VALID_TYPE_OUTGOING = 'mapping-outgoing' class MapperValidator(ContentEntityValidator): def __init__(self, structure_validator, ignored_errors=None, print_as_warnings=False, suppress_print=False, json_file_path=None): super().__init__(structure_validator, ignored_errors=ignored_errors, print_as_warnings=print_as_warnings, suppress_print=suppress_print, json_file_path=json_file_path) self.from_version = '' self.to_version = '' def is_valid_mapper(self, validate_rn=True, id_set_file=None, is_circle=False): return all([ super().is_valid_file(validate_rn), self.is_valid_version(), self.is_valid_from_version(), self.is_valid_to_version(), self.is_to_version_higher_from_version(), self.is_valid_type(), self.is_incident_field_exist(id_set_file, is_circle), self.is_id_equals_name(), ]) def is_valid_version(self): return self._is_valid_version() def is_backward_compatible(self): answers = [ self.is_field_mapping_removed(), ] return not any(answers) def is_field_mapping_removed(self): old_mapper = self.old_file.get('mapping', {}) current_mapper = self.current_file.get('mapping', {}) old_incidents_types = {inc for inc in old_mapper} current_incidents_types = {inc for inc in current_mapper} if not old_incidents_types.issubset(current_incidents_types): removed_incident_types = old_incidents_types - current_incidents_types removed_dict = {} for removed in removed_incident_types: removed_dict[removed] = old_mapper[removed] error_message, error_code = Errors.removed_incident_types(removed_dict) if self.handle_error(error_message, error_code, file_path=self.file_path, warning=self.structure_validator.quite_bc): self.is_valid = False return True else: removed_incident_fields = {} for inc in old_incidents_types: old_incident_fields = old_mapper[inc].get('internalMapping', {}) current_incident_fields = current_mapper[inc].get('internalMapping', {}) old_fields = {inc for inc in old_incident_fields} current_fields = {inc for inc in current_incident_fields} if not old_fields.issubset(current_fields): removed_fields = old_fields - current_fields removed_incident_fields[inc] = removed_fields if removed_incident_fields: error_message, error_code = Errors.changed_incident_field_in_mapper(removed_incident_fields) if self.handle_error(error_message, error_code, file_path=self.file_path, warning=self.structure_validator.quite_bc): self.is_valid = False return True return False def is_valid_from_version(self): from_version = self.current_file.get('fromVersion', '') or self.current_file.get('fromversion') if from_version: self.from_version = from_version if LooseVersion(from_version) < LooseVersion(FROM_VERSION): error_message, error_code = Errors.invalid_from_version_in_mapper() if self.handle_error(error_message, error_code, file_path=self.file_path, suggested_fix=Errors.suggest_fix(self.file_path)): return False else: error_message, error_code = Errors.missing_from_version_in_mapper() if self.handle_error(error_message, error_code, file_path=self.file_path, 
suggested_fix=Errors.suggest_fix(self.file_path)): return False return True def is_valid_to_version(self): to_version = self.current_file.get('toVersion', '') or self.current_file.get('toversion', '') if to_version: self.to_version = to_version if LooseVersion(to_version) < LooseVersion(FROM_VERSION): error_message, error_code = Errors.invalid_to_version_in_mapper() if self.handle_error(error_message, error_code, file_path=self.file_path): return False return True def is_to_version_higher_from_version(self): if self.to_version and self.from_version: if LooseVersion(self.to_version) <= LooseVersion(self.from_version): error_message, error_code = Errors.from_version_higher_to_version() if self.handle_error(error_message, error_code, file_path=self.file_path): return False return True def is_valid_type(self): if self.current_file.get('type') not in [VALID_TYPE_INCOMING, VALID_TYPE_OUTGOING]: error_message, error_code = Errors.invalid_type_in_mapper() if self.handle_error(error_message, error_code, file_path=self.file_path): return False return True def is_incident_field_exist(self, id_set_file, is_circle) -> bool: if not is_circle: return True if not id_set_file: click.secho("Skipping mapper incident field validation. Could not read id_set.json.", fg="yellow") return True built_in_fields = [field.lower() for field in BUILT_IN_FIELDS] + LAYOUT_AND_MAPPER_BUILT_IN_FIELDS content_incident_fields = get_all_incident_and_indicator_fields_from_id_set(id_set_file, 'mapper') invalid_inc_fields_list = [] mapper = self.current_file.get('mapping', {}) for key, value in mapper.items(): incident_fields = value.get('internalMapping', {}) for inc_name, inc_info in incident_fields.items(): if self.current_file.get('type', {}) == "mapping-incoming": if inc_name not in content_incident_fields and inc_name.lower() not in built_in_fields: invalid_inc_fields_list.append(inc_name) if self.current_file.get('type', {}) == "mapping-outgoing": if inc_info['simple'] not in content_incident_fields and inc_info['simple'] not in built_in_fields and inc_info['simple'].split('.')[0] not in content_incident_fields and inc_info['simple']: invalid_inc_fields_list.append(inc_name) if inc_info['simple'] else None if invalid_inc_fields_list: error_message, error_code = Errors.invalid_incident_field_in_mapper(invalid_inc_fields_list) if self.handle_error(error_message, error_code, file_path=self.file_path): return False return True
MIT License
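A minimal, standalone sketch of the fromVersion gate the validator above applies, using a hypothetical mapper dictionary rather than a real demisto-sdk content file (distutils is used only because the source above uses it):

from distutils.version import LooseVersion

FROM_VERSION = '6.0.0'

def has_valid_from_version(mapper: dict) -> bool:
    # Mirrors the check above: accept 'fromVersion' or 'fromversion', require
    # it to be present and at least 6.0.0.
    from_version = mapper.get('fromVersion', '') or mapper.get('fromversion', '')
    if not from_version:
        return False
    return LooseVersion(from_version) >= LooseVersion(FROM_VERSION)

print(has_valid_from_version({'fromVersion': '6.1.0'}))   # True
print(has_valid_from_version({'fromversion': '5.5.0'}))   # False
print(has_valid_from_version({}))                         # False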
digitalglobe/gbdxtools
gbdxtools/workflow.py
Workflow.launch
python
def launch(self, workflow): try: r = self.gbdx_connection.post(self.workflows_url, json=workflow) try: r.raise_for_status() except: print("GBDX API Status Code: %s" % r.status_code) print("GBDX API Response: %s" % r.text) r.raise_for_status() workflow_id = r.json()['id'] return workflow_id except TypeError: self.logger.debug('Workflow not launched!')
Launches GBDX workflow. Args: workflow (dict): Dictionary specifying workflow tasks. Returns: Workflow id (str).
https://github.com/digitalglobe/gbdxtools/blob/8ddef9f8822a49126e059b56e465da7447e33244/gbdxtools/workflow.py#L33-L55
import json from gbdxtools.auth import Auth from gbdxtools.s3 import S3 class Workflow(object): def __init__(self, **kwargs): interface = Auth(**kwargs) self.base_url = '%s/workflows/v1' % interface.root_url self.workflows_url = '%s/workflows' % self.base_url self.gbdx_connection = interface.gbdx_connection self.s3 = S3() self.logger = interface.logger
MIT License
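A hedged usage sketch for the launch method above; the workflow payload is illustrative only (the workflow name, task name, and task type are hypothetical), and GBDX credentials must already be configured so Auth() can authenticate:

from gbdxtools.workflow import Workflow

wf = Workflow()  # assumes GBDX credentials are available to Auth()

workflow = {
    "name": "example-workflow",              # hypothetical workflow name
    "tasks": [
        {
            "name": "task-1",                # hypothetical task
            "taskType": "SomeTaskType",      # placeholder task type
            "inputs": [],
            "outputs": [],
        }
    ],
}

workflow_id = wf.launch(workflow)            # returns the workflow id (str)
print(workflow_id)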
datastax/python-driver
cassandra/cqlengine/query.py
AbstractQuerySet.allow_filtering
python
def allow_filtering(self): clone = copy.deepcopy(self) clone._allow_filtering = True return clone
Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key.
https://github.com/datastax/python-driver/blob/12a8adce943fe37a05ad6580e8bd302b65c2d93a/cassandra/cqlengine/query.py#L940-L946
import copy from datetime import datetime, timedelta from functools import partial import time import six from warnings import warn from cassandra.query import SimpleStatement, BatchType as CBatchType, BatchStatement from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine import connection as conn from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, LessThanOrEqualOperator, ContainsOperator, BaseWhereOperator) from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, UpdateStatement, InsertStatement, BaseCQLStatement, MapDeleteClause, ConditionalClause) class QueryException(CQLEngineException): pass class IfNotExistsWithCounterColumn(CQLEngineException): pass class IfExistsWithCounterColumn(CQLEngineException): pass class LWTException(CQLEngineException): def __init__(self, existing): super(LWTException, self).__init__("LWT Query was not applied") self.existing = existing class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass def check_applied(result): try: applied = result.was_applied except Exception: applied = True if not applied: raise LWTException(result.one()) class AbstractQueryableColumn(UnicodeMixin): def _get_column(self): raise NotImplementedError def __unicode__(self): raise NotImplementedError def _to_database(self, val): if isinstance(val, QueryValue): return val else: return self._get_column().to_database(val) def in_(self, item): return WhereClause(six.text_type(self), InOperator(), item) def contains_(self, item): return WhereClause(six.text_type(self), ContainsOperator(), item) def __eq__(self, other): return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other)) def __lt__(self, other): return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): Unlogged = 'UNLOGGED' Counter = 'COUNTER' class BatchQuery(object): warn_multiple_exec = True _consistency = None _connection = None _connection_explicit = False def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, timeout=conn.NOT_SET, connection=None): self.queries = [] self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, (datetime, timedelta)): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp self._consistency = consistency self._execute_on_exception = execute_on_exception self._timeout = timeout self._callbacks = [] self._executed = False self._context_entered = False self._connection = connection if connection: self._connection_explicit = True def add_query(self, query): if not isinstance(query, BaseCQLStatement): raise CQLEngineException('only BaseCQLStatements can be added to a batch query') self.queries.append(query) def consistency(self, consistency): self._consistency = consistency def _execute_callbacks(self): for callback, args, kwargs in self._callbacks: callback(*args, **kwargs) 
def add_callback(self, fn, *args, **kwargs): if not callable(fn): raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn))) self._callbacks.append((fn, args, kwargs)) def execute(self): if self._executed and self.warn_multiple_exec: msg = "Batch executed multiple times." if self._context_entered: msg += " If using the batch as a context manager, there is no need to call execute directly." warn(msg) self._executed = True if len(self.queries) == 0: self._execute_callbacks() return batch_type = None if self.batch_type is CBatchType.LOGGED else self.batch_type opener = 'BEGIN ' + (str(batch_type) + ' ' if batch_type else '') + ' BATCH' if self.timestamp: if isinstance(self.timestamp, six.integer_types): ts = self.timestamp elif isinstance(self.timestamp, (datetime, timedelta)): ts = self.timestamp if isinstance(self.timestamp, timedelta): ts += datetime.now() ts = int(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) else: raise ValueError("Batch expects a long, a timedelta, or a datetime") opener += ' USING TIMESTAMP {0}'.format(ts) query_list = [opener] parameters = {} ctx_counter = 0 for query in self.queries: query.update_context_id(ctx_counter) ctx = query.get_context() ctx_counter += len(ctx) query_list.append(' ' + str(query)) parameters.update(ctx) query_list.append('APPLY BATCH;') tmp = conn.execute('\n'.join(query_list), parameters, self._consistency, self._timeout, connection=self._connection) check_applied(tmp) self.queries = [] self._execute_callbacks() def __enter__(self): self._context_entered = True return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None and not self._execute_on_exception: return self.execute() class ContextQuery(object): def __init__(self, *args, **kwargs): from cassandra.cqlengine import models self.models = [] if len(args) < 1: raise ValueError("No model provided.") keyspace = kwargs.pop('keyspace', None) connection = kwargs.pop('connection', None) if kwargs: raise ValueError("Unknown keyword argument(s): {0}".format( ','.join(kwargs.keys()))) for model in args: try: issubclass(model, models.Model) except TypeError: raise ValueError("Models must be derived from base Model.") m = models._clone_model_class(model, {}) if keyspace: m.__keyspace__ = keyspace if connection: m.__connection__ = connection self.models.append(m) def __enter__(self): if len(self.models) > 1: return tuple(self.models) return self.models[0] def __exit__(self, exc_type, exc_val, exc_tb): return class AbstractQuerySet(object): def __init__(self, model): super(AbstractQuerySet, self).__init__() self.model = model self._where = [] self._conditional = [] self._order = [] self._allow_filtering = False self._limit = 10000 self._defer_fields = set() self._deferred_values = {} self._only_fields = [] self._values_list = False self._flat_values_list = False self._result_cache = None self._result_idx = None self._result_generator = None self._materialize_results = True self._distinct_fields = None self._count = None self._batch = None self._ttl = None self._consistency = None self._timestamp = None self._if_not_exists = False self._timeout = conn.NOT_SET self._if_exists = False self._fetch_size = None self._connection = None @property def column_family_name(self): return self.model.column_family_name() def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: connection = self._connection or self.model._get_connection() result = _execute_statement(self.model, statement, self._consistency, 
self._timeout, connection=connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result def __unicode__(self): return six.text_type(self._select_query()) def __str__(self): return str(self.__unicode__()) def __call__(self, *args, **kwargs): return self.filter(*args, **kwargs) def __deepcopy__(self, memo): clone = self.__class__(self.model) for k, v in self.__dict__.items(): if k in ['_con', '_cur', '_result_cache', '_result_idx', '_result_generator', '_construct_result']: clone.__dict__[k] = None elif k == '_batch': clone.__dict__[k] = self._batch elif k == '_timeout': clone.__dict__[k] = self._timeout else: clone.__dict__[k] = copy.deepcopy(v, memo) return clone def __len__(self): self._execute_query() return self.count() def _select_fields(self): return [] def _validate_select_where(self): def _select_query(self): if self._where: self._validate_select_where() return SelectStatement( self.column_family_name, fields=self._select_fields(), where=self._where, order_by=self._order, limit=self._limit, allow_filtering=self._allow_filtering, distinct_fields=self._distinct_fields, fetch_size=self._fetch_size ) def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: self._result_generator = (i for i in self._execute(self._select_query())) self._result_cache = [] self._construct_result = self._maybe_inject_deferred(self._get_result_constructor()) if self._materialize_results or self._distinct_fields: self._fill_result_cache() def _fill_result_cache(self): idx = 0 try: while True: idx += 1000 self._fill_result_cache_to_idx(idx) except StopIteration: pass self._count = len(self._result_cache) def _fill_result_cache_to_idx(self, idx): self._execute_query() if self._result_idx is None: self._result_idx = -1 qty = idx - self._result_idx if qty < 1: return else: for idx in range(qty): self._result_idx += 1 while True: try: self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) break except IndexError: self._result_cache.append(next(self._result_generator)) def __iter__(self): self._execute_query() idx = 0 while True: if len(self._result_cache) <= idx: try: self._result_cache.append(next(self._result_generator)) except StopIteration: break instance = self._result_cache[idx] if isinstance(instance, dict): self._fill_result_cache_to_idx(idx) yield self._result_cache[idx] idx += 1 def __getitem__(self, s): self._execute_query() if isinstance(s, slice): start = s.start if s.start else 0 if start < 0 or (s.stop is not None and s.stop < 0): warn("ModelQuerySet slicing with negative indices support will be removed in 4.0.", DeprecationWarning) end = s.stop if start < 0 or s.stop is None or s.stop < 0: end = self.count() try: self._fill_result_cache_to_idx(end) except StopIteration: pass return self._result_cache[start:s.stop:s.step] else: try: s = int(s) except (ValueError, TypeError): raise TypeError('QuerySet indices must be integers') if s < 0: warn("ModelQuerySet indexing with negative indices support will be removed in 4.0.", DeprecationWarning) if s < 0: num_results = self.count() s += num_results try: self._fill_result_cache_to_idx(s) except StopIteration: raise IndexError return self._result_cache[s] def _get_result_constructor(self): raise NotImplementedError @staticmethod def _construct_with_deferred(f, deferred, row): row.update(deferred) return f(row) def _maybe_inject_deferred(self, constructor): 
return partial(self._construct_with_deferred, constructor, self._deferred_values) if self._deferred_values else constructor def batch(self, batch_obj): if self._connection: raise CQLEngineException("Cannot specify the connection on model in batch mode.") if batch_obj is not None and not isinstance(batch_obj, BatchQuery): raise CQLEngineException('batch_obj must be a BatchQuery instance or None') clone = copy.deepcopy(self) clone._batch = batch_obj return clone def first(self): try: return six.next(iter(self)) except StopIteration: return None def all(self): return copy.deepcopy(self) def consistency(self, consistency): clone = copy.deepcopy(self) clone._consistency = consistency return clone def _parse_filter_arg(self, arg): statement = arg.rsplit('__', 1) if len(statement) == 1: return arg, None elif len(statement) == 2: return (statement[0], statement[1]) if arg != 'pk__token' else (arg, None) else: raise QueryException("Can't parse '{0}'".format(arg)) def iff(self, *args, **kwargs): if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on iff are not allowed") clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, ConditionalClause): raise QueryException('{0} is not a valid query operator'.format(operator)) clone._conditional.append(operator) for arg, val in kwargs.items(): if isinstance(val, Token): raise QueryException("Token() values are not valid in conditionals") col_name, col_op = self._parse_filter_arg(arg) try: column = self.model._get_column(col_name) except KeyError: raise QueryException("Can't resolve column name: '{0}'".format(col_name)) if isinstance(val, BaseQueryFunction): query_val = val else: query_val = column.to_database(val) operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() clone._conditional.append(WhereClause(column.db_field_name, operator, query_val)) return clone def filter(self, *args, **kwargs): if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on filter are not allowed") clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, WhereClause): raise QueryException('{0} is not a valid query operator'.format(operator)) clone._where.append(operator) for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) quote_field = True if not isinstance(val, Token): try: column = self.model._get_column(col_name) except KeyError: raise QueryException("Can't resolve column name: '{0}'".format(col_name)) else: if col_name != 'pk__token': raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") column = columns._PartitionKeysToken(self.model) quote_field = False partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( 'Token() received {0} arguments but model has {1} partition keys'.format( len(val.value), len(partition_columns))) val.set_columns(partition_columns) operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() if isinstance(operator, InOperator): if not isinstance(val, (list, tuple)): raise QueryException('IN queries must use a list/tuple value') query_val = [column.to_database(v) for v in val] elif isinstance(val, BaseQueryFunction): query_val = val elif (isinstance(operator, ContainsOperator) and isinstance(column, (columns.List, columns.Set, columns.Map))): query_val = val else: query_val = column.to_database(val) if not col_op: 
clone._defer_fields.add(column.db_field_name) clone._deferred_values[column.db_field_name] = val clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) return clone def get(self, *args, **kwargs): if args or kwargs: return self.filter(*args, **kwargs).get() self._execute_query() try: self[1] raise self.model.MultipleObjectsReturned('Multiple objects found') except IndexError: pass try: obj = self[0] except IndexError: raise self.model.DoesNotExist return obj def _get_ordering_condition(self, colname): order_type = 'DESC' if colname.startswith('-') else 'ASC' colname = colname.replace('-', '') return colname, order_type def order_by(self, *colnames): if len(colnames) == 0: clone = copy.deepcopy(self) clone._order = [] return clone conditions = [] for colname in colnames: conditions.append('"{0}" {1}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) return clone def count(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._count is None: query = self._select_query() query.count = True result = self._execute(query) count_row = result.one().popitem() self._count = count_row[1] return self._count def distinct(self, distinct_fields=None): clone = copy.deepcopy(self) if distinct_fields: clone._distinct_fields = distinct_fields else: clone._distinct_fields = [x.column_name for x in self.model._partition_keys.values()] return clone def limit(self, v): if v is None: v = 0 if not isinstance(v, six.integer_types): raise TypeError if v == self._limit: return self if v < 0: raise QueryException("Negative limit is not allowed") clone = copy.deepcopy(self) clone._limit = v return clone def fetch_size(self, v): if not isinstance(v, six.integer_types): raise TypeError if v == self._fetch_size: return self if v < 1: raise QueryException("fetch size less than 1 is not allowed") clone = copy.deepcopy(self) clone._fetch_size = v return clone
Apache License 2.0
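A hedged usage sketch for allow_filtering above, with a hypothetical cqlengine model that is not part of the driver; filtering on a non-key column like this would normally be rejected by the queryset validation, and allow_filtering() returns a deep-copied queryset with ALLOW FILTERING enabled:

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Reading(Model):                       # hypothetical model for illustration
    __keyspace__ = 'demo'
    sensor_id = columns.UUID(partition_key=True)
    taken_at = columns.DateTime(primary_key=True, clustering_order="DESC")
    value = columns.Float()

# Executing the queryset still requires connection.setup(...) against a live
# cluster; printing it only renders the generated CQL.
hot = Reading.objects.filter(value__gt=30.0).allow_filtering()
print(hot)   # SELECT ... WHERE "value" > ... ALLOW FILTERING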
optapy/optapy
optapy-core/src/main/python/annotations.py
constraint_provider
python
def constraint_provider(constraint_provider_function): ensure_init() constraint_provider_function.__javaClass = _generate_constraint_provider_class(constraint_provider_function) return constraint_provider_function
Marks a function as a ConstraintProvider. The function takes a single parameter, the ConstraintFactory, and must return a list of Constraints. To create a Constraint, start with ConstraintFactory.from(get_class(PythonClass)).
https://github.com/optapy/optapy/blob/f8721a57806c1527509716c63ab7c1baec4185af/optapy-core/src/main/python/annotations.py#L233-L242
from .optaplanner_java_interop import ensure_init, _add_deep_copy_to_class, _generate_planning_entity_class, _generate_problem_fact_class, _generate_planning_solution_class, _generate_constraint_provider_class from jpype import JImplements def planning_id(getter_function): ensure_init() from org.optaplanner.core.api.domain.lookup import PlanningId as JavaPlanningId getter_function.__optaplannerPlanningId = { 'annotationType': JavaPlanningId } return getter_function def planning_variable(variable_type, value_range_provider_refs, nullable=False, graph_type=None, strength_comparator_class=None, strength_weight_factory_class=None): def planning_variable_function_wrapper(variable_getter_function): ensure_init() from org.optaplanner.core.api.domain.variable import PlanningVariable as JavaPlanningVariable variable_getter_function.__optaplannerPlanningVariable = { 'annotationType': JavaPlanningVariable, 'valueRangeProviderRefs': value_range_provider_refs, 'nullable': nullable, 'graphType': graph_type, 'strengthComparatorClass': strength_comparator_class, 'strengthWeightFactoryClass': strength_weight_factory_class } variable_getter_function.__return = variable_type.__javaClass return variable_getter_function return planning_variable_function_wrapper def problem_fact_collection_property(fact_type): def problem_fact_collection_property_function_mapper(getter_function): ensure_init() from org.optaplanner.optapy import PythonWrapperGenerator from org.optaplanner.core.api.domain.solution import ProblemFactCollectionProperty as JavaProblemFactCollectionProperty getter_function.__return = PythonWrapperGenerator.getArrayClass(fact_type.__javaClass) getter_function.__optaplannerPlanningEntityCollectionProperty = { 'annotationType': JavaProblemFactCollectionProperty } return getter_function return problem_fact_collection_property_function_mapper def planning_entity_collection_property(entity_type): def planning_entity_collection_property_function_mapper(getter_function): ensure_init() from org.optaplanner.optapy import PythonWrapperGenerator from org.optaplanner.core.api.domain.solution import PlanningEntityCollectionProperty as JavaPlanningEntityCollectionProperty getter_function.__optaplannerPlanningEntityCollectionProperty = { 'annotationType': JavaPlanningEntityCollectionProperty } getter_function.__return = PythonWrapperGenerator.getArrayClass(entity_type.__javaClass) return getter_function return planning_entity_collection_property_function_mapper def value_range_provider(range_id): def value_range_provider_function_wrapper(getter_function): ensure_init() from org.optaplanner.core.api.domain.valuerange import ValueRangeProvider as JavaValueRangeProvider getter_function.__optaplannerValueRangeProvider = { 'annotationType': JavaValueRangeProvider, 'id': range_id } return getter_function return value_range_provider_function_wrapper def planning_score(score_type, bendable_hard_levels_size=None, bendable_soft_levels_size=None, score_definition_class=None): def planning_score_function_wrapper(getter_function): ensure_init() from org.optaplanner.core.api.domain.solution import PlanningScore as JavaPlanningScore getter_function.__optaplannerPlanningScore = { 'annotationType': JavaPlanningScore, 'bendableHardLevelsSize': bendable_hard_levels_size, 'bendableSoftLevelsSize': bendable_soft_levels_size, 'scoreDefinitionClass': score_definition_class } getter_function.__return = score_type return getter_function return planning_score_function_wrapper def planning_entity(entity_class): ensure_init() out = 
JImplements('org.optaplanner.optapy.OpaquePythonReference')(entity_class) out.__javaClass = _generate_planning_entity_class(entity_class) _add_deep_copy_to_class(out) return out def problem_fact(fact_class): ensure_init() out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(fact_class) out.__javaClass = _generate_problem_fact_class(fact_class) _add_deep_copy_to_class(out) return out def planning_solution(planning_solution_class): ensure_init() out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(planning_solution_class) out.__javaClass = _generate_planning_solution_class(planning_solution_class) _add_deep_copy_to_class(out) return out
Apache License 2.0
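The decorator above only attaches a generated Java class to the decorated function; here is a standalone sketch of that metadata-attaching decorator pattern in plain Python (it does not touch JPype or the real optapy class generation):

def constraint_provider(fn):
    # Stand-in for _generate_constraint_provider_class: record a marker
    # attribute instead of generating a Java class.
    fn._generated_java_class = "Generated" + fn.__name__.title().replace("_", "")
    return fn

@constraint_provider
def my_constraints(constraint_factory):
    # A real provider would build Constraints from the ConstraintFactory;
    # an empty list keeps this sketch self-contained.
    return []

print(my_constraints._generated_java_class)   # GeneratedMyConstraints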
hazyresearch/ukb-cardiac-mri
ukb/metrics/base.py
dcg_score
python
def dcg_score(y_true, y_score, k=None): k = len(y_true) if k is None else k order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gain = 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gain / discounts)
Compute the Discounted Cumulative Gain (DCG) of a ranking, truncated at rank k.
https://github.com/hazyresearch/ukb-cardiac-mri/blob/3177dde898a65b1d7f385b78e4f134de3852bea5/ukb/metrics/base.py#L126-L137
from __future__ import print_function from __future__ import division import torch import torch.nn as nn from torch.autograd import Variable import numpy as np from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix, classification_report, log_loss, roc_auc_score, roc_curve, precision_recall_curve, auc __all__ = ["binary_scores_from_counts", "print_metricatk", "print_scores", "classification_summary", "prc_auc_score", "dcg_score", "ndcg_score", "ndcg_score2", "f1_score", "precision_score", "recall_score", "accuracy_score", "confusion_matrix", "classification_report", "log_loss", "roc_auc_score", "roc_curve", "precision_recall_curve", "auc"] def binary_scores_from_counts(ntp, nfp, ntn, nfn): prec = ntp / float(ntp + nfp) if ntp + nfp > 0 else 0.0 rec = ntp / float(ntp + nfn) if ntp + nfn > 0 else 0.0 f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0 return prec, rec, f1 def print_metricatk(y_true, y_pred, y_proba): sorted_indexes = np.argsort(y_proba) print("========================================") print("Metric at K (5, 10, ...)") print("========================================") for k in range(5, y_true.shape[0], 5): target = sorted_indexes[-k:] prec = y_true[target].sum()/float(k) rec = y_true[target].sum()/float(y_true.sum()) f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0 print("At {:>4}: | Precision: {:5.1f} | Recall: {:5.1f} | F1: {:5.1f}".format(k, prec*100.0, rec*100.0, f1*100.0)) if rec == 1: break def print_scores(ntp, nfp, ntn, nfn, pos_acc, neg_acc, prec, rec, f1, roc, prc, ndcg, title='Scores'): print("========================================") print(title) print("========================================") print("Pos. class accuracy: {:2.1f}".format(pos_acc * 100)) print("Neg. class accuracy: {:2.1f}".format(neg_acc * 100)) print("----------------------------------------") print("AUC: {:2.1f}".format(roc * 100)) print("PRC: {:2.1f}".format(prc * 100)) print("NDCG: {:2.1f}".format(ndcg * 100)) print("----------------------------------------") print("Precision: {:2.1f}".format(prec * 100)) print("Recall: {:2.1f}".format(rec * 100)) print("F1: {:2.1f}".format(f1 * 100)) print("----------------------------------------") print("TP: {} | FP: {} | TN: {} | FN: {}".format(ntp, nfp, ntn, nfn)) print("========================================\n") def classification_summary(y_true, y_pred, classes, y_proba, verbose=True): roc = roc_auc_score(y_true, y_proba) prc = prc_auc_score(y_true, y_proba) if len(classes) <= 2: tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0,1]).ravel() prec, rec, f1 = binary_scores_from_counts(tp, fp, tn, fn) pos_acc = tp / float(tp + fn) if tp + fn > 0 else 0.0 neg_acc = tn / float(tn + fp) if tn + fp > 0 else 0.0 ndcg = ndcg_score(y_true, y_proba) if verbose: print_scores(tp, fp, tn, fn, pos_acc, neg_acc, prec, rec, f1, roc, prc, ndcg) header = ["ndcg", "roc", "prc", "precision", "recall", "f1", "pos_acc", "neg_acc", "tp", "fp", "tn", "fn"] return dict(zip(header,(ndcg, roc, prc, prec, rec, f1, pos_acc, neg_acc, tp, fp, tn, fn))) else: print(classification_report(y_true, y_pred, target_names=classes, digits=3)) return {} def prc_auc_score(y_true, y_prob): precision, recall, _ = precision_recall_curve(y_true, y_prob) prc_auc = auc(recall, precision) return prc_auc
Apache License 2.0
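A standalone worked example of the DCG/NDCG construction used by dcg_score above, needing only NumPy:

import numpy as np

def dcg(y_true, y_score, k=None):
    # Same steps as dcg_score above: rank items by predicted score, then sum
    # the gains (2**relevance - 1) with a logarithmic positional discount.
    k = len(y_true) if k is None else k
    order = np.argsort(y_score)[::-1]
    gains = 2 ** np.take(y_true, order[:k]) - 1
    discounts = np.log2(np.arange(len(gains)) + 2)
    return np.sum(gains / discounts)

y_true = np.array([1, 0, 1, 0, 0])              # binary relevance labels
y_score = np.array([0.9, 0.8, 0.2, 0.6, 0.1])   # model scores

actual = dcg(y_true, y_score, k=3)              # DCG of the predicted ranking
ideal = dcg(y_true, y_true, k=3)                # DCG of the ideal ranking
print(actual / ideal)                           # NDCG@3, roughly 0.61 here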
python-acoustics/python-acoustics
acoustics/standards/iso_1996_2_2007.py
Tonality.results_as_dataframe
python
def results_as_dataframe(self): data = ((tone.center, tone.tone_level, tone.bandwidth_3db, tone.critical_band.start, tone.critical_band.end, tone.critical_band.bandwidth, tone.critical_band.regression_slope, tone.critical_band.regression_intercept, tone.critical_band.masking_noise_level, tone.critical_band.total_tone_level, tone.critical_band.tonal_audibility, tone.critical_band.adjustment) for tone in self.tones) columns = [ 'center', 'tone_level', 'bandwidth_3db', 'critical_band_start', 'critical_band_end', 'critical_band_bandwidth', 'regression_slope', 'regression_intercept', 'masking_noise_level', 'total_tone_level', 'tonal_audibility', 'adjustment' ] return pd.DataFrame(list(data), columns=columns)
Return the tonality results as a pandas DataFrame.
https://github.com/python-acoustics/python-acoustics/blob/fbc87454422c41e1a39e282d7680126a6d8014dd/acoustics/standards/iso_1996_2_2007.py#L417-L429
import numpy as np import pandas as pd from scipy.signal import welch from scipy.stats import linregress import matplotlib.pyplot as plt from acoustics.decibel import dbsum from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE import weakref from tabulate import tabulate TONE_WITHIN_PAUSE_CRITERION_DB = 6.0 TONE_BANDWIDTH_CRITERION_DB = 3.0 TONE_LINES_CRITERION_DB = 6.0 TONE_SEEK_CRITERION = 1.0 REGRESSION_RANGE_FACTOR = 0.75 _WINDOW_CORRECTION = { 'hanning': -1.8, } def window_correction(window): try: return _WINDOW_CORRECTION[window] except KeyError: raise ValueError("Window correction is not available for specified window.") def critical_band(frequency): if isinstance(frequency, np.ndarray): center = frequency.copy() center[frequency < 50.0] = 50.0 else: center = 50.0 if frequency < 50 else frequency bandwidth = (center > 500.0) * (center * 0.20) + (center <= 500.0) * 100.0 upper = center + bandwidth / 2.0 lower = center - bandwidth / 2.0 return center, lower, upper, bandwidth def tones_level(tone_levels): return dbsum(tone_levels) def masking_noise_level(noise_lines, frequency_resolution, effective_analysis_bandwidth): return dbsum(noise_lines) + 10.0 * np.log10(frequency_resolution / effective_analysis_bandwidth) def masking_noise_lines(levels, line_classifier, center, bandwidth, regression_range_factor): slicer = slice(center - bandwidth * regression_range_factor, center + bandwidth * regression_range_factor) levels = levels[slicer] frequencies = levels.index regression_levels = levels[line_classifier == 'noise'] slope, intercept = linregress(x=regression_levels.index, y=regression_levels)[0:2] levels_from_regression = slope * frequencies + intercept return levels_from_regression, slope, intercept def tonal_audibility(tones_level, masking_noise_level, center): return tones_level - masking_noise_level + 2.0 + np.log10(1.0 + (center / 502.0)**(2.5)) def tonal_adjustment(tonal_audibility): if tonal_audibility > 10.0: return 6.0 elif tonal_audibility < 4.0: return 0.0 else: return tonal_audibility - 4.0 class Tonality: def __init__( self, signal, sample_frequency, window='hanning', reference_pressure=REFERENCE_PRESSURE, tsc=TONE_SEEK_CRITERION, regression_range_factor=REGRESSION_RANGE_FACTOR, nbins=None, force_tone_without_pause=False, force_bandwidth_criterion=False, ): self.signal = signal self.sample_frequency = sample_frequency self.window = window self.reference_pressure = reference_pressure self.tsc = tsc self.regression_range_factor = regression_range_factor self.nbins = nbins self._noise_pauses = list() self._spectrum = None self.force_tone_without_pause = force_tone_without_pause self.force_bandwidth_criterion = force_bandwidth_criterion @property def noise_pauses(self): for noise_pause in self._noise_pauses: yield noise_pause @property def tones(self): for noise_pause in self.noise_pauses: if noise_pause.tone is not None: yield noise_pause.tone @property def critical_bands(self): for tone in self.tones: yield tone.critical_band @property def spectrum(self): if self._spectrum is None: nbins = self.nbins if nbins is None: nbins = self.sample_frequency nbins //= 1 f, p = welch(self.signal, fs=self.sample_frequency, nperseg=nbins, window=self.window, detrend=False, scaling='spectrum') self._spectrum = pd.Series(10.0 * np.log10(p / self.reference_pressure**2.0), index=f) return self._spectrum @property def frequency_resolution(self): df = np.diff(np.array(self.spectrum.index)).mean() return df @property def effective_analysis_bandwidth(self): if self.window == 
'hanning': return 1.5 * self.frequency_resolution else: raise ValueError() def _set_noise_pauses(self, noise_pauses): self._noise_pauses = [NoisePause(start, end) for start, end in noise_pauses] return self def determine_noise_pauses(self, end=None): self._set_noise_pauses(noise_pause_seeker(np.array(self.spectrum[:end]), self.tsc)) return self def _construct_line_classifier(self): levels = self.spectrum categories = ['noise', 'start', 'end', 'neither', 'tone'] self.line_classifier = pd.Series( pd.Categorical(['noise'] * len(levels), categories=categories), index=levels.index) for noise_pause in self.noise_pauses: self.line_classifier.iloc[noise_pause.start] = 'start' self.line_classifier.iloc[noise_pause.end] = 'end' self.line_classifier.iloc[noise_pause.start + 1:noise_pause.end] = 'neither' for tone in self.tones: self.line_classifier.iloc[tone._tone_lines] = 'tone' return self def _determine_tones(self): levels = self.spectrum for noise_pause in self.noise_pauses: tone_indices, bandwidth_for_tone_criterion = determine_tone_lines( levels, self.frequency_resolution, noise_pause.start, noise_pause.end, self.force_tone_without_pause, self.force_bandwidth_criterion, ) if np.any(tone_indices): noise_pause.tone = create_tone(levels, tone_indices, bandwidth_for_tone_criterion, weakref.proxy(noise_pause)) return self def _determine_critical_bands(self): for tone in self.tones: critical_band = self.critical_band_at(tone.center) tone.critical_band = critical_band critical_band.tone = weakref.proxy(tone) return self def analyse(self): self._determine_tones() self._construct_line_classifier() self._determine_critical_bands() return self def critical_band_at(self, frequency): return create_critical_band(self.spectrum, self.line_classifier, frequency, self.frequency_resolution, self.effective_analysis_bandwidth, self.regression_range_factor, self.window) def plot_spectrum(self): spectrum = self.spectrum fig = plt.figure() ax = fig.add_subplot(111) ax.plot(spectrum.index, spectrum) ax.set_xlabel('f in Hz') ax.set_ylabel('L in dB') return fig @property def dominant_tone(self): try: return sorted(self.tones, key=lambda x: x.critical_band.tonal_audibility, reverse=True)[0] except IndexError: return None def plot_results(self, noise_pauses=False, tones=True, critical_bands=True): df = self.frequency_resolution levels = self.spectrum fig = plt.figure() ax = fig.add_subplot(111) ax.plot(levels.index, levels) ax.set_xlabel("$f$ in Hz") ax.set_ylabel("$L$ in dB") if noise_pauses: for pause in self.noise_pauses: ax.axvspan(pause.start * df, pause.end * df, color='green', alpha=0.05) if tones: for tone in self.tones: ax.axvline(tone.center, color='black', alpha=0.05) if critical_bands: for band in self.critical_bands: ax.axvspan(band.start, band.end, color='yellow', alpha=0.05) band = self.dominant_tone.critical_band ax.axvline(band.start, color='red', linewidth=0.1) ax.axvline(band.end, color='red', linewidth=0.1) if noise_pauses: _items = list(self.noise_pauses) elif critical_bands: _items = list(self.critical_bands) ax.set_xlim(min(item.start for item in _items), max(item.end for item in _items)) return fig def overview(self): try: cb = self.dominant_tone.critical_band except AttributeError: raise ValueError("Cannot show overview (yet). 
No tones have been determined.") tones = [("Tone", "{:4.1f} Hz: {:4.1f} dB".format(tone.center, tone.tone_level)) for tone in self.tones] table = [ ("Critical band", "{:4.1f} to {:4.1f} Hz".format(cb.start, cb.end)), ("Masking noise level $L_{pn}$", "{:4.1f} dB".format(cb.masking_noise_level)), ("Tonal level $L_{pt}$", "{:4.1f} dB".format(cb.total_tone_level)), ("Dominant tone", "{:4.1f} Hz".format(cb.tone.center)), ("3 dB bandwidth of tone", "{:2.1f}% of {:4.1f}".format(cb.tone.bandwidth_3db / cb.bandwidth * 100.0, cb.bandwidth)), ("Tonal audibility $L_{ta}$", "{:4.1f} dB".format(cb.tonal_audibility)), ("Adjustment $K_{t}$", "{:4.1f} dB".format(cb.adjustment)), ("Frequency resolution", "{:4.1f} Hz".format(self.frequency_resolution)), ("Effective analysis bandwidth", "{:4.1f} Hz".format(self.effective_analysis_bandwidth)), ] table += tones return tabulate(table)
BSD 3-Clause New or Revised License
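A hedged usage sketch for the Tonality workflow that ends in results_as_dataframe above; the signal is synthetic, and whether a tone is actually reported depends on the chosen levels (requires the acoustics package, NumPy, and SciPy):

import numpy as np
from acoustics.standards.iso_1996_2_2007 import Tonality

fs = 44100
t = np.arange(0, 5.0, 1.0 / fs)
# A 1 kHz tone over a weak broadband noise floor.
signal = 0.1 * np.sin(2.0 * np.pi * 1000.0 * t) + 0.005 * np.random.randn(t.size)

tonality = Tonality(signal, fs)
tonality.determine_noise_pauses().analyse()     # seek noise pauses, then tones
print(tonality.results_as_dataframe())          # one row per detected tone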
ericmjl/nxviz
nxviz/utils.py
is_data_diverging
python
def is_data_diverging(data_container: Iterable): assert infer_data_type(data_container) in [ "ordinal", "continuous", ], "Data type should be ordinal or continuous" has_negative = False has_positive = False for i in data_container: if i < 0: has_negative = True elif i > 0: has_positive = True if has_negative and has_positive: return True else: return False
Check whether the data are diverging, i.e. contain both negative and positive values. This is a simple check that can be made much more sophisticated. :param data_container: A generic container of data points. :type data_container: `iterable`
https://github.com/ericmjl/nxviz/blob/d4cad2cf961ceaa97bb5770ec21925f141d532a4/nxviz/utils.py#L105-L130
from collections import Counter import pandas as pd import warnings from typing import Iterable def is_data_homogenous(data_container: Iterable): data_types = set([type(i) for i in data_container]) return len(data_types) == 1 def infer_data_type(data_container: Iterable): warnings.warn( "`infer_data_type` is deprecated! " "Please use `infer_data_family` instead!" ) assert isinstance(data_container, list) or isinstance( data_container, tuple ), "data_container should be a list or tuple." assert ( len(set(data_container)) > 1 ), "There should be more than one value in the data container." assert is_data_homogenous(data_container), "Data are not of a homogenous type!" datum = data_container[0] if len(set(data_container)) == 2: return "categorical" elif isinstance(datum, str): return "categorical" elif isinstance(datum, int): return "ordinal" elif isinstance(datum, float): return "continuous" else: raise ValueError("Not possible to tell what the data type is.") def infer_data_family(data: pd.Series): if data.dtype == float: if data.min() < 0 and data.max() > 0: return "divergent" return "continuous" if data.dtype == int: if len(set(data)) > 9: return "continuous" return "ordinal" return "categorical"
MIT License
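A short usage sketch for is_data_diverging above, assuming the import path matches the file location in the repository:

from nxviz.utils import is_data_diverging

print(is_data_diverging([-2.5, -0.3, 1.7, 4.0]))   # True: both signs present
print(is_data_diverging([0.2, 1.4, 3.3, 7.1]))     # False: positive values only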
praveen-palanisamy/macad-gym
src/macad_gym/carla/PythonAPI/agents/navigation/agent.py
Agent._is_light_red_us_style
python
def _is_light_red_us_style(self, lights_list, debug=False): ego_vehicle_location = self._vehicle.get_location() ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location) if ego_vehicle_waypoint.is_intersection: return (False, None) if self._local_planner.target_waypoint is not None: if self._local_planner.target_waypoint.is_intersection: min_angle = 180.0 sel_magnitude = 0.0 sel_traffic_light = None for traffic_light in lights_list: loc = traffic_light.get_location() magnitude, angle = compute_magnitude_angle(loc, ego_vehicle_location, self._vehicle.get_transform().rotation.yaw) if magnitude < 80.0 and angle < min(25.0, min_angle): sel_magnitude = magnitude sel_traffic_light = traffic_light min_angle = angle if sel_traffic_light is not None: if debug: print('=== Magnitude = {} | Angle = {} | ID = {}'.format( sel_magnitude, min_angle, sel_traffic_light.id)) if self._last_traffic_light is None: self._last_traffic_light = sel_traffic_light if self._last_traffic_light.state == carla.TrafficLightState.Red: return (True, self._last_traffic_light) else: self._last_traffic_light = None return (False, None)
This method is specialized to check US-style traffic lights. :param lights_list: list containing TrafficLight objects :return: a tuple given by (bool_flag, traffic_light), where - bool_flag is True if there is a traffic light in RED affecting us and False otherwise - traffic_light is the object itself or None if there is no red traffic light affecting us
https://github.com/praveen-palanisamy/macad-gym/blob/38884ac4bc7fb2e91a865950cff4eeadeed02f60/src/macad_gym/carla/PythonAPI/agents/navigation/agent.py#L107-L153
from enum import Enum import carla from agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle class AgentState(Enum): NAVIGATING = 1 BLOCKED_BY_VEHICLE = 2 BLOCKED_RED_LIGHT = 3 class Agent(object): def __init__(self, vehicle): self._vehicle = vehicle self._proximity_threshold = 10.0 self._local_planner = None self._world = self._vehicle.get_world() self._map = self._vehicle.get_world().get_map() self._last_traffic_light = None def run_step(self, debug=False): control = carla.VehicleControl() if debug: control.steer = 0.0 control.throttle = 0.0 control.brake = 0.0 control.hand_brake = False control.manual_gear_shift = False return control def _is_light_red(self, lights_list): if self._map.name == 'Town01' or self._map.name == 'Town02': return self._is_light_red_europe_style(lights_list) else: return self._is_light_red_us_style(lights_list) def _is_light_red_europe_style(self, lights_list): ego_vehicle_location = self._vehicle.get_location() ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location) for traffic_light in lights_list: object_waypoint = self._map.get_waypoint(traffic_light.get_location()) if object_waypoint.road_id != ego_vehicle_waypoint.road_id or object_waypoint.lane_id != ego_vehicle_waypoint.lane_id: continue loc = traffic_light.get_location() if is_within_distance_ahead(loc, ego_vehicle_location, self._vehicle.get_transform().rotation.yaw, self._proximity_threshold): if traffic_light.state == carla.TrafficLightState.Red: return (True, traffic_light) return (False, None)
MIT License
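A standalone sketch of the geometric gate used above (within 80 m and within 25 degrees of the ego heading); it re-derives the magnitude/angle computation with plain math rather than calling carla's compute_magnitude_angle helper, so it runs without CARLA installed:

import math

def magnitude_and_angle(target_xy, ego_xy, ego_yaw_deg):
    # Distance to the target and the absolute angle between the ego heading
    # and the line of sight to the target, wrapped to [0, 180] degrees.
    dx, dy = target_xy[0] - ego_xy[0], target_xy[1] - ego_xy[1]
    magnitude = math.hypot(dx, dy)
    angle = abs(math.degrees(math.atan2(dy, dx)) - ego_yaw_deg) % 360.0
    return magnitude, min(angle, 360.0 - angle)

def select_traffic_light(lights_xy, ego_xy, ego_yaw_deg,
                         max_distance=80.0, max_angle=25.0):
    # Mirrors the selection loop above: keep the in-range light that is most
    # closely aligned with the ego heading.
    best, best_angle = None, max_angle
    for light in lights_xy:
        magnitude, angle = magnitude_and_angle(light, ego_xy, ego_yaw_deg)
        if magnitude < max_distance and angle < best_angle:
            best, best_angle = light, angle
    return best

print(select_traffic_light([(60.0, 5.0), (30.0, 40.0)], (0.0, 0.0), 0.0))  # (60.0, 5.0)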
humancompatibleai/imitation
src/imitation/envs/examples/model_envs.py
make_random_state_dist
python
def make_random_state_dist( n_avail: int, n_states: int, rand_state: np.random.RandomState = np.random, ) -> np.ndarray: assert 0 < n_avail <= n_states init_dist = np.zeros((n_states,)) next_states = rand_state.choice(n_states, size=(n_avail,), replace=False) avail_state_dist = rand_state.dirichlet(np.ones((n_avail,))) init_dist[next_states] = avail_state_dist assert np.sum(init_dist > 0) == n_avail init_dist = init_dist / np.sum(init_dist) return init_dist
Make a random initial state distribution over n_states. Args: n_avail: Number of states available to transition into. n_states: Total number of states. rand_state: NumPy random state. Returns: An initial state distribution that is zero at all but a uniformly random chosen subset of `n_avail` states. This subset of chosen states are set to a sample from the uniform distribution over the (n_avail-1) simplex, aka the flat Dirichlet distribution. Raises: ValueError: If `n_avail` is not in the range `(0, n_states]`.
https://github.com/humancompatibleai/imitation/blob/4ccc96f9e4920dd4a2c4dd01d90978c76f75ea3c/src/imitation/envs/examples/model_envs.py#L51-L79
from typing import Optional import gym import numpy as np from imitation.envs.resettable_env import TabularModelEnv def make_random_trans_mat( n_states, n_actions, max_branch_factor, rand_state=np.random, ) -> np.ndarray: out_mat = np.zeros((n_states, n_actions, n_states), dtype="float32") for start_state in range(n_states): for action in range(n_actions): succs = rand_state.randint(1, max_branch_factor + 1) next_states = rand_state.choice(n_states, size=(succs,), replace=False) next_vec = rand_state.dirichlet(np.ones((succs,))) next_vec = next_vec / np.sum(next_vec) out_mat[start_state, action, next_states] = next_vec return out_mat
MIT License
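A standalone sketch of the same construction as make_random_state_dist above, using only NumPy (no imitation import needed):

import numpy as np

rng = np.random.RandomState(0)

n_states, n_avail = 8, 3
init_dist = np.zeros(n_states)
support = rng.choice(n_states, size=n_avail, replace=False)   # reachable states
init_dist[support] = rng.dirichlet(np.ones(n_avail))          # flat Dirichlet mass

print(support)                        # the three states with nonzero probability
print(init_dist, init_dist.sum())     # zero elsewhere, sums to 1.0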
algorand/pyteal
pyteal/ast/binaryexpr.py
ExtractUint16
python
def ExtractUint16(string: Expr, offset: Expr) -> BinaryExpr: return BinaryExpr( Op.extract_uint16, (TealType.bytes, TealType.uint64), TealType.uint64, string, offset, )
Extract 2 bytes (16 bits) and convert them to an integer. The bytes starting at :code:`offset` up to but not including :code:`offset + 2` will be interpreted as a big-endian unsigned integer. If :code:`offset + 2` exceeds :code:`Len(string)`, the program fails. Requires TEAL version 5 or higher. Args: string: The bytestring to extract from. Must evaluate to bytes. offset: The offset in the bytestring to start extracting. Must evaluate to uint64.
https://github.com/algorand/pyteal/blob/5636ccd000df8f612d86b588b103186641ee192d/pyteal/ast/binaryexpr.py#L529-L549
from typing import Union, Tuple, cast, TYPE_CHECKING from ..types import TealType, require_type from ..errors import verifyTealVersion from ..ir import TealOp, Op, TealBlock from .expr import Expr if TYPE_CHECKING: from ..compiler import CompileOptions class BinaryExpr(Expr): def __init__( self, op: Op, inputType: Union[TealType, Tuple[TealType, TealType]], outputType: TealType, argLeft: Expr, argRight: Expr, ) -> None: super().__init__() if type(inputType) is tuple: leftType, rightType = inputType else: leftType = cast(TealType, inputType) rightType = leftType require_type(argLeft.type_of(), leftType) require_type(argRight.type_of(), rightType) self.op = op self.outputType = outputType self.argLeft = argLeft self.argRight = argRight def __teal__(self, options: "CompileOptions"): verifyTealVersion( self.op.min_version, options.version, "TEAL version too low to use op {}".format(self.op), ) return TealBlock.FromOp( options, TealOp(self, self.op), self.argLeft, self.argRight ) def __str__(self): return "({} {} {})".format(self.op, self.argLeft, self.argRight) def type_of(self): return self.outputType def has_return(self): return False BinaryExpr.__module__ = "pyteal" def Add(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.add, TealType.uint64, TealType.uint64, left, right) def Minus(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.minus, TealType.uint64, TealType.uint64, left, right) def Mul(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.mul, TealType.uint64, TealType.uint64, left, right) def Div(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.div, TealType.uint64, TealType.uint64, left, right) def Mod(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.mod, TealType.uint64, TealType.uint64, left, right) def Exp(a: Expr, b: Expr) -> BinaryExpr: return BinaryExpr(Op.exp, TealType.uint64, TealType.uint64, a, b) def BitwiseAnd(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.bitwise_and, TealType.uint64, TealType.uint64, left, right) def BitwiseOr(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.bitwise_or, TealType.uint64, TealType.uint64, left, right) def BitwiseXor(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.bitwise_xor, TealType.uint64, TealType.uint64, left, right) def ShiftLeft(a: Expr, b: Expr) -> BinaryExpr: return BinaryExpr(Op.shl, TealType.uint64, TealType.uint64, a, b) def ShiftRight(a: Expr, b: Expr) -> BinaryExpr: return BinaryExpr(Op.shr, TealType.uint64, TealType.uint64, a, b) def Eq(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.eq, right.type_of(), TealType.uint64, left, right) def Neq(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.neq, right.type_of(), TealType.uint64, left, right) def Lt(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.lt, TealType.uint64, TealType.uint64, left, right) def Le(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.le, TealType.uint64, TealType.uint64, left, right) def Gt(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.gt, TealType.uint64, TealType.uint64, left, right) def Ge(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.ge, TealType.uint64, TealType.uint64, left, right) def GetBit(value: Expr, index: Expr) -> BinaryExpr: return BinaryExpr( Op.getbit, (TealType.anytype, TealType.uint64), TealType.uint64, value, index ) def GetByte(value: Expr, index: Expr) -> BinaryExpr: return BinaryExpr( Op.getbyte, (TealType.bytes, TealType.uint64), TealType.uint64, value, index ) def 
BytesAdd(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_add, TealType.bytes, TealType.bytes, left, right) def BytesMinus(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_minus, TealType.bytes, TealType.bytes, left, right) def BytesDiv(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_div, TealType.bytes, TealType.bytes, left, right) def BytesMul(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_mul, TealType.bytes, TealType.bytes, left, right) def BytesMod(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_mod, TealType.bytes, TealType.bytes, left, right) def BytesAnd(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_and, TealType.bytes, TealType.bytes, left, right) def BytesOr(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_or, TealType.bytes, TealType.bytes, left, right) def BytesXor(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_xor, TealType.bytes, TealType.bytes, left, right) def BytesEq(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_eq, TealType.bytes, TealType.uint64, left, right) def BytesNeq(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_neq, TealType.bytes, TealType.uint64, left, right) def BytesLt(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_lt, TealType.bytes, TealType.uint64, left, right) def BytesLe(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_le, TealType.bytes, TealType.uint64, left, right) def BytesGt(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_gt, TealType.bytes, TealType.uint64, left, right) def BytesGe(left: Expr, right: Expr) -> BinaryExpr: return BinaryExpr(Op.b_ge, TealType.bytes, TealType.uint64, left, right)
MIT License
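A hedged PyTeal sketch using ExtractUint16 above; the constant 0x01ff holds the big-endian uint16 value 511 at offset 0, and the program is compiled with version=5 because extract_uint16 needs TEAL 5:

from pyteal import *

program = Seq([
    # 0x01ff at offset 0 decodes to 511 as a big-endian uint16.
    Assert(ExtractUint16(Bytes("base16", "0x01ff"), Int(0)) == Int(511)),
    Return(Int(1)),
])

print(compileTeal(program, mode=Mode.Application, version=5))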
openstack/sahara
sahara/db/api.py
cluster_verification_delete
python
def cluster_verification_delete(context, verification_id): return IMPL.cluster_verification_delete(context, verification_id)
Delete verification with the specified id.
https://github.com/openstack/sahara/blob/2ce24b6f933eef8860947d8632d607867dfbf841/sahara/db/api.py#L584-L586
from oslo_config import cfg from oslo_db import api as db_api from oslo_db import options from sahara.utils import types CONF = cfg.CONF options.set_defaults(CONF) _BACKEND_MAPPING = { 'sqlalchemy': 'sahara.db.sqlalchemy.api', } IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING) def setup_db(): return IMPL.setup_db() def drop_db(): return IMPL.drop_db() def constraint(**conditions): return IMPL.constraint(**conditions) def equal_any(*values): return IMPL.equal_any(*values) def not_equal(*values): return IMPL.not_equal(*values) def to_dict(func): def decorator(*args, **kwargs): res = func(*args, **kwargs) if isinstance(res, types.Page): return types.Page([item.to_dict() for item in res], res.prev, res.next) if isinstance(res, list): return [item.to_dict() for item in res] if res: return res.to_dict() else: return None return decorator def cluster_get(context, cluster, show_progress=False): if show_progress: cluster = IMPL.cluster_provision_progress_update(context, cluster) else: cluster = IMPL.cluster_get(context, cluster) if cluster: return cluster.to_dict(show_progress) return None @to_dict def cluster_get_all(context, regex_search=False, **kwargs): return IMPL.cluster_get_all(context, regex_search, **kwargs) @to_dict def cluster_create(context, values): return IMPL.cluster_create(context, values) @to_dict def cluster_update(context, cluster, values): return IMPL.cluster_update(context, cluster, values) def cluster_destroy(context, cluster): IMPL.cluster_destroy(context, cluster) def node_group_add(context, cluster, values): return IMPL.node_group_add(context, cluster, values) def node_group_update(context, node_group, values): IMPL.node_group_update(context, node_group, values) def node_group_remove(context, node_group): IMPL.node_group_remove(context, node_group) def instance_add(context, node_group, values): return IMPL.instance_add(context, node_group, values) def instance_update(context, instance, values): IMPL.instance_update(context, instance, values) def instance_remove(context, instance): IMPL.instance_remove(context, instance) def append_volume(context, instance, volume_id): IMPL.append_volume(context, instance, volume_id) def remove_volume(context, instance, volume_id): IMPL.remove_volume(context, instance, volume_id) @to_dict def cluster_template_get(context, cluster_template): return IMPL.cluster_template_get(context, cluster_template) @to_dict def cluster_template_get_all(context, regex_search=False, **kwargs): return IMPL.cluster_template_get_all(context, regex_search, **kwargs) @to_dict def cluster_template_create(context, values): return IMPL.cluster_template_create(context, values) def cluster_template_destroy(context, cluster_template, ignore_prot_on_def=False): IMPL.cluster_template_destroy(context, cluster_template, ignore_prot_on_def) @to_dict def cluster_template_update(context, values, ignore_prot_on_def=False): return IMPL.cluster_template_update(context, values, ignore_prot_on_def) @to_dict def node_group_template_get(context, node_group_template): return IMPL.node_group_template_get(context, node_group_template) @to_dict def node_group_template_get_all(context, regex_search=False, **kwargs): return IMPL.node_group_template_get_all(context, regex_search, **kwargs) @to_dict def node_group_template_create(context, values): return IMPL.node_group_template_create(context, values) def node_group_template_destroy(context, node_group_template, ignore_prot_on_def=False): IMPL.node_group_template_destroy(context, node_group_template, 
ignore_prot_on_def) @to_dict def node_group_template_update(context, node_group_template, ignore_prot_on_def=False): return IMPL.node_group_template_update(context, node_group_template, ignore_prot_on_def) @to_dict def data_source_get(context, data_source): return IMPL.data_source_get(context, data_source) @to_dict def data_source_get_all(context, regex_search=False, **kwargs): return IMPL.data_source_get_all(context, regex_search, **kwargs) def data_source_count(context, **kwargs): return IMPL.data_source_count(context, **kwargs) @to_dict def data_source_create(context, values): return IMPL.data_source_create(context, values) def data_source_destroy(context, data_source): IMPL.data_source_destroy(context, data_source) @to_dict def data_source_update(context, data_source): return IMPL.data_source_update(context, data_source) @to_dict def job_execution_get(context, job_execution): return IMPL.job_execution_get(context, job_execution) @to_dict def job_execution_get_all(context, regex_search=False, **kwargs): return IMPL.job_execution_get_all(context, regex_search, **kwargs) def job_execution_count(context, **kwargs): return IMPL.job_execution_count(context, **kwargs) @to_dict def job_execution_create(context, values): return IMPL.job_execution_create(context, values) @to_dict def job_execution_update(context, job_execution, values): return IMPL.job_execution_update(context, job_execution, values) def job_execution_destroy(context, job_execution): IMPL.job_execution_destroy(context, job_execution) @to_dict def job_get(context, job): return IMPL.job_get(context, job) @to_dict def job_get_all(context, regex_search=False, **kwargs): return IMPL.job_get_all(context, regex_search, **kwargs) @to_dict def job_create(context, values): return IMPL.job_create(context, values) @to_dict def job_update(context, job, values): return IMPL.job_update(context, job, values) def job_destroy(context, job): IMPL.job_destroy(context, job) @to_dict def job_binary_get_all(context, regex_search=False, **kwargs): return IMPL.job_binary_get_all(context, regex_search, **kwargs) @to_dict def job_binary_get(context, job_binary): return IMPL.job_binary_get(context, job_binary) @to_dict def job_binary_create(context, values): return IMPL.job_binary_create(context, values) def job_binary_destroy(context, job_binary): IMPL.job_binary_destroy(context, job_binary) @to_dict def job_binary_update(context, values): return IMPL.job_binary_update(context, values) @to_dict def job_binary_internal_get_all(context, regex_search=False, **kwargs): return IMPL.job_binary_internal_get_all(context, regex_search, **kwargs) @to_dict def job_binary_internal_get(context, job_binary_internal): return IMPL.job_binary_internal_get(context, job_binary_internal) @to_dict def job_binary_internal_create(context, values): return IMPL.job_binary_internal_create(context, values) def job_binary_internal_destroy(context, job_binary_internal): IMPL.job_binary_internal_destroy(context, job_binary_internal) def job_binary_internal_get_raw_data(context, job_binary_internal_id): return IMPL.job_binary_internal_get_raw_data(context, job_binary_internal_id) @to_dict def job_binary_internal_update(context, job_binary_internal, values): return IMPL.job_binary_internal_update( context, job_binary_internal, values) def cluster_provision_step_add(context, cluster_id, values): return IMPL.cluster_provision_step_add(context, cluster_id, values) def cluster_provision_step_update(context, step_id): return IMPL.cluster_provision_step_update(context, step_id) def 
cluster_provision_progress_update(context, cluster_id): return IMPL.cluster_provision_progress_update(context, cluster_id) def cluster_event_add(context, provision_step, values): return IMPL.cluster_event_add(context, provision_step, values) @to_dict def cluster_verification_add(context, cluster_id, values): return IMPL.cluster_verification_add(context, cluster_id, values) @to_dict def cluster_verification_get(context, verification_id): return IMPL.cluster_verification_get(context, verification_id) @to_dict def cluster_verification_update(context, verification_id, values): return IMPL.cluster_verification_update(context, verification_id, values)
Apache License 2.0
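The module above is a thin dispatcher: every call is forwarded to the configured backend and optionally converted with to_dict. A standalone sketch of that to_dict delegation, with a hypothetical Record class standing in for the SQLAlchemy models:

def to_dict(func):
    def decorator(*args, **kwargs):
        res = func(*args, **kwargs)
        if isinstance(res, list):
            return [item.to_dict() for item in res]
        return res.to_dict() if res else None
    return decorator

class Record:
    def __init__(self, **fields):
        self._fields = fields

    def to_dict(self):
        return dict(self._fields)

@to_dict
def cluster_verification_get(context, verification_id):
    # A real backend would query the database; this fabricates a record.
    return Record(id=verification_id, status="GREEN")

print(cluster_verification_get({}, "abc"))   # {'id': 'abc', 'status': 'GREEN'}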
h2r/pomdp-py
pomdp_problems/multi_object_search/models/transition_model.py
RobotTransitionModel.if_move_by
python
def if_move_by(cls, robot_id, state, action, dim, check_collision=True): if not isinstance(action, MotionAction): raise ValueError("Cannot move robot with %s action" % str(type(action))) robot_pose = state.pose(robot_id) rx, ry, rth = robot_pose if action.scheme == MotionAction.SCHEME_XYTH: dx, dy, th = action.motion rx += dx ry += dy rth = th elif action.scheme == MotionAction.SCHEME_VW: forward, angle = action.motion rth += angle rx = int(round(rx + forward*math.cos(rth))) ry = int(round(ry + forward*math.sin(rth))) rth = rth % (2*math.pi) if valid_pose((rx, ry, rth), dim[0], dim[1], state=state, check_collision=check_collision, pose_objid=robot_id): return (rx, ry, rth) else: return robot_pose
Defines the dynamics of robot motion; dim (tuple): the width, length of the search world.
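A standalone sketch of the SCHEME_VW branch above, in plain Python rather than the package API; the start pose and motion values are made up for illustration.

import math

rx, ry, rth = 2, 3, 0.0          # current (x, y, heading) of the robot
forward, angle = 1.0, math.pi/2  # turn 90 degrees, then move one cell

rth += angle
rx = int(round(rx + forward*math.cos(rth)))  # 2 + cos(pi/2) ~ 2
ry = int(round(ry + forward*math.sin(rth)))  # 3 + sin(pi/2) = 4
rth = rth % (2*math.pi)
print((rx, ry, rth))  # (2, 4, 1.5707963267948966)

# if_move_by additionally rejects the candidate pose (returning the old one)
# when valid_pose finds it off-grid or colliding with an obstacle.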
https://github.com/h2r/pomdp-py/blob/5c1837c38676eb53442af238cbec4115f3b17f28/pomdp_problems/multi_object_search/models/transition_model.py#L84-L113
import pomdp_py import copy from pomdp_problems.multi_object_search.domain.state import * from pomdp_problems.multi_object_search.domain.observation import * from pomdp_problems.multi_object_search.domain.action import * class MosTransitionModel(pomdp_py.OOTransitionModel): def __init__(self, dim, sensors, object_ids, epsilon=1e-9): self._sensors = sensors transition_models = {objid: StaticObjectTransitionModel(objid, epsilon=epsilon) for objid in object_ids if objid not in sensors} for robot_id in sensors: transition_models[robot_id] = RobotTransitionModel(sensors[robot_id], dim, epsilon=epsilon) super().__init__(transition_models) def sample(self, state, action, **kwargs): oostate = pomdp_py.OOTransitionModel.sample(self, state, action, **kwargs) return MosOOState(oostate.object_states) def argmax(self, state, action, normalized=False, **kwargs): oostate = pomdp_py.OOTransitionModel.argmax(self, state, action, **kwargs) return MosOOState(oostate.object_states) class StaticObjectTransitionModel(pomdp_py.TransitionModel): def __init__(self, objid, epsilon=1e-9): self._objid = objid self._epsilon = epsilon def probability(self, next_object_state, state, action): if next_object_state != state.object_states[next_object_state['id']]: return self._epsilon else: return 1.0 - self._epsilon def sample(self, state, action): return self.argmax(state, action) def argmax(self, state, action): return copy.deepcopy(state.object_states[self._objid]) class RobotTransitionModel(pomdp_py.TransitionModel): def __init__(self, sensor, dim, epsilon=1e-9): self._sensor = sensor self._robot_id = sensor.robot_id self._dim = dim self._epsilon = epsilon @classmethod
MIT License
lepinkainen/pyfibot
pyfibot/botcore.py
CoreCommands.command_leave
python
def command_leave(self, user, channel, args):
        self.command_part(user, channel, args)
Usage: leave <channel>[@network] - Leave the specified channel
https://github.com/lepinkainen/pyfibot/blob/64ce4d7a69d37be8c1c03379dd8e65b9f77f886b/pyfibot/botcore.py#L116-L118
from __future__ import print_function, division from twisted.words.protocols import irc from twisted.internet import reactor, threads from twisted.python import rebuild from types import FunctionType import inspect import string import logging from util import pyfiurl import textwrap __pychecker__ = "unusednames=i, classattr" log = logging.getLogger("bot") class CoreCommands(object): def command_echo(self, user, channel, args): self.say(channel, "%s: %s" % (user, args)) def command_ping(self, user, channel, args): self.say( channel, "%s: My current ping is %.0fms" % (self.get_nick(user), self.pingAve * 100.0), ) def command_rehash(self, user, channel, args): if self.factory.isAdmin(user): try: log.info("rebuilding %r" % self) rebuild.updateInstance(self) if args == "conf": self.factory.reload_config() self.say(channel, "Configuration reloaded.") self.factory._unload_removed_modules() self.factory._loadmodules() except Exception as e: self.say(channel, "Rehash error: %s" % e) log.error("Rehash error: %s" % e) else: self.say(channel, "Rehash OK") log.info("Rehash OK") def say(self, channel, message, length=None): raise NotImplementedError def command_join(self, user, channel, args): if not self.factory.isAdmin(user): return password = None try: args, password = args.split(" ", 1) except ValueError: pass try: newchannel, network = args.split("@", 1) except ValueError: newchannel, network = args, self.network.alias try: bot = self.factory.allBots[network] except KeyError: self.say(channel, "I am not on that network.") else: log.debug("Attempting to join channel %s on ", (newchannel, network)) if newchannel in bot.network.channels: self.say(channel, "I am already in %s on %s." % (newchannel, network)) log.debug("Already on channel %s" % channel) log.debug("Channels I'm on this network: %s" % bot.network.channels) else: if password: bot.join(newchannel, key=password) log.debug("Joined with password") else: bot.join(newchannel) log.debug("Joined")
BSD 3-Clause New or Revised License
jindaxiang/akshare
akshare/wdbank/api.py
uses_pandas
python
def uses_pandas(f, *args, **kwargs):
    if not pd:
        raise ValueError("Pandas must be installed to be used")
    return f(*args, **kwargs)
Raise ValueError if pandas is not loaded
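A minimal usage sketch: because uses_pandas is wrapped with the decorator library, it can be applied to any helper that needs pandas. The to_frame helper below is hypothetical, and the import path assumes the module location shown above.

import pandas as pd
from akshare.wdbank.api import uses_pandas  # assuming this module path

@uses_pandas
def to_frame(rows):
    # the availability check runs before the wrapped body
    return pd.DataFrame(rows)

print(to_frame([{"country": "CN", "value": 1}]))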
https://github.com/jindaxiang/akshare/blob/e2b0bd62d2b7cb6056ade710d82e5e1c250902be/akshare/wdbank/api.py#L79-L83
import collections import datetime import re import warnings import pandas as pd import tabulate from decorator import decorator from . import fetcher BASE_URL = "https://api.worldbank.org/v2" COUNTRIES_URL = f"{BASE_URL}/countries" ILEVEL_URL = f"{BASE_URL}/incomeLevels" INDICATOR_URL = f"{BASE_URL}/indicators" LTYPE_URL = f"{BASE_URL}/lendingTypes" SOURCES_URL = f"{BASE_URL}/sources" TOPIC_URL = f"{BASE_URL}/topics" INDIC_ERROR = "Cannot specify more than one of indicator, source, and topic" class WBSearchResult(list): def __repr__(self): try: return tabulate.tabulate( [[o["id"], o["name"]] for o in self], headers=["id", "name"], tablefmt="simple", ) except KeyError: return tabulate.tabulate( [[o["id"], o["value"]] for o in self], headers=["id", "value"], tablefmt="simple", ) if pd: class WBSeries(pd.Series): _metadata = ["last_updated"] @property def _constructor(self): return WBSeries class WBDataFrame(pd.DataFrame): _metadata = ["last_updated"] @property def _constructor(self): return WBDataFrame @decorator
MIT License
marrink-lab/polyply_1.0
polyply/src/meta_molecule.py
MetaMolecule.from_monomer_seq_linear
python
def from_monomer_seq_linear(cls, force_field, monomers, mol_name):
        meta_mol_graph = cls(force_field=force_field, mol_name=mol_name)
        res_count = 0
        for monomer in monomers:
            trans = 0
            while trans < monomer.n_blocks:
                if res_count != 0:
                    connect = [(res_count-1, res_count)]
                else:
                    connect = []
                trans += 1
                meta_mol_graph.add_monomer(res_count, monomer.resname, connect)
                res_count += 1
        return meta_mol_graph
Constructs a meta graph for a linear molecule which is the default assumption from
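A hedged usage sketch building a 10-residue linear PEO meta-graph. Passing force_field=None is enough to exercise the graph construction alone; a real run would supply a vermouth ForceField that defines a "PEO" block.

from polyply.src.meta_molecule import MetaMolecule, Monomer

monomers = [Monomer(resname="PEO", n_blocks=10)]
meta_mol = MetaMolecule.from_monomer_seq_linear(None, monomers, mol_name="peo10")
print(len(meta_mol.nodes), len(meta_mol.edges))  # 10 residues, 9 linear edges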
https://github.com/marrink-lab/polyply_1.0/blob/4e48f86fb309b38391c73d8f9bcc1f7c6090d2cf/polyply/src/meta_molecule.py#L218-L239
from collections import (namedtuple, OrderedDict) import json import networkx as nx from networkx.readwrite import json_graph from vermouth.graph_utils import make_residue_graph from vermouth.log_helpers import StyleAdapter, get_logger from .polyply_parser import read_polyply from .graph_utils import find_nodes_with_attributes Monomer = namedtuple('Monomer', 'resname, n_blocks') LOGGER = StyleAdapter(get_logger(__name__)) def _make_edges(force_field): for block in force_field.blocks.values(): inter_types = list(block.interactions.keys()) for inter_type in inter_types: if inter_type in ["bonds", "constraints" "angles"]: block.make_edges_from_interaction_type(type_=inter_type) for link in force_field.links: inter_types = list(link.interactions.keys()) for inter_type in inter_types: if inter_type in ["bonds", "constraints", "angles"]: link.make_edges_from_interaction_type(type_=inter_type) def _interpret_residue_mapping(graph, resname, new_residues): atom_name_to_resname = {} had_atoms = [] for new_res in new_residues: new_name, atoms = new_res.split("-") names = atoms.split(",") for name in names: if name in had_atoms: msg = ("You are trying to split residue {} into {} residues. " "However, atom {} is mentioned more than once. This is not " "allowed. ") raise IOError(msg.format(resname, len(new_residues), name)) nodes = find_nodes_with_attributes(graph, resname=resname, atomname=name) had_atoms.append(name) for node in nodes: atom_name_to_resname[node] = new_name return atom_name_to_resname def _find_starting_node(meta_molecule): for node in meta_molecule.nodes: if "build" not in meta_molecule.nodes[node]: return node return next(iter(meta_molecule.nodes())) class MetaMolecule(nx.Graph): node_dict_factory = OrderedDict def __init__(self, *args, **kwargs): self.force_field = kwargs.pop('force_field', None) self.mol_name = kwargs.pop('mol_name', None) super().__init__(*args, **kwargs) self.molecule = None nx.set_node_attributes(self, True, "build") self.__search_tree = None self.root = None self.dfs = False def add_monomer(self, current, resname, connections): resids = nx.get_node_attributes(self, "resid") if resids: resid = max(resids.values()) + 1 else: resid = 1 self.add_node(current, resname=resname, resid=resid, build=True) for edge in connections: if self.has_node(edge[0]) and self.has_node(edge[1]): self.add_edge(edge[0], edge[1]) else: msg = ("Edge {} referes to nodes that currently do" "not exist. 
Cannot add edge to unkown nodes.") raise IOError(msg.format(edge)) def get_edge_resname(self, edge): return [self.nodes[edge[0]]["resname"], self.nodes[edge[1]]["resname"]] def relabel_and_redo_res_graph(self, mapping): max_resid = max(nx.get_node_attributes(self.molecule, "resid").values()) for node, resname in mapping.items(): self.molecule.nodes[node]["resname"] = resname old_resid = self.molecule.nodes[node]["resid"] self.molecule.nodes[node]["resid"] = old_resid + max_resid self.molecule.nodes[node]["build"] = True new_meta_graph = make_residue_graph(self.molecule, attrs=('resid', 'resname')) for idx, node in enumerate(new_meta_graph.nodes): new_meta_graph.nodes[node]["resid"] = idx for atom in new_meta_graph.nodes[node]["graph"]: self.molecule.nodes[atom]["resid"] = idx self.clear() self.add_nodes_from(new_meta_graph.nodes(data=True)) self.add_edges_from(new_meta_graph.edges) def split_residue(self, split_strings): mapping = {} for split_string in split_strings: resname, *new_residues = split_string.split(":") mapping.update(_interpret_residue_mapping(self.molecule, resname, new_residues)) self.relabel_and_redo_res_graph(mapping) return mapping @property def search_tree(self): if self.__search_tree is None: if self.root is None: self.root =_find_starting_node(self) if self.dfs: self.__search_tree = nx.bfs_tree(self, source=self.root) else: self.__search_tree = nx.dfs_tree(self, source=self.root) return self.__search_tree @staticmethod def _block_graph_to_res_graph(block): res_graph = make_residue_graph(block, attrs=('resid', 'resname')) return res_graph @classmethod
Apache License 2.0
siviltaram/persona-dialogue-generation
parlai/core/torch_generator_agent.py
Beam.advance
python
def advance(self, softmax_probs):
        voc_size = softmax_probs.size(-1)
        current_length = len(self.all_scores) - 1
        if current_length < self.min_length:
            for hyp_id in range(softmax_probs.size(0)):
                softmax_probs[hyp_id][self.eos] = -NEAR_INF
        if len(self.bookkeep) == 0:
            beam_scores = softmax_probs[0]
        else:
            beam_scores = (softmax_probs +
                           self.scores.unsqueeze(1).expand_as(softmax_probs))
            for i in range(self.outputs[-1].size(0)):
                if self.block_ngram > 0:
                    current_hypo = self.partial_hyps[i][1:]
                    current_ngrams = []
                    for ng in range(self.block_ngram):
                        ngrams = Beam.find_ngrams(current_hypo, ng)
                        if len(ngrams) > 0:
                            current_ngrams.extend(ngrams)
                    counted_ngrams = Counter(current_ngrams)
                    if any(v > 1 for k, v in counted_ngrams.items()):
                        beam_scores[i] = -NEAR_INF
                if self.outputs[-1][i] == self.eos:
                    beam_scores[i] = -NEAR_INF
        flatten_beam_scores = beam_scores.view(-1)
        with torch.no_grad():
            best_scores, best_idxs = torch.topk(
                flatten_beam_scores, self.beam_size, dim=-1)
        self.scores = best_scores
        self.all_scores.append(self.scores)
        hyp_ids = best_idxs / voc_size
        tok_ids = best_idxs % voc_size
        self.outputs.append(tok_ids)
        self.bookkeep.append(hyp_ids)
        self.partial_hyps = [self.partial_hyps[hyp_ids[i]] +
                             [tok_ids[i].item()] for i in range(self.beam_size)]
        for hypid in range(self.beam_size):
            if self.outputs[-1][hypid] == self.eos:
                eostail = self.HypothesisTail(timestep=len(self.outputs) - 1,
                                              hypid=hypid,
                                              score=self.scores[hypid],
                                              tokenid=self.eos)
                self.finished.append(eostail)
                self.n_best_counter += 1
        if self.outputs[-1][0] == self.eos:
            self.eos_top = True
            if self.eos_top_ts is None:
                self.eos_top_ts = len(self.outputs) - 1
Advance the beam one step.
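A hedged driver sketch: one Beam of width 4 over a 10-token vocabulary, fed random log-probabilities each step. done(), check_finished(), get_top_hyp() and get_pretty_hypothesis() are referenced by beam_search above but not shown in this excerpt; note also that hyp_ids = best_idxs / voc_size relies on floor division of integer tensors, as in the PyTorch versions this code targeted.

import torch
import torch.nn.functional as F

beam = Beam(4, min_length=1, padding_token=0, bos_token=1, eos_token=2,
            min_n_best=3)
for _ in range(20):
    if beam.done():
        break
    scores = F.log_softmax(torch.randn(4, 10), dim=-1)  # (beam_size, vocab)
    beam.advance(scores)
beam.check_finished()
tokens, score = beam.get_top_hyp()
print(Beam.get_pretty_hypothesis(tokens), score)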
https://github.com/siviltaram/persona-dialogue-generation/blob/3cc800ffe3c5a8d16ed26522cda839acfab8d417/parlai/core/torch_generator_agent.py#L896-L963
import os import math import tempfile from collections import defaultdict, Counter, namedtuple from operator import attrgetter import torch import torch.nn as nn import torch.nn.functional as F from parlai.core.torch_agent import TorchAgent, Batch, Output from parlai.core.utils import NEAR_INF, padded_tensor, round_sigfigs, warn_once from parlai.core.thread_utils import SharedTable from parlai.core.distributed_utils import is_distributed class TorchGeneratorModel(nn.Module): def __init__( self, padding_idx=0, start_idx=1, end_idx=2, unknown_idx=3, input_dropout=0, longest_label=1, ): super().__init__() self.NULL_IDX = padding_idx self.END_IDX = end_idx self.register_buffer('START', torch.LongTensor([start_idx])) self.longest_label = longest_label def _starts(self, bsz): return self.START.detach().expand(bsz, 1) def decode_greedy(self, encoder_states, bsz, maxlen): xs = self._starts(bsz) incr_state = None logits = [] for i in range(maxlen): scores, incr_state = self.decoder(xs, encoder_states, incr_state) scores = scores[:, -1:, :] scores = self.output(scores) _, preds = scores.max(dim=-1) logits.append(scores) xs = torch.cat([xs, preds], dim=1) all_finished = ((xs == self.END_IDX).sum(dim=1) > 0).sum().item() == bsz if all_finished: break logits = torch.cat(logits, 1) return logits, xs def decode_forced(self, encoder_states, ys): bsz = ys.size(0) seqlen = ys.size(1) inputs = ys.narrow(1, 0, seqlen - 1) inputs = torch.cat([self._starts(bsz), inputs], 1) latent, _ = self.decoder(inputs, encoder_states) logits = self.output(latent) _, preds = logits.max(dim=2) return logits, preds def reorder_encoder_states(self, encoder_states, indices): raise NotImplementedError( "reorder_encoder_states must be implemented by the model" ) def reorder_decoder_incremental_state(self, incremental_state, inds): raise NotImplementedError( "reorder_decoder_incremental_state must be implemented by model" ) def forward(self, *xs, ys=None, cand_params=None, prev_enc=None, maxlen=None, bsz=None): if ys is not None: self.longest_label = max(self.longest_label, ys.size(1)) encoder_states = prev_enc if prev_enc is not None else self.encoder(*xs) if ys is not None: scores, preds = self.decode_forced(encoder_states, ys) else: scores, preds = self.decode_greedy( encoder_states, bsz, maxlen or self.longest_label ) return scores, preds, encoder_states class TorchGeneratorAgent(TorchAgent): @classmethod def add_cmdline_args(cls, argparser): agent = argparser.add_argument_group('Torch Generator Agent') agent.add_argument('--beam-size', type=int, default=1, help='Beam size, if 1 then greedy search') agent.add_argument('--beam-dot-log', type='bool', default=False, hidden=True, help='Dump beam trees as png dot images into /tmp folder') agent.add_argument('--beam-min-n-best', type=int, default=3, help='Minimum number of nbest candidates to achieve ' 'during the beam search') agent.add_argument('--beam-min-length', type=int, default=1, help='Minimum length of prediction to be generated by ' 'the beam search') agent.add_argument('--beam-block-ngram', type=int, default=0, hidden=True, help='Block all repeating ngrams up to history length n-1') agent.add_argument('--skip-generation', type='bool', default=False, hidden=True, help='Skip beam search. 
Useful for speeding up training, ' 'if perplexity is the validation metric.') super(TorchGeneratorAgent, cls).add_cmdline_args(argparser) return agent def __init__(self, opt, shared=None): init_model, is_finetune = self._get_init_model(opt, shared) super().__init__(opt, shared) self.beam_dot_log = opt.get('beam_dot_log', False) self.beam_size = opt.get('beam_size', 1) self.beam_min_n_best = opt.get('beam_min_n_best', 3) self.beam_min_length = opt.get('beam_min_length', 3) self.beam_block_ngram = opt.get('beam_block_ngram', 0) self.skip_generation = opt.get('skip_generation', False) if shared: self.model = shared['model'] self.criterion = shared['criterion'] self.metrics = shared['metrics'] states = shared.get('states', {}) else: self.metrics = { 'nll_loss': 0.0, 'loss': 0.0, 'num_tokens': 0, 'correct_tokens': 0, 'total_skipped_batches': 0 } if self.beam_dot_log: self.beam_dot_dir = tempfile.mkdtemp( prefix='{}-beamdot-beamsize-{}-'.format( os.path.basename( opt.get('model_file')), self.beam_size)) print( '[ Saving dot beam logs in {} ]'.format( self.beam_dot_dir)) self.build_criterion() self.build_model() if init_model is not None: print('[ Loading existing model params from {} ]' ''.format(init_model)) states = self.load(init_model) else: states = {} if shared is None and is_distributed(): self.model = torch.nn.parallel.DistributedDataParallel( self.model, device_ids=[self.opt['gpu']], broadcast_buffers=False, ) if 'train' in opt.get('datatype', ''): self.init_optim( [p for p in self.model.parameters() if p.requires_grad], optim_states=states.get('optimizer'), saved_optim_type=states.get('optimizer_type') ) self.build_lr_scheduler(states, hard_reset=is_finetune) self.reset() def _v2t(self, vec): new_vec = [] if hasattr(vec, 'cpu'): vec = vec.cpu() for i in vec: if i == self.END_IDX: break elif i != self.START_IDX: new_vec.append(i) return self.dict.vec2txt(new_vec) def build_model(self): raise NotImplementedError( "AbstractClass: build_model must be implemented by the user." 
) def build_criterion(self): self.criterion = nn.CrossEntropyLoss( ignore_index=self.NULL_IDX, reduction='sum' ) if self.use_cuda: self.criterion.cuda() def _dummy_batch(self, batchsize, maxlen): return Batch( text_vec=torch.ones(batchsize, maxlen).long().cuda(), label_vec=torch.ones(batchsize, 2).long().cuda(), ) def _init_cuda_buffer(self, batchsize, maxlen, force=False): if self.use_cuda and (force or not hasattr(self, 'buffer_initialized')): try: loss = self.compute_loss(self._dummy_batch(batchsize, maxlen)) loss.backward() self.buffer_initialized = True except RuntimeError as e: if 'out of memory' in str(e): m = ('CUDA OOM: Lower batch size (-bs) from {} or lower ' ' max sequence length (-tr) from {}' ''.format(batchsize, maxlen)) raise RuntimeError(m) else: raise e def reset_metrics(self): super().reset_metrics() self.metrics['loss'] = 0.0 self.metrics['nll_loss'] = 0.0 self.metrics['num_tokens'] = 0 self.metrics['correct_tokens'] = 0 def share(self): shared = super().share() shared['model'] = self.model shared['criterion'] = self.criterion if self.opt.get('numthreads', 1) > 1: if isinstance(self.metrics, dict): self.metrics = SharedTable(self.metrics) self.model.share_memory() shared['states'] = { 'optimizer_type': self.opt['optimizer'], } shared['metrics'] = self.metrics if self.beam_dot_log is True: shared['beam_dot_dir'] = self.beam_dot_dir return shared def report(self): base = super().report() m = {} num_tok = self.metrics['num_tokens'] if num_tok > 0: m['loss'] = self.metrics['loss'] if self.metrics['correct_tokens'] > 0: m['token_acc'] = self.metrics['correct_tokens'] / num_tok m['nll_loss'] = self.metrics['nll_loss'] / num_tok try: m['ppl'] = math.exp(m['nll_loss']) except OverflowError: m['ppl'] = float('inf') if self.metrics['total_skipped_batches'] > 0: m['total_skipped_batches'] = self.metrics['total_skipped_batches'] for k, v in m.items(): base[k] = round_sigfigs(v, 4) return base def vectorize(self, *args, **kwargs): kwargs['add_start'] = False kwargs['add_end'] = True return super().vectorize(*args, **kwargs) def _model_input(self, batch): return (batch.text_vec, ) def compute_loss(self, batch, return_output=False): if batch.label_vec is None: raise ValueError('Cannot compute loss without a label.') model_output = self.model(*self._model_input(batch), ys=batch.label_vec) scores, preds, *_ = model_output score_view = scores.view(-1, scores.size(-1)) loss = self.criterion(score_view, batch.label_vec.view(-1)) notnull = batch.label_vec.ne(self.NULL_IDX) target_tokens = notnull.long().sum().item() correct = ((batch.label_vec == preds) * notnull).sum().item() self.metrics['correct_tokens'] += correct self.metrics['nll_loss'] += loss.item() self.metrics['num_tokens'] += target_tokens loss /= target_tokens if return_output: return (loss, model_output) else: return loss def train_step(self, batch): batchsize = batch.text_vec.size(0) self._init_cuda_buffer(batchsize, self.truncate or 256) self.model.train() self.zero_grad() try: loss = self.compute_loss(batch) self.metrics['loss'] += loss.item() loss.backward() self.update_params() except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch. 
' 'if this happens frequently, decrease batchsize or ' 'truncate the inputs to the model.') self.metrics['total_skipped_batches'] += 1 self._init_cuda_buffer(8, 8, True) else: raise e def _write_beam_dots(self, text_vecs, beams): for i, b in enumerate(beams): dot_graph = b.get_beam_dot(dictionary=self.dict, n_best=3) image_name = self._v2t(text_vecs[i, -20:]) image_name = image_name.replace(' ', '-').replace('__null__', '') dot_graph.write_png( os.path.join(self.beam_dot_dir, "{}.png".format(image_name)) ) def eval_step(self, batch): if batch.text_vec is None: return bsz = batch.text_vec.size(0) self.model.eval() cand_scores = None if batch.label_vec is not None: loss = self.compute_loss(batch) self.metrics['loss'] += loss.item() preds = None if self.skip_generation: warn_once( "--skip-generation does not produce accurate metrics beyond ppl", RuntimeWarning ) elif self.beam_size == 1: _, preds, *_ = self.model(*self._model_input(batch), bsz=bsz) elif self.beam_size > 1: out = self.beam_search( self.model, batch, self.beam_size, start=self.START_IDX, end=self.END_IDX, pad=self.NULL_IDX, min_length=self.beam_min_length, min_n_best=self.beam_min_n_best, block_ngram=self.beam_block_ngram ) beam_preds_scores, _, beams = out preds, scores = zip(*beam_preds_scores) if self.beam_dot_log is True: self._write_beam_dots(batch.text_vec, beams) cand_choices = None if self.rank_candidates: cand_choices = [] encoder_states = self.model.encoder(*self._model_input(batch)) for i in range(bsz): num_cands = len(batch.candidate_vecs[i]) enc = self.model.reorder_encoder_states(encoder_states, [i] * num_cands) cands, _ = padded_tensor( batch.candidate_vecs[i], self.NULL_IDX, self.use_cuda ) scores, _ = self.model.decode_forced(enc, cands) cand_losses = F.cross_entropy( scores.view(num_cands * cands.size(1), -1), cands.view(-1), reduction='none', ).view(num_cands, cands.size(1)) mask = (cands != self.NULL_IDX).float() cand_scores = (cand_losses * mask).sum(dim=1) / (mask.sum(dim=1) + 1e-9) _, ordering = cand_scores.sort() cand_choices.append([batch.candidates[i][o] for o in ordering]) text = [self._v2t(p) for p in preds] if preds is not None else None return Output(text, cand_choices) def beam_search(self, model, batch, beam_size, start=1, end=2, pad=0, min_length=3, min_n_best=5, max_ts=40, block_ngram=0): encoder_states = model.encoder(*self._model_input(batch)) dev = batch.text_vec.device bsz = len(batch.text_lengths) beams = [ Beam(beam_size, min_length=min_length, padding_token=pad, bos_token=start, eos_token=end, min_n_best=min_n_best, cuda=dev, block_ngram=block_ngram) for i in range(bsz) ] decoder_input = torch.LongTensor([start]).expand(bsz * beam_size, 1).to(dev) inds = torch.arange(bsz).to(dev).unsqueeze(1).repeat(1, beam_size).view(-1) encoder_states = model.reorder_encoder_states(encoder_states, inds) incr_state = None for ts in range(max_ts): if all((b.done() for b in beams)): break score, incr_state = model.decoder(decoder_input, encoder_states, incr_state) score = score[:, -1:, :] score = model.output(score) score = score.view(bsz, beam_size, -1) score = F.log_softmax(score, dim=-1) for i, b in enumerate(beams): if not b.done(): b.advance(score[i]) incr_state_inds = torch.cat( [beam_size * i + b.get_backtrack_from_current_step() for i, b in enumerate(beams)]) incr_state = model.reorder_decoder_incremental_state( incr_state, incr_state_inds ) decoder_input = torch.index_select(decoder_input, 0, incr_state_inds) selection = torch.cat( [b.get_output_from_current_step() for b in beams]).unsqueeze(-1) 
decoder_input = torch.cat([decoder_input, selection], dim=-1) for b in beams: b.check_finished() beam_preds_scores = [list(b.get_top_hyp()) for b in beams] for pair in beam_preds_scores: pair[0] = Beam.get_pretty_hypothesis(pair[0]) n_best_beams = [b.get_rescored_finished(n_best=min_n_best) for b in beams] n_best_beam_preds_scores = [] for i, beamhyp in enumerate(n_best_beams): this_beam = [] for hyp in beamhyp: pred = beams[i].get_pretty_hypothesis( beams[i].get_hyp_from_finished(hyp)) score = hyp.score this_beam.append((pred, score)) n_best_beam_preds_scores.append(this_beam) return beam_preds_scores, n_best_beam_preds_scores, beams class _mydefaultdict(defaultdict): def get(self, key, default=None): return super().get(key, default or self.default_factory()) class PerplexityEvaluatorAgent(TorchGeneratorAgent): def __init__(self, opt, shared=None): if opt.get('multigpu'): print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.') opt['multigpu'] = False super().__init__(opt, shared) self.prev_enc = None self.last_xs = None def next_word_probability(self, partial_out): obs = self.observation xs = obs['text_vec'].unsqueeze(0) ys = self._vectorize_text( ' '.join(partial_out), False, True, self.truncate ).unsqueeze(0) if self.prev_enc is not None and self.last_xs is not None and ( xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]): self.prev_enc = None self.last_xs = xs self.model.eval() out = self.model( xs, ys=(ys if len(partial_out) > 0 else None), prev_enc=self.prev_enc, maxlen=1) scores, self.prev_enc = out probs = F.softmax(scores.select(1, -1), dim=1).squeeze() dist = _mydefaultdict(lambda: 1e-7) for i in range(len(probs)): dist[self.dict[i]] = probs[i].item() return dist class Beam(object): def __init__(self, beam_size, min_length=3, padding_token=0, bos_token=1, eos_token=2, min_n_best=3, cuda='cpu', block_ngram=0): self.beam_size = beam_size self.min_length = min_length self.eos = eos_token self.bos = bos_token self.pad = padding_token self.device = cuda self.scores = torch.Tensor(self.beam_size).float().zero_().to( self.device) self.all_scores = [torch.Tensor([0.0] * beam_size).to(self.device)] self.bookkeep = [] self.outputs = [torch.Tensor(self.beam_size).long() .fill_(self.bos).to(self.device)] self.finished = [] self.HypothesisTail = namedtuple( 'HypothesisTail', ['timestep', 'hypid', 'score', 'tokenid']) self.eos_top = False self.eos_top_ts = None self.n_best_counter = 0 self.min_n_best = min_n_best self.block_ngram = block_ngram self.partial_hyps = [[self.bos] for i in range(beam_size)] @staticmethod def find_ngrams(input_list, n): return list(zip(*[input_list[i:] for i in range(n)])) def get_output_from_current_step(self): return self.outputs[-1] def get_backtrack_from_current_step(self): return self.bookkeep[-1]
MIT License
apple/ccs-twistedextensions
twext/enterprise/queue.py
TableSyntaxByName.toString
python
def toString(self, inObject):
        return inObject.model.name.encode("UTF-8")
Convert a L{TableSyntax} object into just its name for wire transport.

@param inObject: a table.
@type inObject: L{TableSyntax}

@return: the name of that table
@rtype: L{bytes}
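A minimal sketch: the AMP argument ships only the table's name over the wire. It assumes the module above is importable and that NodeInfoSchema exposes its NODE_INFO table as an attribute, as fromStringProto's getattr lookup implies.

from twext.enterprise.queue import TableSyntaxByName, NodeInfoSchema

arg = TableSyntaxByName()
print(arg.toString(NodeInfoSchema.NODE_INFO))  # b'NODE_INFO'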
https://github.com/apple/ccs-twistedextensions/blob/2c4046df88873dcf33fba7840ed90e4238dcbec7/twext/enterprise/queue.py#L220-L230
from functools import wraps from datetime import datetime from zope.interface import implements from twisted.application.service import MultiService from twisted.internet.protocol import Factory from twisted.internet.defer import ( inlineCallbacks, returnValue, Deferred, passthru, succeed ) from twisted.internet.endpoints import TCP4ClientEndpoint from twisted.protocols.amp import AMP, Command, Integer, Argument, String from twisted.python.reflect import qual from twisted.python import log from twext.enterprise.dal.syntax import SchemaSyntax, Lock, NamedValue from twext.enterprise.dal.model import ProcedureCall from twext.enterprise.dal.record import Record, fromTable, NoSuchRecord from twisted.python.failure import Failure from twext.enterprise.dal.model import Table, Schema, SQLType, Constraint from twisted.internet.endpoints import TCP4ServerEndpoint from twext.enterprise.ienterprise import IQueuer from zope.interface.interface import Interface from twext.enterprise.locking import NamedLock class _IWorkPerformer(Interface): def performWork(table, workID): def makeNodeSchema(inSchema): NodeTable = Table(inSchema, "NODE_INFO") NodeTable.addColumn("HOSTNAME", SQLType("varchar", 255)) NodeTable.addColumn("PID", SQLType("integer", None)) NodeTable.addColumn("PORT", SQLType("integer", None)) NodeTable.addColumn("TIME", SQLType("timestamp", None)).setDefaultValue( ProcedureCall("timezone", ["UTC", NamedValue("CURRENT_TIMESTAMP")]) ) for column in NodeTable.columns: NodeTable.tableConstraint(Constraint.NOT_NULL, [column.name]) NodeTable.primaryKey = [ NodeTable.columnNamed("HOSTNAME"), NodeTable.columnNamed("PORT"), ] return inSchema NodeInfoSchema = SchemaSyntax(makeNodeSchema(Schema(__file__))) @inlineCallbacks def inTransaction(transactionCreator, operation): txn = transactionCreator() try: result = yield operation(txn) except: f = Failure() yield txn.abort() returnValue(f) else: yield txn.commit() returnValue(result) def astimestamp(v): return (v - datetime.utcfromtimestamp(0)).total_seconds() class TableSyntaxByName(Argument): def fromStringProto(self, inString, proto): return getattr(proto.schema, inString.decode("UTF-8"))
Apache License 2.0
differentiableuniverseinitiative/jax_cosmo
jax_cosmo/sparse.py
det
python
def det(sparse):
    sign, logdet = slogdet(sparse)
    return sign * np.exp(logdet)
Calculate the determinant of a sparse matrix.

Uses :func:`slogdet`. For a zero sparse matrix, the result of this
computation is currently undefined and will return nan.

Parameters
----------
sparse : array
    3D array of shape (ny, nx, ndiag) of block diagonal elements.

Returns
-------
float
    Determinant result.
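A worked example, assuming jax_cosmo (and jax) are installed so the module above is importable: a 2x2 block matrix whose blocks are 3-element diagonals, checked against the dense determinant.

import jax.numpy as np
from jax_cosmo.sparse import det, to_dense

sparse = np.array([
    [[1., 2., 3.], [4., 5., 6.]],
    [[7., 8., 9.], [2., 4., 6.]],
])  # shape (ny=2, nx=2, ndiag=3)

# the three independent 2x2 systems give (-26) * (-32) * (-36) = -29952
print(det(sparse))                      # ~ -29952.0
print(np.linalg.det(to_dense(sparse)))  # ~ -29952.0 (up to float round-off)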
https://github.com/differentiableuniverseinitiative/jax_cosmo/blob/c2f8b59214df70d820bbad53f6a7b1ab2280638a/jax_cosmo/sparse.py#L370-L389
import functools import jax.numpy as np from jax import jit from jax import vmap def is_sparse(sparse): return np.asarray(sparse).ndim == 3 def check_sparse(sparse, square=False): sparse = np.asarray(sparse) if sparse.ndim != 3: raise ValueError("Expected 3D array of sparse diagonals.") if square and (sparse.shape[0] != sparse.shape[1]): raise ValueError("Expected a square matrix.") return sparse @jit def to_dense(sparse): sparse = check_sparse(sparse) return np.vstack(vmap(lambda row: np.hstack(vmap(np.diag)(row)))(sparse)) @jit def dot(*args): if len(args) == 2: A, B = args A = np.asarray(A) B = np.asarray(B) if is_sparse(A): Acols = A.shape[1] * A.shape[2] else: if A.ndim < 1 or A.ndim > 2: raise ValueError(f"A has invalid dimension {A.ndim} (expected 1 or 2).") Acols = A.shape[-1] if is_sparse(B): Brows = B.shape[0] * B.shape[2] else: if B.ndim < 1 or B.ndim > 2: raise ValueError(f"B has invalid dimension {B.ndim} (expected 1 or 2).") Brows = B.shape[0] if Acols != Brows: raise ValueError( f"Shapes of A {A.shape} and B {B.shape} not compatible for dot product." ) if is_sparse(A): if is_sparse(B): return sparse_dot_sparse(A, B) else: return sparse_dot_vec(A, B) if B.ndim == 1 else sparse_dot_dense(A, B) else: return vec_dot_sparse(A, B) if A.ndim == 1 else dense_dot_sparse(A, B) elif len(args) == 3: A, B, C = args if A.ndim != 2 or B.ndim != 3 or C.ndim != 2: raise ValueError("Can only handle dense @ sparse @ dense bilinear form.") if ( A.shape[1] != B.shape[0] * B.shape[2] or B.shape[1] * B.shape[2] != C.shape[0] ): raise ValueError( "Shapes of A {A.shape}, B {B.shape}, C {C.shape} not compatible for dot product." ) return dense_dot_sparse_dot_dense(A, B, C) else: raise ValueError(f"Expected 2 or 3 input arrays but got {len(args)}.") @jit def sparse_dot_vec(sparse, vec): return vmap( lambda row, vec: np.sum(vmap(np.multiply)(row, vec.reshape(row.shape)), axis=0), in_axes=(0, None), )(sparse, vec).reshape(-1) @jit def sparse_dot_dense(sparse, dense): return vmap(sparse_dot_vec, (None, 1), 1)(sparse, dense) @jit def vec_dot_sparse(vec, sparse): return vmap( lambda vec, col: np.sum(vmap(np.multiply)(vec.reshape(col.shape), col), axis=0), in_axes=(None, 1), )(vec, sparse).reshape(-1) @jit def dense_dot_sparse(dense, sparse): return vmap(vec_dot_sparse, (0, None), 0)(dense, sparse) @jit def sparse_dot_sparse(sparse1, sparse2): return vmap( vmap( lambda B1, B2: np.sum(np.multiply(B1, B2), axis=0), (0, None), 0, ), (None, 1), 1, )(sparse1, sparse2) @jit def dense_dot_sparse_dot_dense(X, Y, Z): return vmap( vmap( lambda row, sparse, col: np.dot(row, sparse_dot_vec(sparse, col)), (None, None, 1), ), (0, None, None), )(X, Y, Z) @jit def inv(sparse): sparse = check_sparse(sparse, square=True) return np.transpose(np.linalg.inv(np.transpose(sparse, (2, 0, 1))), (1, 2, 0)) @functools.partial(jit, static_argnums=(1, 2, 3)) def _block_det(sparse, k, N, P): u = sparse[k : k + 1, k + 1 : N, 0:P] S = sparse[k + 1 : N, k + 1 : N, 0:P] v = sparse[k + 1 : N, k : k + 1, 0:P] Sinv_v = sparse_dot_sparse(inv(S), v) M = sparse[k, k] - sparse_dot_sparse(u, Sinv_v) sign = np.product(np.sign(M)) logdet = np.sum(np.log(np.abs(M))) return sign, logdet @jit def slogdet(sparse): sparse = check_sparse(sparse, square=True) N, _, P = sparse.shape sign = np.product(np.sign(sparse[-1, -1])) logdet = np.sum(np.log(np.abs(sparse[-1, -1]))) for i in range(N - 1): s, ld = _block_det(sparse, i, N, P) sign *= s logdet += ld return sign, logdet @jit
MIT License
darkenergysurvey/ugali
ugali/utils/projector.py
hms2dec
python
def hms2dec(hms):
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    SECOND = 3600.
    if isstring(hms):
        hour,minute,second = np.array(re.split('[hms]',hms))[:3].astype(float)
    else:
        hour,minute,second = hms.T
    decimal = (hour + minute * 1./MINUTE + second * 1./SECOND)*(DEGREE/HOUR)
    return decimal
Convert longitude from hours,minutes,seconds in string or 3-array format to decimal degrees. ADW: This really should be replaced by astropy
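A worked example, assuming ugali is installed: 10h30m00s is 10.5 hours, and 10.5 * (360/24) = 157.5 degrees; the string and 3-array forms agree.

import numpy as np
from ugali.utils.projector import hms2dec

print(hms2dec("10h30m00s"))               # 157.5
print(hms2dec(np.array([10., 30., 0.])))  # 157.5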
https://github.com/darkenergysurvey/ugali/blob/82abffcc92bddf830d89f85cb3966870f7d9f720/ugali/utils/projector.py#L432-L450
import re import numpy as np from ugali.utils.logger import logger from ugali.utils.mlab import isstring class SphericalRotator: def __init__(self, lon_ref, lat_ref, zenithal=False): self.setReference(lon_ref, lat_ref, zenithal) def setReference(self, lon_ref, lat_ref, zenithal=False): if zenithal: phi = (np.pi / 2.) + np.radians(lon_ref) theta = (np.pi / 2.) - np.radians(lat_ref) psi = 0. if not zenithal: phi = (-np.pi / 2.) + np.radians(lon_ref) theta = np.radians(lat_ref) psi = np.radians(90.) cos_psi,sin_psi = np.cos(psi),np.sin(psi) cos_phi,sin_phi = np.cos(phi),np.sin(phi) cos_theta,sin_theta = np.cos(theta),np.sin(theta) self.rotation_matrix = np.array([ [cos_psi * cos_phi - cos_theta * sin_phi * sin_psi, cos_psi * sin_phi + cos_theta * cos_phi * sin_psi, sin_psi * sin_theta], [-sin_psi * cos_phi - cos_theta * sin_phi * cos_psi, -sin_psi * sin_phi + cos_theta * cos_phi * cos_psi, cos_psi * sin_theta], [sin_theta * sin_phi, -sin_theta * cos_phi, cos_theta] ]) self.inverted_rotation_matrix = np.linalg.inv(self.rotation_matrix) def cartesian(self,lon,lat): lon = np.radians(lon) lat = np.radians(lat) x = np.cos(lat) * np.cos(lon) y = np.cos(lat) * np.sin(lon) z = np.sin(lat) return np.array([x,y,z]) def rotate(self, lon, lat, invert=False): vec = self.cartesian(lon,lat) if invert: vec_prime = np.dot(np.array(self.inverted_rotation_matrix), vec) else: vec_prime = np.dot(np.array(self.rotation_matrix), vec) lon_prime = np.arctan2(vec_prime[1], vec_prime[0]) lat_prime = np.arcsin(vec_prime[2]) return (np.degrees(lon_prime) % 360.), np.degrees(lat_prime) class Projector: def __init__(self, lon_ref, lat_ref, proj_type = 'ait'): self.lon_ref = lon_ref self.lat_ref = lat_ref self.proj_type = proj_type if proj_type.lower() == 'ait': self.rotator = SphericalRotator(lon_ref, lat_ref, zenithal=False) self.sphere_to_image_func = aitoffSphereToImage self.image_to_sphere_func = aitoffImageToSphere elif proj_type.lower() == 'tan': self.rotator = SphericalRotator(lon_ref, lat_ref, zenithal=True) self.sphere_to_image_func = gnomonicSphereToImage self.image_to_sphere_func = gnomonicImageToSphere elif proj_type.lower() == 'car': def rotate(lon,lat,invert=False): if invert: return lon + np.array([lon_ref]), lat + np.array([lat_ref]) else: return lon - np.array([lon_ref]), lat - np.array([lat_ref]) self.rotator = SphericalRotator(lon_ref, lat_ref, zenithal=False) self.rotator.rotate = rotate self.sphere_to_image_func = cartesianSphereToImage self.image_to_sphere_func = cartesianImageToSphere else: logger.warn('%s not recognized'%(proj_type)) def sphereToImage(self, lon, lat): scalar = np.isscalar(lon) lon, lat = np.asarray(lon), np.asarray(lat) lon_rotated, lat_rotated = self.rotator.rotate(lon.flat, lat.flat) x, y = self.sphere_to_image_func(lon_rotated, lat_rotated) if scalar: return np.asscalar(x), np.asscalar(y) else: return x.reshape(lon.shape), y.reshape(lat.shape) sphere2image = sphereToImage def imageToSphere(self, x, y): scalar = np.isscalar(x) x, y = np.asarray(x), np.asarray(y) lon_rotated, lat_rotated = self.image_to_sphere_func(x.flat, y.flat) lon, lat = self.rotator.rotate(lon_rotated, lat_rotated, invert = True) if scalar: return np.asscalar(lon), np.asscalar(lat) else: return lon.reshape(x.shape), lat.reshape(y.shape) image2sphere = imageToSphere def sphere2image(lon_ref,lat_ref,lon,lat): proj = Projector(lon_ref,lat_ref) return proj.sphere2image(lon,lat) def image2sphere(lon_ref,lat_ref,x,y): proj = Projector(lon_ref,lat_ref) return proj.image2sphere(x,y) def cartesianSphereToImage(lon, 
lat): lon = lon - 360.*(lon>180) x,y = lon,lat return x,y def cartesianImageToSphere(x,y): x = x - 360.*(x>180) lon,lat = x,y return lon,lat def aitoffSphereToImage(lon, lat): lon = lon - 360.*(lon>180) lon = np.radians(lon) lat = np.radians(lat) half_lon = lon/2. cos_lat = np.cos(lat) gamma = (180. / np.pi) * np.sqrt(2. / (1. + (cos_lat * np.cos(half_lon)))) x = 2. * gamma * cos_lat * np.sin(half_lon) y = gamma * np.sin(lat) return x, y def aitoffImageToSphere(x, y): x = x - 360.*(x>180) x = np.asarray(np.radians(x)) y = np.asarray(np.radians(y)) z = np.sqrt(1. - (x / 4.)**2 - (y / 2.)**2) lon = 2. * np.arctan2((2. * z**2) - 1, (z / 2.) * x) lat = np.arcsin( y * z) return ((180. - np.degrees(lon)) % 360.), np.degrees(lat) def gnomonicSphereToImage(lon, lat): lon = lon - 360.*(lon>180) lon = np.radians(lon) lat = np.radians(lat) r_theta = (180. / np.pi) / np.tan(lat) x = r_theta * np.cos(lon) y = r_theta * np.sin(lon) return x, y def gnomonicImageToSphere(x, y): x = x - 360.*(x>180) x = np.asarray(x) y = np.asarray(y) lon = np.degrees(np.arctan2(y, x)) r_theta = np.sqrt(x**2 + y**2) lat = np.degrees(np.arctan(180. / (np.pi * r_theta))) return lon, lat def angsep2(lon_1, lat_1, lon_2, lat_2): import healpy v10, v11, v12 = healpy.ang2vec(np.radians(90. - lat_1), np.radians(lon_1)).transpose() v20, v21, v22 = healpy.ang2vec(np.radians(90. - lat_2), np.radians(lon_2)).transpose() val = (v10 * v20) + (v11 * v21) + (v12 * v22) val = np.clip(val, -1., 1.) return np.degrees(np.arccos(val)) def angsep(lon1,lat1,lon2,lat2): lon1,lat1 = np.radians([lon1,lat1]) lon2,lat2 = np.radians([lon2,lat2]) sdlon = np.sin(lon2 - lon1) cdlon = np.cos(lon2 - lon1) slat1 = np.sin(lat1) slat2 = np.sin(lat2) clat1 = np.cos(lat1) clat2 = np.cos(lat2) num1 = clat2 * sdlon num2 = clat1 * slat2 - slat1 * clat2 * cdlon denominator = slat1 * slat2 + clat1 * clat2 * cdlon return np.degrees(np.arctan2(np.hypot(num1,num2), denominator)) def galToCel(ll, bb): bb = np.radians(bb) sin_bb = np.sin(bb) cos_bb = np.cos(bb) ll = np.radians(ll) ra_gp = np.radians(192.85948) de_gp = np.radians(27.12825) lcp = np.radians(122.932) sin_lcp_ll = np.sin(lcp - ll) cos_lcp_ll = np.cos(lcp - ll) sin_d = (np.sin(de_gp) * sin_bb) + (np.cos(de_gp) * cos_bb * cos_lcp_ll) ramragp = np.arctan2(cos_bb * sin_lcp_ll, (np.cos(de_gp) * sin_bb) - (np.sin(de_gp) * cos_bb * cos_lcp_ll)) dec = np.arcsin(sin_d) ra = (ramragp + ra_gp + (2. * np.pi)) % (2. * np.pi) return np.degrees(ra), np.degrees(dec) gal2cel = galToCel def celToGal(ra, dec): dec = np.radians(dec) sin_dec = np.sin(dec) cos_dec = np.cos(dec) ra = np.radians(ra) ra_gp = np.radians(192.85948) de_gp = np.radians(27.12825) sin_ra_gp = np.sin(ra - ra_gp) cos_ra_gp = np.cos(ra - ra_gp) lcp = np.radians(122.932) sin_b = (np.sin(de_gp) * sin_dec) + (np.cos(de_gp) * cos_dec * cos_ra_gp) lcpml = np.arctan2(cos_dec * sin_ra_gp, (np.cos(de_gp) * sin_dec) - (np.sin(de_gp) * cos_dec * cos_ra_gp)) bb = np.arcsin(sin_b) ll = (lcp - lcpml + (2. * np.pi)) % (2. 
* np.pi) return np.degrees(ll), np.degrees(bb) cel2gal = celToGal def estimate_angle(angle, origin, new_frame, offset=1e-7): from astropy.coordinates import SkyCoord import astropy.units as u angle_deg = angle*np.pi/180 newlat = offset * np.cos(angle_deg) + origin.data.lat.degree newlon = (offset * np.sin(angle_deg) / np.cos(newlat * np.pi/180) + origin.data.lon.degree) sc = SkyCoord(newlon, newlat, unit='degree', frame=origin.frame.name) new_origin = origin.transform_to(new_frame) new_sc = sc.transform_to(new_frame) return new_origin.position_angle(new_sc).deg def gal2cel_angle(glon,glat,angle,offset=1e-7): from astropy.coordinates import SkyCoord import astropy.units as u origin = SkyCoord(glon,glat,unit=u.deg,frame='galactic') return estimate_angle(angle,origin,'fk5',offset) def cel2gal_angle(ra,dec,angle,offset=1e-7): from astropy.coordinates import SkyCoord import astropy.units as u origin = SkyCoord(ra,dec,unit=u.deg,frame='fk5') return estimate_angle(angle,origin,'galactic',offset) def dec2hms(dec): DEGREE = 360. HOUR = 24. MINUTE = 60. SECOND = 3600. dec = float(dec) fhour = dec*(HOUR/DEGREE) hour = int(fhour) fminute = (fhour - hour)*MINUTE minute = int(fminute) second = (fminute - minute)*MINUTE return (hour, minute, second) def dec2dms(dec): DEGREE = 360. HOUR = 24. MINUTE = 60. SECOND = 3600. dec = float(dec) sign = np.copysign(1.0,dec) fdeg = np.abs(dec) deg = int(fdeg) fminute = (fdeg - deg)*MINUTE minute = int(fminute) second = (fminute - minute)*MINUTE deg = sign*int(deg) return (deg, minute, second)
MIT License
neteasegame/atx
atx/record/base.py
BaseRecorder.start
python
def start(self):
        self.update_device_info()
        self.get_device_status(0)
        self.hook()
        self.thread = threading.Thread(target=self._run)
        self.thread.start()
        self.running = True
start running in background.
https://github.com/neteasegame/atx/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/record/base.py#L66-L73
import bisect import cv2 import os import pickle import Queue import sys import threading import time import traceback import json from collections import namedtuple class BaseRecorder(object): monitor_period = 3 def __init__(self, device=None, workdir=".", realtime_analyze=False): self.device = None self.device_info = {} self.running = False self.setup_workdir(workdir) if device is not None: self.attach(device) self.realtime_analyze = realtime_analyze self.thread = None self.frames = [] self.last_frame_time = None self.case_draft = [] self.input_queue = Queue.Queue() self.input_index = 0 self.addons = {} for cls in self.__class__.__bases__: name = getattr(cls, '_%s__addon_name' % (cls.__name__,), None) if name is not None: gfun = getattr(self, 'get_%s' % (name,)) sfun = getattr(self, 'save_%s' % (name,)) lfun = getattr(self, 'load_%s' % (name,)) self.addons[name] = (gfun, sfun, lfun) def setup_workdir(self, workdir): self.workdir = workdir self.casedir = os.path.join(workdir, 'case') if not os.path.exists(self.casedir): os.makedirs(self.casedir) self.framedir = os.path.join(workdir, 'frames') if not os.path.exists(self.framedir): os.makedirs(self.framedir) def update_device_info(self): if self.device is None: return w, h = self.device.display self.device_info = {"width":w, "height":h}
Apache License 2.0
jameschapman19/cca_zoo
cca_zoo/data/toy.py
Tangled_MNIST_Dataset.__init__
python
def __init__(self, mnist_type="MNIST", train=True, flatten=True):
        if mnist_type == "MNIST":
            self.dataset = datasets.MNIST("../../data", train=train, download=True)
        elif mnist_type == "FashionMNIST":
            self.dataset = datasets.FashionMNIST(
                "../../data", train=train, download=True
            )
        elif mnist_type == "KMNIST":
            self.dataset = datasets.KMNIST("../../data", train=train, download=True)
        self.data = self.dataset.data
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.targets = self.dataset.targets
        self.OHs = _OH_digits(self.targets.numpy().astype(int))
        self.filtered_classes = []
        self.filtered_nums = []
        for i in range(10):
            self.filtered_classes.append(self.data[self.targets == i])
            self.filtered_nums.append(self.filtered_classes[i].shape[0])
        self.flatten = flatten
:param mnist_type: "MNIST", "FashionMNIST" or "KMNIST"
:param train: whether this is train or test
:param flatten: whether to flatten the data into array or use 2d images
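A hedged usage sketch: construction downloads MNIST into ../../data on first use. The class's __len__/__getitem__ are not shown in this excerpt; by analogy with Noisy_MNIST_Dataset above they are assumed to exist, which is what DataLoader relies on.

from torch.utils.data import DataLoader

ds = Tangled_MNIST_Dataset(mnist_type="MNIST", train=True, flatten=True)
loader = DataLoader(ds, batch_size=32, shuffle=True)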
https://github.com/jameschapman19/cca_zoo/blob/ee9d4957409c1c0c03acacd1fc2fdb61cedb98a8/cca_zoo/data/toy.py#L170-L195
import numpy as np import torch import torch.utils.data from PIL import Image from torch.utils.data import Dataset from torchvision import datasets, transforms from torchvision.transforms.functional import InterpolationMode class Split_MNIST_Dataset(Dataset): def __init__( self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True ): if mnist_type == "MNIST": self.dataset = datasets.MNIST("../../data", train=train, download=True) elif mnist_type == "FashionMNIST": self.dataset = datasets.FashionMNIST( "../../data", train=train, download=True ) elif mnist_type == "KMNIST": self.dataset = datasets.KMNIST("../../data", train=train, download=True) self.data = self.dataset.data self.base_transform = transforms.ToTensor() self.targets = self.dataset.targets self.flatten = flatten def __len__(self): return len(self.data) def __getitem__(self, idx): x = self.data[idx].flatten() x_a = x[:392] / 255 x_b = x[392:] / 255 label = self.targets[idx] return (x_a, x_b), label def to_numpy(self, indices=None): if indices is None: indices = np.arange(self.__len__()) view_1 = np.zeros((len(indices), 392)) view_2 = np.zeros((len(indices), 392)) labels = np.zeros(len(indices)).astype(int) for i, n in enumerate(indices): sample = self[n] view_1[i] = sample[0][0].numpy() view_2[i] = sample[0][1].numpy() labels[i] = sample[1].numpy().astype(int) return (view_1, view_2), labels class Noisy_MNIST_Dataset(Dataset): def __init__( self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True ): if mnist_type == "MNIST": self.dataset = datasets.MNIST("../../data", train=train, download=True) elif mnist_type == "FashionMNIST": self.dataset = datasets.FashionMNIST( "../../data", train=train, download=True ) elif mnist_type == "KMNIST": self.dataset = datasets.KMNIST("../../data", train=train, download=True) self.data = self.dataset.data self.base_transform = transforms.ToTensor() self.a_transform = transforms.Compose( [ transforms.ToTensor(), transforms.ToPILImage(), ] ) self.b_transform = transforms.Compose( [ transforms.ToTensor(), transforms.Lambda(_add_mnist_noise), transforms.Lambda(self.__threshold_func__), ] ) self.targets = self.dataset.targets self.filtered_classes = [] self.filtered_nums = [] for i in range(10): self.filtered_classes.append(self.data[self.targets == i]) self.filtered_nums.append(self.filtered_classes[i].shape[0]) self.flatten = flatten def __threshold_func__(self, x): x[x > 1] = 1 return x def __len__(self): return len(self.data) def __getitem__(self, idx): x_a = self.a_transform(self.data[idx].numpy() / 255) rot_a = torch.rand(1) * 90 - 45 x_a = transforms.functional.rotate( x_a, rot_a.item(), interpolation=InterpolationMode.BILINEAR ) x_a = self.base_transform(x_a) label = self.targets[idx] random_index = np.random.randint(self.filtered_nums[label]) x_b = Image.fromarray( self.filtered_classes[label][random_index, :, :].numpy() / 255, mode="L" ) x_b = self.b_transform(x_b) if self.flatten: x_a = torch.flatten(x_a) x_b = torch.flatten(x_b) return (x_b, x_a), (rot_a, label) def to_numpy(self, indices=None): if indices is None: indices = np.arange(self.__len__()) view_1 = np.zeros((len(indices), 784)) view_2 = np.zeros((len(indices), 784)) labels = np.zeros(len(indices)).astype(int) rotations = np.zeros(len(indices)) for i, n in enumerate(indices): sample = self[n] view_1[i] = sample[0][0].numpy().reshape((-1, 28 * 28)) view_2[i] = sample[0][1].numpy().reshape((-1, 28 * 28)) rotations[i] = sample[1][0].numpy() labels[i] = sample[1][1].numpy().astype(int) return (view_1, 
view_2), (rotations, labels) class Tangled_MNIST_Dataset(Dataset):
MIT License
dmsul/econtools
econtools/metrics/regutil.py
flag_nonsingletons
python
def flag_nonsingletons(df, avar, sample):
    counts = df[sample].groupby(avar).size()
    big_counts = df[[avar]].join(counts.to_frame('_T'), on=avar).fillna(0)
    non_single = big_counts['_T'] > 1
    return non_single
Boolean flag for 'not from a singleton `avar` group.
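A worked example with a made-up grouping variable: rows whose 'firm' value appears more than once in the estimation sample are flagged True.

import pandas as pd
from econtools.metrics.regutil import flag_nonsingletons  # assuming this path

df = pd.DataFrame({'firm': ['a', 'a', 'b', 'c']})
sample = pd.Series([True, True, True, True])
print(flag_nonsingletons(df, 'firm', sample).tolist())
# [True, True, False, False]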
https://github.com/dmsul/econtools/blob/496de15abde59a8397aff35e70107c7d59848321/econtools/metrics/regutil.py#L77-L82
from typing import Optional, List import pandas as pd import numpy as np import numpy.linalg as la from econtools.util.gentools import force_list, force_df def add_cons(df): df = df.copy() if df.ndim == 1: df = pd.DataFrame(df) df['_cons'] = np.ones(df.shape[0]) return df def flag_sample(df, *args): varlist = [] for var in args: if var is not None: varlist += force_list(var) sample = df[varlist].notnull().all(axis=1) return sample def set_sample(df, sample, names): return tuple(_set_samp_core(df, sample, names)) def _set_samp_core(df, sample, names): for name in names: if name is None: yield None else: yield df.loc[sample, name].copy().reset_index(drop=True) def demeaner(A, *args): return tuple(_demean_guts(A.squeeze(), args)) def _demean_guts(A, args): for df in args: if df is None or df.empty: yield df else: group_name = A.name mean = df.groupby(A).mean() large_mean = force_df(A).join(mean, on=group_name).drop(group_name, axis=1) if df.ndim == 1: large_mean = large_mean.squeeze() demeaned = df - large_mean yield demeaned def unpack_shac_args(argdict): if argdict is None: return None, None, None, None extra_args = set(argdict.keys()).difference(set(('x', 'y', 'band', 'kern'))) if extra_args: err_str = 'Extra `shac` args: {}' raise ValueError(err_str.format(tuple(extra_args))) shac_x = argdict['x'] shac_y = argdict['y'] shac_band = argdict['band'] shac_kern = argdict['kern'] return shac_x, shac_y, shac_band, shac_kern
BSD 3-Clause New or Revised License
aurzenligl/prophy
prophyc/parsers/prophy.py
Parser.p_enum_def
python
def p_enum_def(self, t):
        node = model.Enum(t[2], t[3])
        self.typedecls[t[2]] = node
        self.nodes.append(node)
enum_def : ENUM unique_id enum_body SEMI
https://github.com/aurzenligl/prophy/blob/4e60162b3871aa7c43a38f273c2840ce4cf758f7/prophyc/parsers/prophy.py#L212-L216
import os from contextlib import contextmanager import ply.lex as lex import ply.yacc as yacc from prophyc import file_processor, model, six def get_column(input_, pos): return pos - input_.rfind('\n', 0, pos) class Parser(object): literals = ['+', '-', '*', '/', '(', ')', '#'] keywords = ( "const", "enum", "typedef", "struct", "union", "u8", "u16", "u32", "u64", "i8", "i16", "i32", "i64", "float", "double", "bytes" ) tokens = tuple([t.upper() for t in keywords]) + ( "ID", "PATH", "CONST8", "CONST10", "CONST16", "LBRACKET", "RBRACKET", "LBRACE", "RBRACE", "LT", "GT", "SEMI", "COLON", "EQUALS", "COMMA", "DOTS", "LSHIFT", "RSHIFT", "AT" ) def t_ID(self, t): if t.value in self.keywords: t.type = t.value.upper() return t def t_PATH(self, t): return t def t_CONST16(self, t): t.value = int(t.value, 16) return t def t_CONST8(self, t): t.value = int(t.value, 8) return t def t_CONST10(self, t): t.value = int(t.value) return t t_LBRACKET = r'\[' t_RBRACKET = r'\]' t_LBRACE = r'\{' t_RBRACE = r'\}' t_SEMI = r';' t_COLON = r':' t_LT = r'<' t_GT = r'>' t_EQUALS = r'=' t_COMMA = r',' t_DOTS = r'\.\.\.' t_LSHIFT = r'<<' t_RSHIFT = r'>>' t_AT = r'@' t_ignore = ' \t\r' def t_newline(self, t): t.lexer.lineno += len(t.value) def t_comment(self, t): t.lexer.lineno += t.value.count('\n') def t_linecomment(self, t): t.lexer.lineno += 1 def t_error(self, t): t.lexer.skip(1) self._parser_error("illegal character '{}'".format(t.value[0]), t.lexer.lineno, t.lexpos) precedence = ( ('left', '+', '-'), ('left', '*', '/'), ('left', 'LSHIFT', 'RSHIFT'), ('right', 'UMINUS') ) def __init__(self): self._init_parse_data() self.lexer = lex.lex(module=self, debug=0) self.yacc = yacc.yacc(module=self, tabmodule='parsetab_prophy', write_tables=0, debug=0) def parse(self, input_, parse_error_prefix, parse_file): self._init_parse_data(parse_error_prefix) self.parse_file = parse_file self.lexer.lineno = 1 self.yacc.parse(input_, lexer=self.lexer) return self.nodes def _init_parse_data(self, parse_error_prefix=""): self.nodes = [] self.typedecls = {} self.constdecls = {} self.errors = [] self.parse_error_prefix = parse_error_prefix def _parser_error(self, message, line, pos): self.errors.append(("{}:{}:{}".format( self.parse_error_prefix, line, get_column(self.lexer.lexdata, pos) ), message)) def _parser_check(self, condition, message, line, pos): if not condition: self._parser_error(message, line, pos) def _validate_struct_members(self, members): fieldnames = set() for i, (member, line, pos) in enumerate(members): name = member.name self._parser_check( name not in fieldnames, "field '{}' redefined".format(name), line, pos ) fieldnames.add(name) if member.bound: bound, _, __ = next(six.ifilter(lambda m: m[0].name == member.bound, members[:i]), (None, None, None)) if bound: self._parser_check(self._is_type_sizer_compatible(bound.type_name), "Sizer of '{}' has to be of (unsigned) integer type".format(name), line, pos) else: self._parser_error("Sizer of '{}' has to be defined before the array".format(name), line, pos) for member, line, pos in members[:-1]: self._parser_check( not member.greedy and member.kind != model.Kind.UNLIMITED, "greedy array field '{}' not last".format(member.name), line, pos ) def _is_type_sizer_compatible(self, typename): if typename in {type_ + width for type_ in 'ui' for width in ['8', '16', '32', '64']}: return True elif typename in self.typedecls and isinstance(self.typedecls[typename], model.Typedef): return self._is_type_sizer_compatible(self.typedecls[typename].type_name) else: return False def 
p_specification(self, t): def p_definition_list(self, t): def p_definition(self, t): def p_include_def(self, t): self._parser_check( t[2] == 'include', "unknown directive '{}'".format(t[2]), t.lineno(2), t.lexpos(2) ) path = t[3][1:-1] stem = os.path.splitext(os.path.basename(path))[0] try: nodes = self.parse_file(path) except (file_processor.CyclicIncludeError, file_processor.FileNotFoundError) as e: self._parser_error(str(e), t.lineno(3), t.lexpos(3)) nodes = [] for node in nodes: if isinstance(node, model.Constant): self.constdecls[node.name] = node if isinstance(node, model.Enum): for mem in node.members: self.constdecls[mem.name] = mem if isinstance(node, (model.Typedef, model.Enum, model.Struct, model.Union)): self.typedecls[node.name] = node node = model.Include(stem, nodes) self.nodes.append(node) def p_constant_def(self, t): node = model.Constant(t[2], str(t[4])) self.constdecls[t[2]] = node self.nodes.append(node)
MIT License
tyberiusprime/i3-instant-layout
src/i3_instant_layout/main.py
list_layouts_in_smart_order
python
def list_layouts_in_smart_order():
    usage = load_usage()
    sort_me = []
    for layout in layouts.layouts:
        if " " in layout.name:
            raise ValueError(
                f"No spaces in layout names please. Offender: '{layout.name}'"
            )
        for alias in [layout.name] + layout.aliases:
            usage_count, last_used = usage.get(
                alias, (0, datetime.datetime.now().timestamp())
            )
            if alias == layout.name:
                desc = alias
            else:
                desc = f"{alias} ({layout.name})"
            sort_me.append(
                (-1 * math.ceil(math.log10(usage_count + 1)), -1 * last_used, desc)
            )
    sort_me.sort()
    for _, _, name in sort_me:
        print(name)
List the layouts in a 'smart' order, that means most common ones on top (by log10 usage), within one log10 unit, sorted by most-recently-used
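A small sketch of the sort key alone: -ceil(log10(count + 1)) buckets usage counts into orders of magnitude (0, 1-9, 10-99, ...), so within one bucket the -last_used term decides by recency.

import math

for count in (0, 1, 9, 10, 99, 100):
    print(count, -1 * math.ceil(math.log10(count + 1)))
# 0 -> 0, 1 -> -1, 9 -> -1, 10 -> -2, 99 -> -2, 100 -> -3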
https://github.com/tyberiusprime/i3-instant-layout/blob/920f88356763fe8b5816d2e1f3873cfebe6265e7/src/i3_instant_layout/main.py#L145-L169
import asyncio import i3ipc import datetime import json import math import subprocess import sys import tempfile from pathlib import Path from . import layouts, __version__ counter_file = Path("~/.local/share/i3-instant-layout/counter.json").expanduser() counter_file.parent.mkdir(exist_ok=True, parents=True) def append_layout(layout_dict, window_count): tf = tempfile.NamedTemporaryFile(suffix=".json") tf.write(json.dumps(layout_dict, indent=4).encode("utf-8")) tf.flush() cmd = ["i3-msg", "append_layout", str(Path(tf.name).absolute())] subprocess.check_call(cmd, stdout=subprocess.PIPE) tf.close() def nuke_swallow_windows(): to_nuke = set() def walk_tree(con): if con.ipc_data.get("swallows", False): to_nuke.add(con.ipc_data["window"]) for d in con.descendants(): walk_tree(d) i3 = i3ipc.Connection() tree = i3.get_tree().find_focused().workspace() walk_tree(tree) for window_id in to_nuke: subprocess.check_call(["xdotool", "windowclose", str(window_id)]) def get_window_ids(): desktop = subprocess.check_output( ["xprop", "-notype", "-root", "_NET_CURRENT_DESKTOP"] ).decode("utf-8", errors="replace") desktop = desktop[desktop.rfind("=") + 2 :].strip() res = subprocess.check_output( [ "xdotool", "search", "--all", "--onlyvisible", "--desktop", desktop, "--class", "^.*", ] ).decode("utf-8", errors="replace") return res.strip().split("\n") def get_active_window(): return ( subprocess.check_output(["xdotool", "getactivewindow"]).decode("utf-8").strip() ) def focus_window(id): return subprocess.check_call( ["i3-msg", f'[id="{id}"]', "focus"], stdout=subprocess.PIPE ) def apply_layout(layout, dry_run=False): active = get_active_window() windows = get_window_ids() windows = [active] + [x for x in windows if x != active] window_count = len(windows) unmap_cmd = [ "xdotool", ] map_cmd = [ "xdotool", ] t = layout.get_json(window_count) if isinstance(t, tuple): layout_dict, remap_order = t if set(range(window_count)) != set(remap_order): raise ValueError("Layout returned invalid remap order") windows = [windows[ii] for ii in remap_order] else: layout_dict = t if dry_run: print(json.dumps(layout_dict, indent=4)) else: if layout_dict is not False: append_layout(layout_dict, window_count) for window_id in windows: unmap_cmd.append("windowunmap") map_cmd.append("windowmap") unmap_cmd.append(str(window_id)) map_cmd.append(str(window_id)) subprocess.check_call(unmap_cmd) subprocess.check_call(map_cmd) focus_window(active) def load_usage(): try: with open(counter_file, "r") as op: return json.load(op) except (OSError, ValueError): return {} def count_usage(layout_name): usage = load_usage() if layout_name not in usage: usage[layout_name] = (0, datetime.datetime.now().timestamp()) usage[layout_name] = ( usage[layout_name][0] + 1, datetime.datetime.now().timestamp(), ) with open(counter_file, "w") as op: json.dump(usage, op)
MIT License
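The interesting part of the function above is its sort key: layouts are bucketed by ceil(log10(usage + 1)) and then ordered by recency inside a bucket. A minimal, self-contained sketch of that key with made-up usage data (not from the repository):

import datetime
import math

# Hypothetical (alias -> (usage_count, last_used_timestamp)) data for illustration.
usage = {
    "tall": (120, datetime.datetime(2023, 1, 2).timestamp()),
    "grid": (400, datetime.datetime(2023, 1, 5).timestamp()),
    "fib": (3, datetime.datetime(2023, 1, 9).timestamp()),
}

# Same key as list_layouts_in_smart_order: log10 bucket first, recency second.
sort_me = [
    (-math.ceil(math.log10(count + 1)), -last_used, name)
    for name, (count, last_used) in usage.items()
]
sort_me.sort()
print([name for _, _, name in sort_me])  # ['grid', 'tall', 'fib']

'grid' and 'tall' share the log10 bucket, so the more recently used one wins; 'fib' lands in a lower bucket and comes last despite being the most recent.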
pytorch/fairseq
examples/speech_synthesis/evaluation/get_eval_manifest.py
main
python
def main(args):
    in_root = Path(args.generation_root).resolve()
    ext = args.audio_format
    with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out:
        reader = csv.DictReader(
            f, delimiter="\t", quotechar=None, doublequote=False,
            lineterminator="\n", quoting=csv.QUOTE_NONE
        )
        header = ["id", "syn", "ref", "text", "speaker"]
        f_out.write("\t".join(header) + "\n")
        for row in reader:
            dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}"
            id_ = row["id"]
            syn = (in_root / dir_name / f"{id_}.{ext}").as_posix()
            ref = row["audio"]
            if args.use_resynthesized_target:
                ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix()
            sample = [id_, syn, ref, row["tgt_text"], row["speaker"]]
            f_out.write("\t".join(sample) + "\n")
    print(f"wrote evaluation file to {args.output_path}")
`uid syn ref text`
https://github.com/pytorch/fairseq/blob/fcca32258c8e8bcc9f9890bf4714fa2f96b6b3e1/examples/speech_synthesis/evaluation/get_eval_manifest.py#L11-L33
import csv from pathlib import Path
MIT License
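A hedged usage sketch for the function above: main() only reads attribute-style options, so it can be driven with an argparse.Namespace. The paths and values below are placeholders, and the import assumes the fairseq examples tree is importable (the script is normally run from the command line):

import argparse
from examples.speech_synthesis.evaluation.get_eval_manifest import main

# Placeholder options; main() writes a TSV with columns id/syn/ref/text/speaker.
args = argparse.Namespace(
    generation_root="outputs/generate",       # hypothetical directory of synthesized audio
    audio_format="wav",
    audio_manifest="data/test.tsv",           # hypothetical input manifest
    output_path="outputs/eval_manifest.tsv",
    sample_rate=22050,
    use_resynthesized_target=False,
    vocoder="griffin_lim",
)
main(args)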
ricsinaruto/dialog-eval
code/metrics/divergence_metrics.py
DivergenceMetrics.__init__
python
def __init__(self, vocab, gt_path):
    self.vocab = vocab
    self.gt_path = gt_path
    self.metrics = {'unigram-kl-div': [],
                    'bigram-kl-div': []}
Params: :vocab: Vocabulary dictionary. :gt_path: Path to ground truth file.
https://github.com/ricsinaruto/dialog-eval/blob/af95efcdcea8499b1c48a909aae1d8643efabe21/code/metrics/divergence_metrics.py#L8-L18
import math from utils import utils class DivergenceMetrics():
MIT License
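A minimal construction sketch for the class above, assuming the repository's code/ directory is on the path (the vocabulary and ground-truth path below are made up):

from metrics.divergence_metrics import DivergenceMetrics

vocab = {"hello": 0, "world": 1}                        # hypothetical vocabulary dict
dm = DivergenceMetrics(vocab, "data/ground_truth.txt")  # hypothetical ground-truth file
print(dm.metrics)  # {'unigram-kl-div': [], 'bigram-kl-div': []}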
openstack/horizon
openstack_dashboard/api/neutron.py
FloatingIpManager.list_pools
python
def list_pools(self):
    search_opts = {'router:external': True}
    return [FloatingIpPool(pool) for pool in
            self.client.list_networks(**search_opts).get('networks')]
Fetches a list of all floating IP pools. :returns: List of FloatingIpPool objects
https://github.com/openstack/horizon/blob/5e405d71926764b8aa60c75794b62f668f4e8122/openstack_dashboard/api/neutron.py#L553-L560
import collections from collections.abc import Sequence import copy import logging import netaddr from django.conf import settings from django.utils.translation import ugettext_lazy as _ from neutronclient.common import exceptions as neutron_exc from neutronclient.v2_0 import client as neutron_client from novaclient import exceptions as nova_exc from horizon import exceptions from horizon import messages from horizon.utils.memoized import memoized from openstack_dashboard.api import base from openstack_dashboard.api import nova from openstack_dashboard.contrib.developer.profiler import api as profiler from openstack_dashboard import policy from openstack_dashboard.utils import settings as setting_utils LOG = logging.getLogger(__name__) IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'} OFF_STATE = 'OFF' ON_STATE = 'ON' ROUTER_INTERFACE_OWNERS = ( 'network:router_interface', 'network:router_interface_distributed', 'network:ha_router_replicated_interface' ) VNIC_TYPES = [ ('normal', _('Normal')), ('direct', _('Direct')), ('direct-physical', _('Direct Physical')), ('macvtap', _('MacVTap')), ('baremetal', _('Bare Metal')), ('virtio-forwarder', _('Virtio Forwarder')), ] class NeutronAPIDictWrapper(base.APIDictWrapper): def __init__(self, apidict): if 'admin_state_up' in apidict: if apidict['admin_state_up']: apidict['admin_state'] = 'UP' else: apidict['admin_state'] = 'DOWN' apidict.update({ key.replace(':', '__'): value for key, value in apidict.items() if ':' in key }) super().__init__(apidict) def set_id_as_name_if_empty(self, length=8): try: if not self._apidict['name'].strip(): id = self._apidict['id'] if length: id = id[:length] self._apidict['name'] = '(%s)' % id except KeyError: pass def items(self): return self._apidict.items() @property def name_or_id(self): return (self._apidict.get('name').strip() or '(%s)' % self._apidict['id'][:13]) class Agent(NeutronAPIDictWrapper): class Network(NeutronAPIDictWrapper): class Subnet(NeutronAPIDictWrapper): def __init__(self, apidict): apidict['ipver_str'] = get_ipver_str(apidict['ip_version']) super().__init__(apidict) AUTO_ALLOCATE_ID = '__auto_allocate__' class PreAutoAllocateNetwork(Network): def __init__(self, request): tenant_id = request.user.tenant_id auto_allocated_subnet = Subnet({ 'name': 'auto_allocated_subnet', 'id': AUTO_ALLOCATE_ID, 'network_id': 'auto', 'tenant_id': tenant_id, 'ip_version': 4, 'cidr': '0.0.0.0/0', }) auto_allocated_network = { 'name': 'auto_allocated_network', 'description': 'Network to be allocated automatically', 'id': AUTO_ALLOCATE_ID, 'status': 'ACTIVE', 'admin_state_up': True, 'shared': False, 'router:external': False, 'subnets': [auto_allocated_subnet], 'tenant_id': tenant_id, } super().__init__(auto_allocated_network) class Trunk(NeutronAPIDictWrapper): @property def subport_count(self): return len(self._apidict.get('sub_ports', [])) def to_dict(self): trunk_dict = super().to_dict() trunk_dict['name_or_id'] = self.name_or_id trunk_dict['subport_count'] = self.subport_count return trunk_dict class SubnetPool(NeutronAPIDictWrapper): class Port(NeutronAPIDictWrapper): def __init__(self, apidict): if 'mac_learning_enabled' in apidict: apidict['mac_state'] = ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE pairs = apidict.get('allowed_address_pairs') if pairs: apidict = copy.deepcopy(apidict) wrapped_pairs = [PortAllowedAddressPair(pair) for pair in pairs] apidict['allowed_address_pairs'] = wrapped_pairs super().__init__(apidict) class PortTrunkParent(Port): class PortTrunkSubport(Port): def __init__(self, 
apidict, trunk_subport_info): for field in ['trunk_id', 'segmentation_type', 'segmentation_id']: apidict[field] = trunk_subport_info[field] super().__init__(apidict) class PortAllowedAddressPair(NeutronAPIDictWrapper): def __init__(self, addr_pair): super().__init__(addr_pair) self.id = addr_pair['ip_address'] class Router(NeutronAPIDictWrapper): class RouterStaticRoute(NeutronAPIDictWrapper): def __init__(self, route): super().__init__(route) self.id = route['nexthop'] + ":" + route['destination'] class SecurityGroup(NeutronAPIDictWrapper): def __init__(self, sg, sg_dict=None): if sg_dict is None: sg_dict = {sg['id']: sg['name']} if 'security_group_rules' not in sg: sg['security_group_rules'] = [] sg['rules'] = [SecurityGroupRule(rule, sg_dict) for rule in sg['security_group_rules']] super().__init__(sg) def to_dict(self): return {k: self._apidict[k] for k in self._apidict if k != 'rules'} class SecurityGroupRule(NeutronAPIDictWrapper): def _get_secgroup_name(self, sg_id, sg_dict): if not sg_id: return '' if sg_dict is None: sg_dict = {} return sg_dict.get(sg_id, sg_id[:13]) def __init__(self, sgr, sg_dict=None): if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']: if sgr['ethertype'] == 'IPv6': sgr['remote_ip_prefix'] = '::/0' else: sgr['remote_ip_prefix'] = '0.0.0.0/0' rule = { 'id': sgr['id'], 'parent_group_id': sgr['security_group_id'], 'direction': sgr['direction'], 'ethertype': sgr['ethertype'], 'ip_protocol': sgr['protocol'], 'from_port': sgr['port_range_min'], 'to_port': sgr['port_range_max'], 'description': sgr.get('description', '') } cidr = sgr['remote_ip_prefix'] rule['ip_range'] = {'cidr': cidr} if cidr else {} group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict) rule['group'] = {'name': group} if group else {} super().__init__(rule) def __str__(self): if 'name' in self.group: remote = self.group['name'] elif 'cidr' in self.ip_range: remote = self.ip_range['cidr'] else: remote = 'ANY' direction = 'to' if self.direction == 'egress' else 'from' if self.from_port: if self.from_port == self.to_port: proto_port = ("%s/%s" % (self.from_port, self.ip_protocol.lower())) else: proto_port = ("%s-%s/%s" % (self.from_port, self.to_port, self.ip_protocol.lower())) elif self.ip_protocol: try: ip_proto = int(self.ip_protocol) proto_port = "ip_proto=%d" % ip_proto except Exception: proto_port = self.ip_protocol else: proto_port = '' return (_('ALLOW %(ethertype)s %(proto_port)s ' '%(direction)s %(remote)s') % {'ethertype': self.ethertype, 'proto_port': proto_port, 'remote': remote, 'direction': direction}) class SecurityGroupManager(object): backend = 'neutron' def __init__(self, request): self.request = request self.client = neutronclient(request) def _list(self, **filters): secgroups = self.client.list_security_groups(**filters) return [SecurityGroup(sg) for sg in secgroups.get('security_groups')] @profiler.trace def list(self, **params): tenant_id = params.pop('tenant_id', self.request.user.tenant_id) if tenant_id: params['tenant_id'] = tenant_id return self._list(**params) def _sg_name_dict(self, sg_id, rules): related_ids = set([sg_id]) related_ids |= set(filter(None, [r['remote_group_id'] for r in rules])) related_sgs = self.client.list_security_groups(id=related_ids, fields=['id', 'name']) related_sgs = related_sgs.get('security_groups') return dict((sg['id'], sg['name']) for sg in related_sgs) @profiler.trace def get(self, sg_id): secgroup = self.client.show_security_group(sg_id).get('security_group') sg_dict = self._sg_name_dict(sg_id, 
secgroup['security_group_rules']) return SecurityGroup(secgroup, sg_dict) @profiler.trace def create(self, name, desc): body = {'security_group': {'name': name, 'description': desc, 'tenant_id': self.request.user.project_id}} secgroup = self.client.create_security_group(body) return SecurityGroup(secgroup.get('security_group')) @profiler.trace def update(self, sg_id, name, desc): body = {'security_group': {'name': name, 'description': desc}} secgroup = self.client.update_security_group(sg_id, body) return SecurityGroup(secgroup.get('security_group')) @profiler.trace def delete(self, sg_id): self.client.delete_security_group(sg_id) @profiler.trace def rule_create(self, parent_group_id, direction=None, ethertype=None, ip_protocol=None, from_port=None, to_port=None, cidr=None, group_id=None, description=None): if not cidr: cidr = None if isinstance(from_port, int) and from_port < 0: from_port = None if isinstance(to_port, int) and to_port < 0: to_port = None if isinstance(ip_protocol, int) and ip_protocol < 0: ip_protocol = None params = {'security_group_id': parent_group_id, 'direction': direction, 'ethertype': ethertype, 'protocol': ip_protocol, 'port_range_min': from_port, 'port_range_max': to_port, 'remote_ip_prefix': cidr, 'remote_group_id': group_id} if description is not None: params['description'] = description body = {'security_group_rule': params} try: rule = self.client.create_security_group_rule(body) except neutron_exc.OverQuotaClient: raise exceptions.Conflict( _('Security group rule quota exceeded.')) except neutron_exc.Conflict: raise exceptions.Conflict( _('Security group rule already exists.')) rule = rule.get('security_group_rule') sg_dict = self._sg_name_dict(parent_group_id, [rule]) return SecurityGroupRule(rule, sg_dict) @profiler.trace def rule_delete(self, sgr_id): self.client.delete_security_group_rule(sgr_id) @profiler.trace def list_by_instance(self, instance_id): ports = port_list(self.request, device_id=instance_id) sg_ids = [] for p in ports: sg_ids += p.security_groups return self._list(id=set(sg_ids)) if sg_ids else [] @profiler.trace def update_instance_security_group(self, instance_id, new_security_group_ids): ports = port_list(self.request, device_id=instance_id) for p in ports: params = {'security_groups': new_security_group_ids} port_update(self.request, p.id, **params) class FloatingIp(base.APIDictWrapper): _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'instance_type', 'pool', 'dns_domain', 'dns_name'] def __init__(self, fip): fip['ip'] = fip['floating_ip_address'] fip['fixed_ip'] = fip['fixed_ip_address'] fip['pool'] = fip['floating_network_id'] super().__init__(fip) class FloatingIpPool(base.APIDictWrapper): pass class FloatingIpTarget(base.APIDictWrapper): def __init__(self, port, ip_address, label): name = '%s: %s' % (label, ip_address) if label else ip_address target = {'name': name, 'id': '%s_%s' % (port.id, ip_address), 'port_id': port.id, 'instance_id': port.device_id} super().__init__(target) class FloatingIpManager(object): device_owner_map = { 'compute:': 'compute', 'neutron:LOADBALANCER': 'loadbalancer', } def __init__(self, request): self.request = request self.client = neutronclient(request) @profiler.trace
Apache License 2.0
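A hedged usage sketch for list_pools above: the manager just wraps neutron's list_networks with router:external=True, so calling it only needs the Django request that Horizon threads through its API layer (request is assumed to exist and carry the user's token):

from openstack_dashboard.api.neutron import FloatingIpManager

def external_networks(request):
    # Each returned item is a FloatingIpPool wrapper around an external-network dict.
    return FloatingIpManager(request).list_pools()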
quantumlib/openfermion-projectq
openfermionprojectq/_ffft.py
fourier_transform_0
python
def fourier_transform_0(register, mode_a, mode_b):
    operator = fourier_transform_0_generator(mode_a, mode_b)
    jw_operator = jordan_wigner(operator)
    Z | register[mode_b]
    TimeEvolution(numpy.pi / 8., jw_operator) | register
Apply the fermionic Fourier transform to two modes. The fermionic Fourier transform is applied to the qubits corresponding to mode_a and mode_b after the Jordan-Wigner transformation. Args: register (projectq.Qureg): The register of qubits to act on. mode_a, mode_b (int): The two modes to Fourier transform.
https://github.com/quantumlib/openfermion-projectq/blob/f7ffa1df9b8b6f96b956e2e53f8eb7735c5eab12/openfermionprojectq/_ffft.py#L113-L126
import itertools import numpy from openfermionprojectq import TimeEvolution from openfermionprojectq._parallel_bubble_sort import ( index_of_position_in_1d_array, parallel_bubble_sort) from openfermion.ops import FermionOperator, QubitOperator from openfermion.transforms import jordan_wigner from openfermion.utils import fourier_transform, Grid, normal_ordered from projectq import MainEngine from projectq.ops import (H, X, Y, Z, C, Rx, Ry, Rz, Ph, Swap, Measure, All) def fswap_generator(mode_a, mode_b): return (FermionOperator('') + FermionOperator(((mode_a, 1), (mode_b, 0))) + FermionOperator(((mode_b, 1), (mode_a, 0))) - FermionOperator(((mode_a, 1), (mode_a, 0))) - FermionOperator(((mode_b, 1), (mode_b, 0)))) def fswap(register, mode_a, mode_b, fermion_to_spin_mapping=jordan_wigner): operator = fswap_generator(mode_a, mode_b) TimeEvolution(numpy.pi / 2., fermion_to_spin_mapping(operator)) | register def fswap_adjacent(register, mode): operator = (QubitOperator(((mode, 'Z'),)) + QubitOperator(((mode + 1, 'Z'),)) + QubitOperator(((mode, 'X'), (mode + 1, 'X'))) + QubitOperator(((mode, 'Y'), (mode + 1, 'Y')))) TimeEvolution(numpy.pi / 4., operator) | register def apply_phase(register, mode, phase): Rz(2 * phase) | register[mode] def fourier_transform_0_generator(mode_a, mode_b): xy = ((FermionOperator(((mode_a, 1),)) - 1j * FermionOperator(((mode_a, 0),))) * (FermionOperator(((mode_b, 1),)) - 1j * FermionOperator(((mode_b, 0),)))) yx = ((FermionOperator(((mode_a, 1),)) + 1j * FermionOperator(((mode_a, 0),))) * (FermionOperator(((mode_b, 1),)) + 1j * FermionOperator(((mode_b, 0),)))) return xy - yx
Apache License 2.0
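A hedged usage sketch for fourier_transform_0 above, using the ProjectQ engine the context already imports; two qubits stand in for the two fermionic modes:

from projectq import MainEngine
from projectq.ops import All, Measure
from openfermionprojectq._ffft import fourier_transform_0

eng = MainEngine()
register = eng.allocate_qureg(2)     # qubits for mode 0 and mode 1
fourier_transform_0(register, 0, 1)  # Z on mode 1, then time evolution under the JW operator
All(Measure) | register              # measure before deallocation to keep ProjectQ happy
eng.flush()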
sailthru/relay.mesos
relay_mesos/for_demo.py
target_value
python
def target_value():
    while True:
        yield 40
An example target used by the relay.mesos demo to set the target number of currently running tasks at a given point in time
https://github.com/sailthru/relay.mesos/blob/7728dcecb8f31cfe6eae1162e92f359750771487/relay_mesos/for_demo.py#L19-L25
import json import os import urllib2 def num_active_mesos_tasks(): while True: data = json.load(urllib2.urlopen( os.environ['RELAY_MESOS_MASTER_STATE_FOR_DEMO'])) yield data['started_tasks'] + data['staged_tasks'] - ( data['failed_tasks'] + data['killed_tasks'] + data['lost_tasks'] + data['finished_tasks'])
Apache License 2.0
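target_value above is an infinite generator, so callers pull values with next(); a tiny sketch:

from relay_mesos.for_demo import target_value

targets = target_value()
print(next(targets))  # 40 -- the demo's constant target of running tasks
print(next(targets))  # 40 again; the generator never terminates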
skoda091/alfred-deepl
lib/urllib3/connection.py
HTTPConnection.request_chunked
python
def request_chunked(self, method, url, body=None, headers=None):
    headers = HTTPHeaderDict(headers if headers is not None else {})
    skip_accept_encoding = 'accept-encoding' in headers
    skip_host = 'host' in headers
    self.putrequest(
        method,
        url,
        skip_accept_encoding=skip_accept_encoding,
        skip_host=skip_host
    )
    for header, value in headers.items():
        self.putheader(header, value)
    if 'transfer-encoding' not in headers:
        self.putheader('Transfer-Encoding', 'chunked')
    self.endheaders()

    if body is not None:
        stringish_types = six.string_types + (six.binary_type,)
        if isinstance(body, stringish_types):
            body = (body,)
        for chunk in body:
            if not chunk:
                continue
            if not isinstance(chunk, six.binary_type):
                chunk = chunk.encode('utf8')
            len_str = hex(len(chunk))[2:]
            self.send(len_str.encode('utf-8'))
            self.send(b'\r\n')
            self.send(chunk)
            self.send(b'\r\n')

    self.send(b'0\r\n\r\n')
Alternative to the common request method, which sends the body with chunked encoding and not as one block
https://github.com/skoda091/alfred-deepl/blob/8c7d7a572011bdee3888211a90a161ea50bb81af/lib/urllib3/connection.py#L169-L205
from __future__ import absolute_import import datetime import logging import os import sys import socket from socket import error as SocketError, timeout as SocketTimeout import warnings from .packages import six from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection from .packages.six.moves.http_client import HTTPException try: import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None class BaseSSLError(BaseException): pass try: ConnectionError = ConnectionError except NameError: class ConnectionError(Exception): pass from .exceptions import ( NewConnectionError, ConnectTimeoutError, SubjectAltNameWarning, SystemTimeWarning, ) from .packages.ssl_match_hostname import match_hostname, CertificateError from .util.ssl_ import ( resolve_cert_reqs, resolve_ssl_version, assert_fingerprint, create_urllib3_context, ssl_wrap_socket ) from .util import connection from ._collections import HTTPHeaderDict log = logging.getLogger(__name__) port_by_scheme = { 'http': 80, 'https': 443, } RECENT_DATE = datetime.date(2016, 1, 1) class DummyConnection(object): pass class HTTPConnection(_HTTPConnection, object): default_port = port_by_scheme['http'] default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] is_verified = False def __init__(self, *args, **kw): if six.PY3: kw.pop('strict', None) self.source_address = kw.get('source_address') if sys.version_info < (2, 7): kw.pop('source_address', None) self.socket_options = kw.pop('socket_options', self.default_socket_options) _HTTPConnection.__init__(self, *args, **kw) def _new_conn(self): extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self.host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn def _prepare_conn(self, conn): self.sock = conn if getattr(self, '_tunnel_host', None): self._tunnel() self.auto_open = 0 def connect(self): conn = self._new_conn() self._prepare_conn(conn)
MIT License
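A hedged usage sketch for request_chunked above: because the body may be any iterable of byte chunks, a generator can stream the request without buffering it. The host and path are placeholders, and the import assumes a regular urllib3 install rather than the vendored copy under lib/ shown in this record:

from urllib3.connection import HTTPConnection

def chunks():
    # Each yielded value becomes one chunk on the wire (hex length, CRLF, data, CRLF).
    for part in (b"hello ", b"chunked ", b"world"):
        yield part

conn = HTTPConnection("httpbin.org", 80)            # placeholder host
conn.request_chunked("POST", "/post", body=chunks())
response = conn.getresponse()
print(response.status)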
hkust-knowcomp/mlma_hate_speech
run_sluice_net.py
check_activation_function
python
def check_activation_function(arg):
    try:
        functions = [dynet.rectify, dynet.tanh]
        functions = {function.__name__: function for function in functions}
        functions['None'] = None
        return functions[str(arg)]
    except:
        raise argparse.ArgumentTypeError(
            'String {} does not match required format'.format(arg, ))
Checks allowed argument for --ac option.
https://github.com/hkust-knowcomp/mlma_hate_speech/blob/8c4c2268ed5f93a801ecd0aef31a12dd9d07d332/run_sluice_net.py#L18-L27
import argparse import os import random import sys import numpy as np import dynet from constants import TASK_NAMES, LANGUAGES, EMBEDS, BALANCED, IMBALANCED, SGD, ADAM from sluice_net import SluiceNetwork, load import utils
MIT License
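check_activation_function above is meant to be plugged into argparse as a type= converter. A hedged sketch, assuming DyNet is installed (the converter resolves dynet.rectify and dynet.tanh):

import argparse
from run_sluice_net import check_activation_function

parser = argparse.ArgumentParser()
parser.add_argument("--ac", type=check_activation_function, default="tanh",
                    help="activation function: rectify, tanh or None")
opts = parser.parse_args(["--ac", "tanh"])
print(opts.ac)  # the dynet.tanh callable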
kazhala/fzf.aws
fzfaws/utils/fileloader.py
FileLoader._set_ec2_env
python
def _set_ec2_env(self, ec2_settings: Dict[str, Any]) -> None:
    if not ec2_settings:
        return
    if ec2_settings.get("keypair"):
        os.environ["FZFAWS_EC2_KEYPAIRS"] = ec2_settings.get("keypair", "")
    if ec2_settings.get("waiter"):
        os.environ["FZFAWS_EC2_WAITER"] = json.dumps(ec2_settings.get("waiter", {}))
    if ec2_settings.get("default_args"):
        for key, value in ec2_settings.get("default_args").items():
            os.environ["FZFAWS_EC2_%s" % key.upper()] = value
    if ec2_settings.get("profile"):
        os.environ["FZFAWS_EC2_PROFILE"] = ec2_settings["profile"]
    if ec2_settings.get("region"):
        os.environ["FZFAWS_EC2_REGION"] = ec2_settings["region"]
Set ec2 service settings. :param ec2_settings: ec2 settings from config file :type ec2_settings: Dict[str, Any]
https://github.com/kazhala/fzf.aws/blob/4abefb2301f7b489b11ed3f0b303faafa5941d5b/fzfaws/utils/fileloader.py#L172-L190
import json import os from typing import Any, Dict import yaml from yaml.error import YAMLError yaml.SafeLoader.add_multi_constructor("!", lambda loader, suffix, node: None) class FileLoader: def __init__(self, path: str = None, body: str = None) -> None: if path == None: path = "" if body == None: body = "" self.path: str = path self.body: str = body def process_yaml_file(self) -> Dict[str, Any]: with open(self.path, "r") as file: body = file.read() formated_body = yaml.safe_load(body) return {"body": body, "dictBody": formated_body} def process_json_file(self) -> Dict[str, Any]: with open(self.path, "r") as file: body = file.read() formated_body = json.loads(body) return {"body": body, "dictBody": formated_body} def process_yaml_body(self) -> dict: return yaml.safe_load(self.body) def process_json_body(self) -> dict: return json.loads(self.body) def load_config_file(self, config_path: str = None) -> None: if not config_path: home = os.path.expanduser("~") base_directory = os.getenv("XDG_CONFIG_HOME", "%s/.config" % home) config_path = "%s/fzfaws/fzfaws.yml" % base_directory if not os.path.isfile(config_path): return with open(config_path, "r") as file: try: body = file.read() formated_body = yaml.safe_load(body) if not formated_body: return self._set_fzf_env(formated_body.get("fzf", {})) self._set_spinner_env(formated_body.get("spinner", {})) self._set_gloable_env(formated_body.get("global", {})) if not formated_body.get("services"): return else: self._set_ec2_env(formated_body["services"].get("ec2", {})) self._set_s3_env(formated_body["services"].get("s3", {})) self._set_cloudformation_env( formated_body["services"].get("cloudformation", {}) ) except YAMLError as e: print("Config file is malformed, please double check your config file") print(e) def _set_spinner_env(self, spinner_settings: Dict[str, Any]) -> None: if not spinner_settings: return if spinner_settings.get("message"): os.environ["FZFAWS_SPINNER_MESSAGE"] = spinner_settings["message"] if spinner_settings.get("speed"): os.environ["FZFAWS_SPINNER_SPEED"] = str(spinner_settings["speed"]) if spinner_settings.get("pattern"): os.environ["FZFAWS_SPINNER_PATTERN"] = spinner_settings["pattern"] def _set_cloudformation_env(self, cloudformation_settings: Dict[str, Any]) -> None: if not cloudformation_settings: return if cloudformation_settings.get("profile"): os.environ["FZFAWS_CLOUDFORMATION_PROFILE"] = cloudformation_settings[ "profile" ] if cloudformation_settings.get("region"): os.environ["FZFAWS_CLOUDFORMATION_REGION"] = cloudformation_settings[ "region" ] if cloudformation_settings.get("default_args"): for key, value in cloudformation_settings["default_args"].items(): os.environ["FZFAWS_CLOUDFORMATION_%s" % key.upper()] = value if cloudformation_settings.get("waiter"): os.environ["FZFAWS_CLOUDFORMATION_WAITER"] = json.dumps( cloudformation_settings.get("waiter", {}) ) def _set_s3_env(self, s3_settings: Dict[str, Any]) -> None: if not s3_settings: return if s3_settings.get("transfer_config"): os.environ["FZFAWS_S3_TRANSFER"] = json.dumps( s3_settings["transfer_config"] ) if s3_settings.get("profile"): os.environ["FZFAWS_S3_PROFILE"] = s3_settings["profile"] if s3_settings.get("default_args"): for key, value in s3_settings.get("default_args").items(): os.environ["FZFAWS_S3_%s" % key.upper()] = value
MIT License
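A hedged sketch of the effect of _set_ec2_env above: the settings dict is flattened into FZFAWS_EC2_* environment variables. The settings values are made up, and the private method is called directly only for illustration:

import json
import os
from fzfaws.utils.fileloader import FileLoader

FileLoader()._set_ec2_env({
    "keypair": "~/.ssh/demo.pem",                      # hypothetical value
    "waiter": {"delay": 15, "max_attempts": 40},       # hypothetical value
    "default_args": {"ssh": "-A", "start": "--wait"},  # hypothetical value
    "profile": "default",
    "region": "us-east-1",
})
print(os.environ["FZFAWS_EC2_SSH"])                  # '-A'
print(json.loads(os.environ["FZFAWS_EC2_WAITER"]))   # {'delay': 15, 'max_attempts': 40}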
perchlive/django-broadcast
django_broadcast/api.py
start_stream
python
def start_stream(request: HttpRequest) -> dict:
    stream_type = request.POST.get('type')
    if stream_type is 'hls':
        return start_hls_stream(request)
    raise NotImplementedError
Prepare for a new stream on behalf of the request's author. This involves provisioning remote storage for the stream media and returning any necessary data for the streaming client. :param request: :return:
https://github.com/perchlive/django-broadcast/blob/20ddd5a2c42dcb98a7dd88ba001c69524a1fa733/django_broadcast/api.py#L24-L35
import datetime from django.core import serializers from django.core.exceptions import PermissionDenied from django.http import HttpRequest from storage_provisioner.provisioner import S3StorageProvisioner from storage_provisioner.storage import S3Storage from django_broadcast import settings from django_broadcast.models import HlsStream, Thumbnail __author__ = 'dbro' AWS_ACCESS_KEY_ID = settings.aws_access_key_id AWS_SECRET_ACCESS_KEY = settings.aws_secret_access_key S3_BUCKET_NAME = settings.s3_bucket stream_model = settings.STREAM_MODEL PROVISIONER = S3StorageProvisioner(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
Apache License 2.0
yuta-hi/pytorch_bayesian_unet
pytorch_bcnn/extensions/validator.py
Validator.evaluate
python
def evaluate(self, trainer):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    for target in self._targets.values():
        target.eval()

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()
    self.visualizer.reset()

    desc = 'valid (iter=%08d)' % trainer.updater.iteration
    total = len(it.dataset) // it.batch_size

    for batch in tqdm.tqdm(it, total=total, desc=desc, ncols=80, leave=False):
        observation = {}
        with reporter_module.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            with torch.no_grad():
                if isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                elif isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                else:
                    eval_func(in_arrays)

            if self.visualizer.n_examples < self.n_vis:
                if hasattr(eval_func, 'x') and hasattr(eval_func, 'y') and hasattr(eval_func, 't'):
                    self.visualizer.add_batch(eval_func.x, eval_func.y, eval_func.t)
                else:
                    warnings.warn('`eval_func` should have attributes'
                                  '`x`, `y` and `t` for visualization..')

        summary.add(observation)

    filename = self.filename
    if callable(filename):
        filename = filename(trainer)
    else:
        filename = filename.format(trainer)
    out = os.path.join(trainer.out, filename)

    self.visualizer.save(out)

    return summary.compute_mean()
Evaluates the model and returns a result dictionary. This method runs the evaluation loop over the validation dataset. It accumulates the reported values to :class:`~chainer.DictSummary` and returns a dictionary whose values are means computed by the summary. Note that this function assumes that the main iterator raises ``StopIteration`` or code in the evaluation loop raises an exception. So, if this assumption is not held, the function could be caught in an infinite loop. Users can override this method to customize the evaluation routine. .. note:: This method encloses :attr:`eval_func` calls with :func:`function.no_backprop_mode` context, so all calculations using :class:`~chainer.FunctionNode`\\s inside :attr:`eval_func` do not make computational graphs. It is for reducing the memory consumption. Returns: dict: Result dictionary. This dictionary is further reported via :func:`~chainer.report` without specifying any observer.
https://github.com/yuta-hi/pytorch_bayesian_unet/blob/bb22b44c64f5d83d78aa93880da97e0e6168dc1c/pytorch_bcnn/extensions/validator.py#L106-L192
from __future__ import absolute_import import os import six import copy import tqdm import warnings import numpy as np import torch from pytorch_trainer.dataset import convert from pytorch_trainer import reporter as reporter_module from pytorch_trainer.training.extensions import Evaluator from ..visualizer import Visualizer from ..visualizer import ImageVisualizer _default_visualizer = ImageVisualizer() class Validator(Evaluator): def __init__(self, iterator, target, filename='validation_iter_{.updater.iteration}', visualizer=_default_visualizer, n_vis=None, converter=convert.concat_examples, device=None, eval_hook=None, eval_func=None): super(Validator, self).__init__(iterator, target, converter, device, eval_hook, eval_func) assert isinstance(visualizer, Visualizer) if n_vis is None: n_vis = np.inf self.filename = filename self.visualizer = visualizer self.n_vis = n_vis def initialize(self, trainer): reporter = reporter_module.Reporter() with reporter.scope(trainer.observation): self.report(trainer)
MIT License
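A hedged wiring sketch for the Validator above (not from the repository): it assumes pytorch_trainer's Trainer mirrors Chainer's trainer.extend(extension, trigger=...) API, and that valid_iter, model and trainer have been built elsewhere:

from pytorch_bcnn.extensions.validator import Validator

# valid_iter yields validation batches, model reports losses/metrics via the reporter,
# and trainer is a pytorch_trainer Trainer (all assumed to exist already).
trainer.extend(
    Validator(valid_iter, model, device=0, n_vis=8),
    trigger=(1, 'epoch'),
)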
graphsense/graphsense-rest
openapi_server/models/search_result.py
SearchResult.from_dict
python
def from_dict(cls, dikt) -> 'SearchResult':
    return util.deserialize_model(dikt, cls)
Returns the dict as a model :param dikt: A dict. :type: dict :return: The search_result of this SearchResult. # noqa: E501 :rtype: SearchResult
https://github.com/graphsense/graphsense-rest/blob/2e4a9c20835e54d971e3fc3aae5780bc87d48647/openapi_server/models/search_result.py#L42-L50
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from openapi_server.models.base_model_ import Model from openapi_server.models.search_result_by_currency import SearchResultByCurrency from openapi_server import util from openapi_server.models.search_result_by_currency import SearchResultByCurrency class SearchResult(Model): def __init__(self, currencies=None, labels=None): self.openapi_types = { 'currencies': List[SearchResultByCurrency], 'labels': List[str] } self.attribute_map = { 'currencies': 'currencies', 'labels': 'labels' } self._currencies = currencies self._labels = labels @classmethod
MIT License
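A minimal round-trip sketch for from_dict above, with field names taken from the model's attribute_map; it assumes the generated Model base exposes labels as a property, as openapi-generator servers usually do (the label values are made up):

from openapi_server.models.search_result import SearchResult

payload = {"currencies": [], "labels": ["exchange", "mixer"]}  # hypothetical payload
result = SearchResult.from_dict(payload)
print(result.labels)  # ['exchange', 'mixer']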
udst/urbansim_templates
tests/test_segmented_large_multinomial_logit.py
orca_session
python
def orca_session():
    d1 = {'oid': np.arange(100),
          'group': np.random.choice(['A','B','C'], size=100),
          'int_group': np.random.choice([3,4], size=100),
          'obsval': np.random.random(100),
          'choice': np.random.choice(np.arange(20), size=100)}

    d2 = {'aid': np.arange(20),
          'altval': np.random.random(20)}

    obs = pd.DataFrame(d1).set_index('oid')
    orca.add_table('obs', obs)

    alts = pd.DataFrame(d2).set_index('aid')
    orca.add_table('alts', alts)
Set up a clean Orca session with a couple of data tables.
https://github.com/udst/urbansim_templates/blob/723b83b4187da53a50ee03fdba4842a464f68240/tests/test_segmented_large_multinomial_logit.py#L14-L32
import numpy as np import pandas as pd import pytest import orca from urbansim.models.util import apply_filter_query from urbansim_templates import modelmanager from urbansim_templates.models import SegmentedLargeMultinomialLogitStep from urbansim_templates.utils import get_data, validate_template @pytest.fixture
BSD 3-Clause New or Revised License
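The orca_session fixture above is used the usual pytest way: request it as a test argument and the obs/alts tables are already registered with Orca. A hedged sketch of such a test (not one from the repository):

import orca

def test_tables_are_registered(orca_session):
    # The fixture has already added 'obs' and 'alts' to the Orca session.
    obs = orca.get_table('obs').to_frame()
    alts = orca.get_table('alts').to_frame()
    assert len(obs) == 100
    assert len(alts) == 20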
efficiosoft/hass-apps
hass_apps/schedy/actor/base.py
ActorBase.check_config_plausibility
python
def check_config_plausibility(self, state: dict) -> None:
Is called during initialization to warn the user about some possible common configuration mistakes. The entity's current state attributes dictionary is provided.
https://github.com/efficiosoft/hass-apps/blob/2d095d6fffc12bde76a7b8f356fa266de6059b37/hass_apps/schedy/actor/base.py#L145-L148
import typing as T if T.TYPE_CHECKING: import uuid from ..expression.helpers import HelperBase as ExpressionHelperBase from ..room import Room from ..stats import StatisticalParameter import copy import json import observable import voluptuous as vol from ... import common from ..room import sync_proxy class ActorBase: name = "actor" config_defaults = {} config_schema_dict = { "friendly_name": str, vol.Optional("send_retries", default=10): vol.All(int, vol.Range(min=0)), vol.Optional("send_retry_interval", default=30): vol.All(int, vol.Range(min=1)), } expression_helpers = [] stats_param_types = [] def __init__(self, entity_id: str, cfg: dict, room: "Room") -> None: self.entity_id = entity_id self.cfg = cfg self.room = room self.app = room.app self.events = observable.Observable() self.is_initialized = False self._current_value = None self._wanted_value = None self._gave_up_sending = False self._resending_timer = None def __repr__(self) -> str: return "<Actor {}>".format(str(self)) def __str__(self) -> str: return "A:{}".format(self.cfg.get("friendly_name", self.entity_id)) @staticmethod def _preprocess_state(state: T.Optional[dict]) -> dict: attrs = copy.deepcopy(state or {}) attrs.update((attrs or {}).get("attributes", {})) return attrs @sync_proxy def _resending_cb(self, kwargs: dict) -> None: self._resending_timer = None retries = self.cfg["send_retries"] interval = self.cfg["send_retry_interval"] left_tries = kwargs["left_tries"] if not left_tries: self.log( "Gave up sending value after {} retries.".format(retries), level="WARNING" if retries else "DEBUG", ) self._gave_up_sending = True return if left_tries <= retries: self.log("Re-sending value due to missing confirmation.", level="WARNING") self.log( "Setting value {!r} (left tries = {}, interval = {}).".format( self._wanted_value, left_tries, interval ), level="DEBUG", prefix=common.LOG_PREFIX_OUTGOING, ) self.do_send() self._gave_up_sending = False self._resending_timer = self.app.run_in( self._resending_cb, interval, left_tries=left_tries - 1 ) @sync_proxy def _state_cb( self, entity: str, attr: str, old: T.Optional[dict], new: T.Optional[dict], kwargs: dict, ) -> None: attrs = self._preprocess_state(new) previous_value = self._current_value new_value = self.notify_state_changed( attrs ) if new_value is None: return if new_value == self._wanted_value: self.cancel_resending_timer() self._gave_up_sending = False if new_value != previous_value: self._current_value = new_value self.log( "Received value of {}.".format(repr(new_value)), prefix=common.LOG_PREFIX_INCOMING, ) self.events.trigger("value_changed", self, new_value) def after_initialization(self) -> None: def cancel_resending_timer(self) -> None: timer = self._resending_timer if timer is None: return self._resending_timer = None self.app.cancel_timer(timer) self.log("Cancelled re-sending timer.", level="DEBUG")
Apache License 2.0
conlin-huang/aio-scrapy
aioscrapy/extensions/httpcache.py
parse_cachecontrol
python
def parse_cachecontrol(header):
    directives = {}
    for directive in header.split(b','):
        key, sep, val = directive.strip().partition(b'=')
        if key:
            directives[key.lower()] = val if sep else None
    return directives
Parse Cache-Control header https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None, ... b'max-age': b'3600'} True >>> parse_cachecontrol(b'') == {} True
https://github.com/conlin-huang/aio-scrapy/blob/ba2fbd2a46094e66b2dc8c17fb5adae073356486/aioscrapy/extensions/httpcache.py#L347-L364
import gzip import logging import os import pickle from email.utils import mktime_tz, parsedate_tz from importlib import import_module from time import time from weakref import WeakKeyDictionary from w3lib.http import headers_raw_to_dict, headers_dict_to_raw from scrapy.http import Headers, Response from scrapy.responsetypes import responsetypes from scrapy.utils.httpobj import urlparse_cached from scrapy.utils.project import data_path from scrapy.utils.python import to_bytes, to_unicode from scrapy.utils.request import request_fingerprint logger = logging.getLogger(__name__) class DummyPolicy: def __init__(self, settings): self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES') self.ignore_http_codes = [int(x) for x in settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')] def should_cache_request(self, request): return urlparse_cached(request).scheme not in self.ignore_schemes def should_cache_response(self, response, request): return response.status not in self.ignore_http_codes def is_cached_response_fresh(self, cachedresponse, request): return True def is_cached_response_valid(self, cachedresponse, response, request): return True class RFC2616Policy: MAXAGE = 3600 * 24 * 365 def __init__(self, settings): self.always_store = settings.getbool('HTTPCACHE_ALWAYS_STORE') self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES') self._cc_parsed = WeakKeyDictionary() self.ignore_response_cache_controls = [ to_bytes(cc) for cc in settings.getlist('HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS') ] def _parse_cachecontrol(self, r): if r not in self._cc_parsed: cch = r.headers.get(b'Cache-Control', b'') parsed = parse_cachecontrol(cch) if isinstance(r, Response): for key in self.ignore_response_cache_controls: parsed.pop(key, None) self._cc_parsed[r] = parsed return self._cc_parsed[r] def should_cache_request(self, request): if urlparse_cached(request).scheme in self.ignore_schemes: return False cc = self._parse_cachecontrol(request) if b'no-store' in cc: return False return True def should_cache_response(self, response, request): cc = self._parse_cachecontrol(response) if b'no-store' in cc: return False elif response.status == 304: return False elif self.always_store: return True elif b'max-age' in cc or b'Expires' in response.headers: return True elif response.status in (300, 301, 308): return True elif response.status in (200, 203, 401): return b'Last-Modified' in response.headers or b'ETag' in response.headers else: return False def is_cached_response_fresh(self, cachedresponse, request): cc = self._parse_cachecontrol(cachedresponse) ccreq = self._parse_cachecontrol(request) if b'no-cache' in cc or b'no-cache' in ccreq: return False now = time() freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now) currentage = self._compute_current_age(cachedresponse, request, now) reqmaxage = self._get_max_age(ccreq) if reqmaxage is not None: freshnesslifetime = min(freshnesslifetime, reqmaxage) if currentage < freshnesslifetime: return True if b'max-stale' in ccreq and b'must-revalidate' not in cc: staleage = ccreq[b'max-stale'] if staleage is None: return True try: if currentage < freshnesslifetime + max(0, int(staleage)): return True except ValueError: pass self._set_conditional_validators(request, cachedresponse) return False def is_cached_response_valid(self, cachedresponse, response, request): if response.status >= 500: cc = self._parse_cachecontrol(cachedresponse) if b'must-revalidate' not in cc: return True return response.status == 304 def 
_set_conditional_validators(self, request, cachedresponse): if b'Last-Modified' in cachedresponse.headers: request.headers[b'If-Modified-Since'] = cachedresponse.headers[b'Last-Modified'] if b'ETag' in cachedresponse.headers: request.headers[b'If-None-Match'] = cachedresponse.headers[b'ETag'] def _get_max_age(self, cc): try: return max(0, int(cc[b'max-age'])) except (KeyError, ValueError): return None def _compute_freshness_lifetime(self, response, request, now): cc = self._parse_cachecontrol(response) maxage = self._get_max_age(cc) if maxage is not None: return maxage date = rfc1123_to_epoch(response.headers.get(b'Date')) or now if b'Expires' in response.headers: expires = rfc1123_to_epoch(response.headers[b'Expires']) return max(0, expires - date) if expires else 0 lastmodified = rfc1123_to_epoch(response.headers.get(b'Last-Modified')) if lastmodified and lastmodified <= date: return (date - lastmodified) / 10 if response.status in (300, 301, 308): return self.MAXAGE return 0 def _compute_current_age(self, response, request, now): currentage = 0 date = rfc1123_to_epoch(response.headers.get(b'Date')) or now if now > date: currentage = now - date if b'Age' in response.headers: try: age = int(response.headers[b'Age']) currentage = max(currentage, age) except ValueError: pass return currentage class DbmCacheStorage: def __init__(self, settings): self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True) self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS') self.dbmodule = import_module(settings['HTTPCACHE_DBM_MODULE']) self.db = None def open_spider(self, spider): dbpath = os.path.join(self.cachedir, f'{spider.name}.db') self.db = self.dbmodule.open(dbpath, 'c') logger.debug("Using DBM cache storage in %(cachepath)s" % {'cachepath': dbpath}, extra={'spider': spider}) def close_spider(self, spider): self.db.close() def retrieve_response(self, spider, request): data = self._read_data(spider, request) if data is None: return url = data['url'] status = data['status'] headers = Headers(data['headers']) body = data['body'] respcls = responsetypes.from_args(headers=headers, url=url) response = respcls(url=url, headers=headers, status=status, body=body) return response def store_response(self, spider, request, response): key = self._request_key(request) data = { 'status': response.status, 'url': response.url, 'headers': dict(response.headers), 'body': response.body, } self.db[f'{key}_data'] = pickle.dumps(data, protocol=4) self.db[f'{key}_time'] = str(time()) def _read_data(self, spider, request): key = self._request_key(request) db = self.db tkey = f'{key}_time' if tkey not in db: return ts = db[tkey] if 0 < self.expiration_secs < time() - float(ts): return return pickle.loads(db[f'{key}_data']) def _request_key(self, request): return request_fingerprint(request) class FilesystemCacheStorage: def __init__(self, settings): self.cachedir = data_path(settings['HTTPCACHE_DIR']) self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS') self.use_gzip = settings.getbool('HTTPCACHE_GZIP') self._open = gzip.open if self.use_gzip else open def open_spider(self, spider): logger.debug("Using filesystem cache storage in %(cachedir)s" % {'cachedir': self.cachedir}, extra={'spider': spider}) def close_spider(self, spider): pass def retrieve_response(self, spider, request): metadata = self._read_meta(spider, request) if metadata is None: return rpath = self._get_request_path(spider, request) with self._open(os.path.join(rpath, 'response_body'), 'rb') as f: body = f.read() with 
self._open(os.path.join(rpath, 'response_headers'), 'rb') as f: rawheaders = f.read() url = metadata.get('response_url') status = metadata['status'] headers = Headers(headers_raw_to_dict(rawheaders)) respcls = responsetypes.from_args(headers=headers, url=url) response = respcls(url=url, headers=headers, status=status, body=body) return response def store_response(self, spider, request, response): rpath = self._get_request_path(spider, request) if not os.path.exists(rpath): os.makedirs(rpath) metadata = { 'url': request.url, 'method': request.method, 'status': response.status, 'response_url': response.url, 'timestamp': time(), } with self._open(os.path.join(rpath, 'meta'), 'wb') as f: f.write(to_bytes(repr(metadata))) with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f: pickle.dump(metadata, f, protocol=4) with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f: f.write(headers_dict_to_raw(response.headers)) with self._open(os.path.join(rpath, 'response_body'), 'wb') as f: f.write(response.body) with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f: f.write(headers_dict_to_raw(request.headers)) with self._open(os.path.join(rpath, 'request_body'), 'wb') as f: f.write(request.body) def _get_request_path(self, spider, request): key = request_fingerprint(request) return os.path.join(self.cachedir, spider.name, key[0:2], key) def _read_meta(self, spider, request): rpath = self._get_request_path(spider, request) metapath = os.path.join(rpath, 'pickled_meta') if not os.path.exists(metapath): return mtime = os.stat(metapath).st_mtime if 0 < self.expiration_secs < time() - mtime: return with self._open(metapath, 'rb') as f: return pickle.load(f)
MIT License
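A small follow-up to the doctest in parse_cachecontrol above: lookups use byte-string keys, and directives without a value map to None:

from aioscrapy.extensions.httpcache import parse_cachecontrol

cc = parse_cachecontrol(b'no-cache, max-age=600')
print(cc[b'max-age'])   # b'600'
print(cc[b'no-cache'])  # None (directive present but has no value)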
alvarobartt/investpy
investpy/indices.py
get_index_information
python
def get_index_information(index, country, as_json=False): if not index: raise ValueError("ERR#0047: index param is mandatory and should be a str.") if not isinstance(index, str): raise ValueError("ERR#0047: index param is mandatory and should be a str.") if country is None: raise ValueError("ERR#0039: country can not be None, it should be a str.") if country is not None and not isinstance(country, str): raise ValueError("ERR#0025: specified country value not valid.") if not isinstance(as_json, bool): raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.") resource_package = 'investpy' resource_path = '/'.join(('resources', 'indices.csv')) if pkg_resources.resource_exists(resource_package, resource_path): indices = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False) else: raise FileNotFoundError("ERR#0059: indices file not found or errored.") if indices is None: raise IOError("ERR#0037: indices not found or unable to retrieve.") country = unidecode(country.strip().lower()) if country not in get_index_countries(): raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.") indices = indices[indices['country'] == country] index = unidecode(index.strip().lower()) if index not in list(indices['name'].apply(unidecode).str.lower()): raise RuntimeError("ERR#0045: index " + index + " not found, check if it is correct.") name = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'name'] tag = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'tag'] url = "https://www.investing.com/indices/" + tag head = { "User-Agent": random_user_agent(), "X-Requested-With": "XMLHttpRequest", "Accept": "text/html", "Accept-Encoding": "gzip, deflate", "Connection": "keep-alive", } req = requests.get(url, headers=head) if req.status_code != 200: raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.") root_ = fromstring(req.text) path_ = root_.xpath("//dl[@data-test='key-info']/div") result = pd.DataFrame(columns=["Index Name", "Prev. Close", "Volume", "Day's Range", "Open", "Average Vol. (3m)", "52 wk Range", "1-Year Change"]) result.at[0, 'Index Name'] = name if not path_: raise RuntimeError("ERR#0004: data retrieval error while scraping.") for elements_ in path_: title_ = elements_.xpath(".//dt")[0].text_content() element = elements_.xpath(".//dd")[0] if title_ in result.columns.tolist(): try: result.at[0, title_] = float(element.text_content().replace(',', '')) continue except: pass try: text = element.text_content().strip() result.at[0, title_] = datetime.strptime(text, "%b %d, %Y").strftime("%d/%m/%Y") continue except: pass try: value = element.text_content().strip() if value.__contains__('K'): value = float(value.replace('K', '').replace(',', '')) * 1e3 elif value.__contains__('M'): value = float(value.replace('M', '').replace(',', '')) * 1e6 elif value.__contains__('B'): value = float(value.replace('B', '').replace(',', '')) * 1e9 elif value.__contains__('T'): value = float(value.replace('T', '').replace(',', '')) * 1e12 result.at[0, title_] = value continue except: pass result.replace({'N/A': None}, inplace=True) if as_json is True: json_ = result.iloc[0].to_dict() return json_ elif as_json is False: return result
This function retrieves fundamental financial information from the specified index. The retrieved information from the index can be valuable as it is additional information that can be used combined with OHLC values, so to determine financial insights from the company which holds the specified index. Args: index (:obj:`str`): name of the index to retrieve recent historical data from. country (:obj:`str`): name of the country from where the index is. as_json (:obj:`bool`, optional): optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`). Returns: :obj:`pandas.DataFrame` or :obj:`dict`- index_information: The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com from the specified index; it can also be returned as a :obj:`dict`, if argument `as_json=True`. If any of the information fields could not be retrieved, that field/s will be filled with None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like:: index_information = { "Index Name": "S&P Merval", "Prev. Close": 36769.59, "Volume": None, "Todays Range": "36,769.59 - 37,894.32", "Open": 36769.59, "Average Vol. (3m)": None, "52 wk Range": "22,484.4 - 44,470.76", "1-Year Change": "18.19%" } Raises: ValueError: raised if any of the introduced arguments is not valid or errored. FileNotFoundError: raised if `indices.csv` file was not found or errored. IOError: raised if `indices.csv` file is empty or errored. RuntimeError: raised if scraping process failed while running. ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
https://github.com/alvarobartt/investpy/blob/2e37300902f6df122931bb373fb17609477664e8/investpy/indices.py#L621-L762
from datetime import datetime, date, timedelta import pytz import json from random import randint import pandas as pd import pkg_resources import requests from unidecode import unidecode from lxml.html import fromstring from .utils.extra import random_user_agent from .utils.data import Data from .data.indices_data import indices_as_df, indices_as_list, indices_as_dict from .data.indices_data import index_countries_as_list def get_indices(country=None): return indices_as_df(country=country) def get_indices_list(country=None): return indices_as_list(country=country) def get_indices_dict(country=None, columns=None, as_json=False): return indices_as_dict(country=country, columns=columns, as_json=as_json) def get_index_countries(): return index_countries_as_list() def get_index_recent_data(index, country, as_json=False, order='ascending', interval='Daily'): if not index: raise ValueError("ERR#0047: index param is mandatory and should be a str.") if not isinstance(index, str): raise ValueError("ERR#0047: index param is mandatory and should be a str.") if country is None: raise ValueError("ERR#0039: country can not be None, it should be a str.") if country is not None and not isinstance(country, str): raise ValueError("ERR#0025: specified country value not valid.") if not isinstance(as_json, bool): raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.") if order not in ['ascending', 'asc', 'descending', 'desc']: raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.") if not interval: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if not isinstance(interval, str): raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") interval = interval.lower() if interval not in ['daily', 'weekly', 'monthly']: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") resource_package = 'investpy' resource_path = '/'.join(('resources', 'indices.csv')) if pkg_resources.resource_exists(resource_package, resource_path): indices = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False) else: raise FileNotFoundError("ERR#0059: indices file not found or errored.") if indices is None: raise IOError("ERR#0037: indices not found or unable to retrieve.") country = unidecode(country.strip().lower()) if country not in get_index_countries(): raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.") indices = indices[indices['country'] == country] index = unidecode(index.strip().lower()) if index not in list(indices['name'].apply(unidecode).str.lower()): raise RuntimeError("ERR#0045: index " + index + " not found, check if it is correct.") full_name = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'full_name'] id_ = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'id'] name = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'name'] index_currency = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'currency'] header = full_name + ' Historical Data' params = { "curr_id": id_, "smlID": str(randint(1000000, 99999999)), "header": header, "interval_sec": interval.capitalize(), "sort_col": "date", "sort_ord": "DESC", "action": "historical_data" } head = { 
"User-Agent": random_user_agent(), "X-Requested-With": "XMLHttpRequest", "Accept": "text/html", "Accept-Encoding": "gzip, deflate", "Connection": "keep-alive", } url = "https://www.investing.com/instruments/HistoricalDataAjax" req = requests.post(url, headers=head, data=params) if req.status_code != 200: raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.") root_ = fromstring(req.text) path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr") result = list() if path_: for elements_ in path_: if elements_.xpath(".//td")[0].text_content() == 'No results found': raise IndexError("ERR#0046: index information unavailable or not found.") info = [] for nested_ in elements_.xpath(".//td"): info.append(nested_.get('data-real-value')) index_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d') index_close = float(info[1].replace(',', '')) index_open = float(info[2].replace(',', '')) index_high = float(info[3].replace(',', '')) index_low = float(info[4].replace(',', '')) index_volume = int(info[5]) result.insert(len(result), Data(index_date, index_open, index_high, index_low, index_close, index_volume, index_currency, None)) if order in ['ascending', 'asc']: result = result[::-1] elif order in ['descending', 'desc']: result = result if as_json is True: json_ = { 'name': name, 'recent': [value.index_as_json() for value in result] } return json.dumps(json_, sort_keys=False) elif as_json is False: df = pd.DataFrame.from_records([value.index_to_dict() for value in result]) df.set_index('Date', inplace=True) return df else: raise RuntimeError("ERR#0004: data retrieval error while scraping.") def get_index_historical_data(index, country, from_date, to_date, as_json=False, order='ascending', interval='Daily'): if not index: raise ValueError("ERR#0047: index param is mandatory and should be a str.") if not isinstance(index, str): raise ValueError("ERR#0047: index param is mandatory and should be a str.") if country is None: raise ValueError("ERR#0039: country can not be None, it should be a str.") if country is not None and not isinstance(country, str): raise ValueError("ERR#0025: specified country value not valid.") try: datetime.strptime(from_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") try: datetime.strptime(to_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") start_date = datetime.strptime(from_date, '%d/%m/%Y') end_date = datetime.strptime(to_date, '%d/%m/%Y') if start_date >= end_date: raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.") if not isinstance(as_json, bool): raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.") if order not in ['ascending', 'asc', 'descending', 'desc']: raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.") if not interval: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if not isinstance(interval, str): raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") interval = interval.lower() if interval not in ['daily', 'weekly', 'monthly']: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") date_interval = 
{ 'intervals': [], } flag = True while flag is True: diff = end_date.year - start_date.year if diff > 19: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': start_date.replace(year=start_date.year + 19).strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) start_date = start_date.replace(year=start_date.year + 19) + timedelta(days=1) else: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': end_date.strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) flag = False interval_limit = len(date_interval['intervals']) interval_counter = 0 data_flag = False resource_package = 'investpy' resource_path = '/'.join(('resources', 'indices.csv')) if pkg_resources.resource_exists(resource_package, resource_path): indices = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False) else: raise FileNotFoundError("ERR#0059: indices file not found or errored.") if indices is None: raise IOError("ERR#0037: indices not found or unable to retrieve.") country = unidecode(country.strip().lower()) if country not in get_index_countries(): raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.") indices = indices[indices['country'] == country] index = unidecode(index.strip().lower()) if index not in list(indices['name'].apply(unidecode).str.lower()): raise RuntimeError("ERR#0045: index " + index + " not found, check if it is correct.") full_name = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'full_name'] id_ = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'id'] name = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'name'] index_currency = indices.loc[(indices['name'].apply(unidecode).str.lower() == index).idxmax(), 'currency'] final = list() header = full_name + ' Historical Data' for index in range(len(date_interval['intervals'])): interval_counter += 1 params = { "curr_id": id_, "smlID": str(randint(1000000, 99999999)), "header": header, "st_date": date_interval['intervals'][index]['start'], "end_date": date_interval['intervals'][index]['end'], "interval_sec": interval.capitalize(), "sort_col": "date", "sort_ord": "DESC", "action": "historical_data" } head = { "User-Agent": random_user_agent(), "X-Requested-With": "XMLHttpRequest", "Accept": "text/html", "Accept-Encoding": "gzip, deflate", "Connection": "keep-alive", } url = "https://www.investing.com/instruments/HistoricalDataAjax" req = requests.post(url, headers=head, data=params) if req.status_code != 200: raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.") if not req.text: continue root_ = fromstring(req.text) path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr") result = list() if path_: for elements_ in path_: if elements_.xpath(".//td")[0].text_content() == 'No results found': if interval_counter < interval_limit: data_flag = False else: raise IndexError("ERR#0046: index information unavailable or not found.") else: data_flag = True info = [] for nested_ in elements_.xpath(".//td"): info.append(nested_.get('data-real-value')) if data_flag is True: index_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d') index_close = float(info[1].replace(',', '')) index_open = float(info[2].replace(',', '')) index_high = float(info[3].replace(',', '')) index_low = float(info[4].replace(',', '')) index_volume = int(info[5]) result.insert(len(result), Data(index_date, 
index_open, index_high, index_low, index_close, index_volume, index_currency, None)) if data_flag is True: if order in ['ascending', 'asc']: result = result[::-1] elif order in ['descending', 'desc']: result = result if as_json is True: json_list = [value.index_as_json() for value in result] final.append(json_list) elif as_json is False: df = pd.DataFrame.from_records([value.index_to_dict() for value in result]) df.set_index('Date', inplace=True) final.append(df) else: raise RuntimeError("ERR#0004: data retrieval error while scraping.") if order in ['descending', 'desc']: final.reverse() if as_json is True: json_ = { 'name': name, 'historical': [value for json_list in final for value in json_list] } return json.dumps(json_, sort_keys=False) elif as_json is False: return pd.concat(final)
MIT License
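The historical-data routine in the record above splits the requested date range into windows of at most 19 years and issues one request per window. Below is a minimal standalone sketch of just that chunking step, using only the standard library; it assumes the same 'dd/mm/yyyy' inputs and '%m/%d/%Y' output format used above, and the helper name split_date_range is hypothetical.

from datetime import datetime, timedelta

def split_date_range(from_date, to_date, max_years=19):
    # Mirrors the interval-building loop above: emit (start, end) pairs,
    # each spanning at most `max_years` years, formatted as '%m/%d/%Y'.
    start = datetime.strptime(from_date, '%d/%m/%Y')
    end = datetime.strptime(to_date, '%d/%m/%Y')
    intervals = []
    while True:
        if end.year - start.year > max_years:
            stop = start.replace(year=start.year + max_years)
            intervals.append((start.strftime('%m/%d/%Y'), stop.strftime('%m/%d/%Y')))
            # Like the original loop, this assumes the start date is not Feb 29.
            start = stop + timedelta(days=1)
        else:
            intervals.append((start.strftime('%m/%d/%Y'), end.strftime('%m/%d/%Y')))
            return intervals

print(split_date_range('01/01/1990', '01/01/2021'))
# [('01/01/1990', '01/01/2009'), ('01/02/2009', '01/01/2021')]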
rmarkello/abagen
abagen/reporting.py
_add_references
python
def _add_references(report):
    refreport = ''
    for ref, cite in REFERENCES.items():
        if ref in report:
            refreport += f'[{ref}]: {cite}<p>'

    if len(refreport) > 0:
        refreport = '<br> REFERENCES<p>----------<p>' + refreport

    if refreport.endswith('<p> '):
        refreport = refreport[:-4]

    return refreport
Detects references in `report` and generates list

Parameters
----------
report : str
    Report body

Returns
-------
references : str
    List of references to be appended to `report`
https://github.com/rmarkello/abagen/blob/2aeab5bd0f147fa76b488645e148a1c18095378d/abagen/reporting.py#L599-L625
import logging import numpy as np from . import __version__ from .datasets import check_donors, fetch_donor_info from .images import coerce_atlas_to_dict from .utils import first_entry LGR = logging.getLogger('abagen') METRICS = { np.mean: 'mean', np.median: 'median' } REFERENCES = dict( A2019N=('Arnatkevic̆iūtė, A., Fulcher, B. D., & Fornito, A. (2019). ' 'A practical guide to linking brain-wide gene expression and ' 'neuroimaging data. Neuroimage, 189, 353-367.'), F2016P=('Fulcher, B. D., & Fornito, A. (2016). A transcriptional ' 'signature of hub connectivity in the mouse connectome. ' 'Proceedings of the National Academy of Sciences, 113(5), ' '1435-1440.'), F2013J=('Fulcher, B. D., Little, M. A., & Jones, N. S. (2013). ' 'Highly comparative time-series analysis: the empirical ' 'structure of time series and their methods. Journal of ' 'the Royal Society Interface, 10(83), 20130048.'), H2012N=('Hawrylycz, M. J., Lein, E. S., Guillozet-Bongaarts, A. L., ' 'Shen, E. H., Ng, L., Miller, J. A., ... & Jones, A. R. ' '(2012). An anatomically comprehensive atlas of the adult ' 'human brain transcriptome. Nature, 489(7416), 391-399.'), H2015N=('Hawrylycz, M., Miller, J. A., Menon, V., Feng, D., ' 'Dolbeare, T., Guillozet-Bongaarts, A. L., ... & Lein, E. ' '(2015). Canonical genetic signatures of the adult human ' 'brain. Nature Neuroscience, 18(12), 1832.'), M2021B=('Markello, R.D., Arnatkevic̆iūtė, A., Poline, J-B., Fulcher, B. ' 'D., Fornito, A., & Misic, B. (2021). Standardizing workflows in ' 'imaging transcriptomics with the abagen toolbox. Biorxiv.'), P2017G=('Parkes, L., Fulcher, B. D., Yücel, M., & Fornito, A. ' '(2017). Transcriptional signatures of connectomic ' 'subregions of the human striatum. Genes, Brain and ' 'Behavior, 16(7), 647-663.'), Q2002N=('Quackenbush, J. (2002). Microarray data normalization and ' 'transformation. Nature Genetics, 32(4), 496-501.'), R2018N=('Romero-Garcia, R., Whitaker, K. J., Váša, F., Seidlitz, ' 'J., Shinn, M., Fonagy, P., ... & NSPN Consortium. (2018). ' 'Structural covariance networks are coupled to expression ' 'of genes enriched in supragranular layers of the human ' 'cortex. 
NeuroImage, 171, 256-267.') ) class Report: def __init__(self, atlas, atlas_info=None, *, ibf_threshold=0.5, probe_selection='diff_stability', donor_probes='aggregate', lr_mirror=None, missing=None, tolerance=2, sample_norm='srs', gene_norm='srs', norm_matched=True, norm_structures=False, region_agg='donors', agg_metric='mean', corrected_mni=True, reannotated=True, donors='all', return_donors=False, data_dir=None, counts=None, n_probes=None, n_genes=None): efflevel = LGR.getEffectiveLevel() LGR.setLevel(100) atlas, self.group_atlas = coerce_atlas_to_dict(atlas, donors, atlas_info=atlas_info, data_dir=data_dir) self.atlas = first_entry(atlas) self.atlas_info = self.atlas.atlas_info self.ibf_threshold = ibf_threshold self.probe_selection = probe_selection self.donor_probes = donor_probes self.lr_mirror = lr_mirror self.missing = missing self.tolerance = tolerance self.sample_norm = sample_norm self.gene_norm = gene_norm self.norm_matched = norm_matched self.norm_structures = norm_structures self.region_agg = region_agg self.agg_metric = METRICS.get(agg_metric, agg_metric) self.corrected_mni = corrected_mni self.reannotated = reannotated self.donors = donors self.return_donors = return_donors self.counts = counts self.n_probes = n_probes self.n_genes = n_genes self.body = self.gen_report() LGR.setLevel(efflevel) def gen_report(self): report = '' report += """ Regional microarry expression data were obtained from {n_donors} post-mortem brains ({n_female} female, ages {min}--{max}, {mean:.2f} +/- {std:.2f}) provided by the Allen Human Brain Atlas (AHBA, https://human.brain-map.org; [H2012N]). Data were processed with the abagen toolbox (version {vers}; https://github.com/rmarkello/abagen; [M2021B]) """.format(**_get_donor_demographics(self.donors), vers=__version__) if self.atlas.volumetric and self.group_atlas: report += """ using a {n_region}-region volumetric atlas in MNI space.<br> """.format(n_region=len(self.atlas.labels)) elif not self.atlas.volumetric and self.group_atlas: report += """ using a {n_region}-region surface-based atlas in MNI space.<br> """.format(n_region=len(self.atlas.labels)) elif self.atlas.volumetric and not self.group_atlas: report += """ using a {n_region}-region volumetric atlas, independently aligned to each donor's native MRI space.<br> """.format(n_region=len(self.atlas.labels)) else: report += ".<br>" if self.reannotated: report += """ First, microarray probes were reannotated using data provided by [A2019N]; probes not matched to a valid Entrez ID were discarded. """ else: report += """ First, microarray probes not matched to a valid Entrez ID were discarded. """ if self.ibf_threshold > 0: report += """ Next, probes were filtered based on their expression intensity relative to background noise [Q2002N], such that probes with intensity less than the background in >={threshold:.2f}% of samples across donors were discarded """.format(threshold=self.ibf_threshold * 100) if self.n_probes is not None: report += """ , yielding {n_probes:,} probes """.format(n_probes=self.n_probes) report += "." if self.probe_selection == 'average': report += """ When multiple probes indexed the expression of the same gene we calculated the mean expression across probes. 
""" elif self.probe_selection == 'diff_stability': report += r""" When multiple probes indexed the expression of the same gene, we selected and used the probe with the most consistent pattern of regional variation across donors (i.e., differential stability; [H2015N]), calculated with:<br> $$ \Delta_{{S}}(p) = \frac{{1}}{{\binom{{N}}{{2}}}} \, \sum_{{i=1}}^{{N-1}} \sum_{{j=i+1}}^{{N}} \rho[B_{{i}}(p), B_{{j}}(p)] $$<br> where $ \rho $ is Spearman's rank correlation of the expression of a single probe, p, across regions in two donors $B_{{i}}$ and $B_{{j}}$, and N is the total number of donors. Here, regions correspond to the structural designations provided in the ontology from the AHBA. """ elif self.probe_selection == "pc_loading": report += """ When multiple probes indexed the expression of the same gene we selected and used the probe with the highest loading on the first principal component taken from a decomposition of probe expression across samples from all donors [P2017G]. """ elif self.probe_selection == "max_intensity": report += """ When multiple probes indexed the expression of the same gene we selected and used the probe with the highest mean intensity across samples. """ elif self.probe_selection == "max_variance": report += """ When multiple probes indexed the expression of the same gene we selected and used the probe with the highest variance across samples. """ elif self.probe_selection == "corr_intensity": report += """ When multiple probes indexed the expression of the same gene we selected and used the expression profile of a single representative probe. For genes where only two probes were available we selected the probe with the highest mean intensity across samples; where three or more probes were available, we calculated the correlation of probe expression across samples and selected the probe with the highest average correlation. """ elif self.probe_selection == "corr_variance": report += """ When multiple probes indexed the expression of the same gene we selected and used the expression profile of a single representative probe. For genes where only two probes were available we selected the probe with the highest variance across samples; where three or more probes were available, we calculated the correlation of probe expression across samples and selected the probe with the highest average correlation. """ elif self.probe_selection == "rnaseq": report += """ When multiple probes indexed the expression of the same gene we selected and used the probe with the most consistent pattern of regional expression to RNA-seq data (available for two donors in the AHBA). That is, we calculated the Spearman’s rank correlation between each probes' microarray expression and RNA-seq expression data of the corresponding gene, and selected the probe with the highest correspondence. Here, regions correspond to the structural designations provided in the ontology from the AHBA. 
""" if (self.donor_probes == "aggregate" and self.probe_selection not in ['average', 'diff_stability', 'rnaseq']): report += """ The selection of probes was performed using sample expression data aggregated across all donors.<br> """ elif (self.donor_probes == "aggregate" and self.probe_selection in ['average', 'diff_stability', 'rnaseq']): report += "<br>" elif self.donor_probes == "independent": report += """ The selection of probes was performed independently for each donor, such that the probes chosen to represent each gene could differ across donors.<br> """ elif self.donor_probes == "common": report += """ The selection of probes was performed independently for each donor, and the probe most commonly selected across all donors was chosen to represent the expression of each gene for all donors.<br> """ if self.corrected_mni and self.group_atlas: report += """ The MNI coordinates of tissue samples were updated to those generated via non-linear registration using the Advanced Normalization Tools (ANTs; https://github.com/chrisfilo/alleninf). """ if self.lr_mirror == 'bidirectional': report += """ To increase spatial coverage, tissue samples were mirrored bilaterally across the left and right hemispheres [R2018N]. """ elif self.lr_mirror == 'leftright': report += """ To increase spatial coverage, tissue samples in the left hemisphere were mirrored into the right hemisphere [R2018N]. """ elif self.lr_mirror == 'rightleft': report += """ To increase spatial coverage, tissue samples in the right hemisphere were mirrored into the left hemisphere [R2018N]. """ if self.tolerance == 0 and not self.atlas.volumetric: report += """ Samples were assigned to brain regions by minimizing the Euclidean distance between the {space} coordinates of each sample and the nearest surface vertex. Samples where the Euclidean distance to the nearest vertex was greater than the mean distance for all samples belonging to that donor were excluded. """.format(space='MNI' if self.group_atlas else 'native voxel') elif self.tolerance == 0 and self.atlas.volumetric: report += """ Samples were assigned to brain regions in the provided atlas only if their {space} coordinates were directly within a voxel belonging to a parcel. """.format(space='MNI' if self.group_atlas else 'native voxel') elif self.tolerance > 0 and not self.atlas.volumetric: report += """ Samples were assigned to brain regions by minimizing the Euclidean distance between the {space} coordinates of each sample and the nearest surface vertex. Samples where the Euclidean distance to the nearest vertex was more than {tolerance} standard deviations above the mean distance for all samples belonging to that donor were excluded. """.format(space='MNI' if self.group_atlas else 'native voxel', tolerance=self.tolerance) elif self.tolerance > 0 and self.atlas.volumetric: report += """ Samples were assigned to brain regions in the provided atlas if their {space} coordinates were within {tolerance} mm of a given parcel. """.format(space='MNI' if self.group_atlas else 'native voxel', tolerance=self.tolerance) elif self.tolerance < 0 and not self.atlas.volumetric: report += """ Samples were assigned to brain regions by minimizing the Euclidean distance between the {space} coordinates of each sample and the nearest surface vertex. Samples where the Euclidean distance to the nearest vertex was more than {tolerance}mm were excluded. 
""".format(space='MNI' if self.group_atlas else 'native voxel', tolerance=abs(self.tolerance)) if self.atlas_info is not None: report += """ To reduce the potential for misassignment, sample-to-region matching was constrained by hemisphere and gross structural divisions (i.e., cortex, subcortex/brainstem, and cerebellum, such that e.g., a sample in the left cortex could only be assigned to an atlas parcel in the left cortex; [A2019N]). """ if self.missing == 'centroids': report += """ If a brain region was not assigned a sample from any donor based on the above procedure, the tissue sample closest to the centroid of that parcel was identified independently for each donor. The average of these samples was taken across donors, weighted by the distance between the parcel centroid and the sample, to obtain an estimate of the parcellated expression values for the missing region. """ if self.counts is not None: n_missing = np.sum(self.counts.sum(axis=1) == 0) if n_missing > 0: report += """ This procedure was performed for {n_missing} regions that were not assigned tissue samples. """.format(n_missing=n_missing) elif self.missing == 'interpolate': report += """ If a brain region was not assigned a tissue sample based on the above procedure, every {node} in the region was mapped to the nearest tissue sample from the donor in order to generate a dense, interpolated expression map. The average of these expression values was taken across all {nodes} in the region, weighted by the distance between each {node} and the sample mapped to it, in order to obtain an estimate of the parcellated expression values for the missing region. """.format(node='voxel' if self.atlas.volumetric else 'vertex', nodes='voxels' if self.atlas.volumetric else 'vertices') if self.norm_matched: report += """ All tissue samples not assigned to a brain region in the provided atlas were discarded. """ report += "<br>" if self.sample_norm is not None: report += """ Inter-subject variation was addressed {} """.format(_get_norm_procedure(self.sample_norm, 'sample_norm')) if self.gene_norm is None: pass elif self.gene_norm == self.sample_norm: report += """ Gene expression values were then normalized across tissue samples using an identical procedure. """ elif (self.gene_norm != self.sample_norm and self.sample_norm is not None): report += """ Inter-subject variation in gene expression was then addressed {} """.format(_get_norm_procedure(self.gene_norm, 'gene_norm')) elif self.gene_norm != self.sample_norm and self.sample_norm is None: report += """ Inter-subject variation was addressed {} """.format(_get_norm_procedure(self.gene_norm, 'gene_norm')) if not self.norm_matched and not self.norm_structures: report += """ All available tissue samples were used in the normalization process regardless of whether they were assigned to a brain region. """ elif not self.norm_matched and self.norm_structures: report += """ All available tissue samples were used in the normalization process regardless of whether they were matched to a brain region; however, normalization was performed separately for samples in distinct structural classes (i.e., cortex, subcortex/brainstem, cerebellum). """ elif self.norm_matched and self.norm_structures: report += """ Normalization was performed separately for samples in distinct structural classes (i.e., cortex, subcortex/brainstem, cerebellum). """ if not self.norm_matched and self.gene_norm is not None: report += """ Tissue samples not matched to a brain region were discarded after normalization. 
""" if self.region_agg == 'donors' and self.agg_metric == 'mean': report += """ Samples assigned to the same brain region were averaged separately for each donor{donors}, yielding a regional expression matrix {n_donors} """.format(donors=' and then across donors' if not self.return_donors else '', n_donors=' for each donor ' if self.return_donors else '') elif self.region_agg == 'samples' and self.agg_metric == 'mean': report += """ Samples assigned to the same brain region were averaged across all donors, yielding a regional expression matrix """ elif self.region_agg == 'donors' and self.agg_metric == 'median': report += """ The median value of samples assigned to the same brain region was computed separately for each donor{donors}, yielding a regional expression matrix{n_donors} """.format(donors=' and then across donors' if not self.return_donors else '', n_donors=' for each donor' if self.return_donors else '') elif self.region_agg == 'samples' and self.agg_metric == 'median': report += """ The median value of samples assigned to the same brain region was computed across donors, yielding a single regional expression matrix """ if self.n_genes is not None: report += """ with {n_region} rows, corresponding to brain regions, and {n_genes:,} columns, corresponding to the retained genes """.format(n_region=len(self.atlas.labels), n_genes=self.n_genes) report += "\b." report += str(_add_references(report)) return _sanitize_text(report) def _sanitize_text(text): text = ' '.join(text.replace('\n', ' ').split()) return text.replace('<br> ', '\n\n') .replace('<p>', '\n') def _get_donor_demographics(donors): donors = [int(i) for i in check_donors(donors)] info = fetch_donor_info().set_index('uid').loc[donors] age_info = info['age'].describe() dinfo = dict(n_donors=len(info), n_female=sum(info['sex'] == 'F')) dinfo.update(age_info.loc[['min', 'max', 'mean', 'std']].to_dict()) return dinfo def _get_norm_procedure(norm, parameter): if parameter not in ('sample_norm', 'gene_norm'): raise ValueError(f'Invalid norm parameter {parameter}') if parameter == 'sample_norm': mod = ('tissue sample expression values across genes') suff = ('tissue sample across genes') else: mod = ('gene expression values across tissue samples') suff = ('gene across tissue samples') sigmoid = r""" independently for each donor using a sigmoid function [F2016P]:<br> $$ x_{{norm}} = \frac{{1}}{{1 + \exp(-\frac{{(x - \overline{{x}})}} {{\sigma_{{x}}}})}} $$<br> where $\bar{{x}}$ is the arithmetic mean and $\sigma_{{x}}$ is the sample standard deviation of the expression of a single """ robust_sigmoid = r""" using a robust sigmoid function [F2013J]:<br> $$ x_{{norm}} = \frac{{1}}{{1 + \exp(-\frac{{(x-\langle x \rangle)}} {{\text{{IQR}}_{{x}}}})}} $$<br> where $\langle x \rangle$ is the median and $\text{{IQR}}_{{x}}$ is the normalized interquartile range of the expression of a single """ rescale = r""" Normalized expression values were then rescaled to the unit interval: <br> $$ x_{{scaled}} = \frac{{x_{{norm}} - \min(x_{{norm}})}} {{\max(x_{{norm}}) - \min(x_{{norm}})}} $$<br> """ procedure = '' if norm in ['center', 'demean']: procedure += """ by demeaning {mod} independently for each donor. """.format(mod=mod) elif norm == 'zscore': procedure += """ by mean- and variance-normalizing (i.e., z-scoring) {mod} independently for each donor. 
""".format(mod=mod) elif norm == 'minmax': procedure += r""" by rescaling {mod} to the unit interval independently for each donor: <br> $$ x_{{{{scaled}}}} = \frac{{{{x - \min(x)}}}}{{{{\max(x) - \min(x)}}}} $$<br> """.format(mod=mod) elif norm in ['sigmoid', 'sig']: procedure += r""" by normalizing {mod} {sigmoid} {suff}. """.format(mod=mod, sigmoid=sigmoid, suff=suff) elif norm in ['scaled_sigmoid', 'scaled_sig']: procedure += r""" by normalizing {mod} {sigmoid} {suff}. {rescale} """.format(mod=mod, sigmoid=sigmoid, suff=suff, rescale=rescale) elif norm in ['scaled_sigmoid_quantiles', 'scaled_sig_qnt']: procedure += r""" by normalizing {mod} {sigmoid} {suff}, calculated using only data in the 5–95th percentile range to downweight the impact of outliers. {rescale} """.format(mod=mod, sigmoid=sigmoid, suff=suff, rescale=rescale) elif norm in ['robust_sigmoid', 'rsig', 'rs']: procedure += r""" by normalizing {mod} {robust_sigmoid} {suff}. """.format(mod=mod, robust_sigmoid=robust_sigmoid, suff=suff) elif norm in ['scaled_robust_sigmoid', 'scaled_rsig', 'srs']: procedure += r""" by normalizing {mod} {robust_sigmoid} {suff}. {rescale} """.format(mod=mod, robust_sigmoid=robust_sigmoid, suff=suff, rescale=rescale) elif norm in ['mixed_sigmoid', 'mixed_sig']: procedure += r""" by normalizing {mod} using a mixed sigmoid function [F2013J]:<br> $$ x_{{{{norm}}}} = \left\{{{{\begin{{{{array}}}}{{{{r r}}}} \frac{{{{1}}}}{{{{1 + \exp(-\frac{{{{(x-\overline{{{{x}}}})}}}} {{{{\sigma_{{{{x}}}}}}}})}}}} ,& \text{{{{IQR}}}}_{{{{x}}}} = 0 \frac{{{{1}}}}{{{{1 + \exp(-\frac{{{{(x-\langle x \rangle)}}}} {{{{\text{{{{IQR}}}}_{{{{x}}}}}}}})}}}} ,& \text{{{{IQR}}}}_{{{{x}}}} \neq 0 \end{{{{array}}}}\right. $$<br> where $\bar{{{{x}}}}$ is the arithmetic mean, $\sigma_{{{{x}}}}$ is the sample standard deviation, $\langle x \rangle$ is the median, and $\text{{{{IQR}}}}_{{{{x}}}}$ is the normalized interquartile range of the expression value of a single {suff}. {rescale} """.format(mod=mod, suff=suff, rescale=rescale) return procedure
BSD 3-Clause New or Revised License
xiongma/dgcnn
bert/util.py
label_smoothing
python
def label_smoothing(inputs, epsilon=0.1):
    V = tf.cast(tf.shape(inputs)[-1], tf.float32)
    return ((1 - epsilon) * inputs) + (epsilon / V)
Applies label smoothing. See 5.4 and https://arxiv.org/abs/1512.00567.
inputs: 3d tensor. [N, T, V], where V is the number of vocabulary.
epsilon: Smoothing rate.

For example,

```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
                                [0, 1, 0],
                                [1, 0, 0]],
                               [[1, 0, 0],
                                [1, 0, 0],
                                [0, 1, 0]]], tf.float32)

outputs = label_smoothing(inputs)

with tf.Session() as sess:
    print(sess.run([outputs]))

>>
[array([[[ 0.03333334,  0.03333334,  0.93333334],
         [ 0.03333334,  0.93333334,  0.03333334],
         [ 0.93333334,  0.03333334,  0.03333334]],
        [[ 0.93333334,  0.03333334,  0.03333334],
         [ 0.93333334,  0.03333334,  0.03333334],
         [ 0.03333334,  0.93333334,  0.03333334]]], dtype=float32)]
```
https://github.com/xiongma/dgcnn/blob/8886878caa5872fed2fe3b4a86cca8b4938c2629/bert/util.py#L11-L44
import tensorflow as tf
MIT License
nicta/dora
dora/active_sampling/pltutils.py
plot_sampler_progress
python
def plot_sampler_progress(sampler, ax):
    assert sampler.dims == 2
    assert sampler.n_tasks == 1

    X = sampler.X()
    y = sampler.y().flatten()

    if sampler.name == 'GaussianProcess':

        n_grids = 400
        xi = np.linspace(sampler.lower[0], sampler.upper[0], num=n_grids)
        yi = np.linspace(sampler.lower[1], sampler.upper[1], num=n_grids)
        xg, yg = np.meshgrid(xi, yi)

        X_test = np.array([xg.flatten(), yg.flatten()]).T
        zg = np.reshape(sampler.predict(X_test)[0], xg.shape)

        extent = [sampler.lower[0], sampler.upper[0],
                  sampler.upper[1], sampler.lower[1]]
        ax.imshow(zg, extent=extent)

    elif sampler.name == 'Delaunay':

        import matplotlib.pyplot as pl
        import matplotlib as mpl

        cols = pl.cm.jet(np.linspace(0, 1, 64))
        custom = mpl.colors.ListedColormap(cols * 0.5 + 0.5)

        w = 4. / np.log(1 + len(y))
        ax.tripcolor(X[:, 0], X[:, 1], y, shading='gouraud',
                     edgecolors='k', linewidth=w, cmap=custom)
        ax.triplot(X[:, 0], X[:, 1], color='k', linewidth=w)

    else:
        raise ValueError('Sampler "%s" not implemented yet' % sampler.name)

    ax.scatter(X[:, 0], X[:, 1], c=y)
    ax.set_title('%d Samples' % len(y))
    ax.axis('image')
Plot the progress of a particular sampler.

.. note :: Only works if n_dims = 2 and n_tasks = 1

Parameters
----------
sampler : Sampler
    An instance of the sampler class or its subclasses
ax : matplotlib.axes._subplots.AxesSubplot
    The axes or subplot the plotting is to be performed on
https://github.com/nicta/dora/blob/1929daa9f957a4ff42e688b116faa7699b3b1168/dora/active_sampling/pltutils.py#L33-L85
import numpy as np


def split_subplots(n):
    n_sqrt = np.sqrt(n)
    c = np.ceil(n_sqrt).astype(int)
    r = np.floor(n_sqrt).astype(int)

    while c * r < n:
        r += 1

    return r, c
Apache License 2.0
zijianzhang/carla_invs
PCDet/pcdet/datasets/dataset.py
DatasetTemplate.get_infos
python
def get_infos(self, **kwargs):
    raise NotImplementedError
generate data infos from raw data for the dataset
https://github.com/zijianzhang/carla_invs/blob/826a5a39e70d0a94c03db6a93c4c3ecdc181465b/PCDet/pcdet/datasets/dataset.py#L13-L15
import numpy as np
from collections import defaultdict

import torch.utils.data as torch_data

from ..utils import box_utils, common_utils
from ..config import cfg
from .data_augmentation import augmentation_utils
import pdb


class DatasetTemplate(torch_data.Dataset):
    def __init__(self):
        super().__init__()
MIT License
arch4ngel/bruteloops
BruteLoops/brute.py
BruteForcer.launch
python
def launch(self): if self.config.max_auth_tries: limit = self.config.max_auth_tries else: limit = 1 sleeping = False recovered = False '''Logic iterates through each prioritized username and password value and determines if it resides in the database. A ValueError is raised if it doesn't exist in the database. Note that the password value is checked for both normal passwords and credentials. No error is raised so long as the value resides in one of the two tables. ''' while True: try: usernames = self.main_db_sess.query(sql.Username) .join(sql.Credential) .filter( sql.Username.recovered == False, sql.Username.future_time <= time(), sql.Credential.guessed == False) .order_by(sql.Username.priority.desc()) .order_by(sql.Credential.strict.desc()) .all() if not usernames and not sleeping: u = self.main_db_sess.query(sql.Username) .filter(sql.Username.recovered == 0) .order_by(sql.Username.future_time.desc()) .first() sleeping = True if u and u.future_time > 60+time(): self.logger.log( GENERAL_EVENTS, f'Sleeping until {BruteTime.float_to_str(u.future_time)}' ) elif usernames and sleeping: sleeping = False for username in usernames: credentials = self.main_db_sess.query(sql.Credential) .join(sql.Password) .filter( sql.Credential.guessed == False, sql.Credential.username == username) .order_by(sql.Credential.strict.desc()) .order_by(sql.Password.priority.desc()) .limit(limit) .all() if username.recovered: continue for credential in credentials: ctime = BruteTime.current_time() if self.config.max_auth_jitter: ftime = self.config.max_auth_jitter.get_jitter_future() else: ftime = -1.0 if username.recovered: break credential.guess_time=ctime credential.username.last_time=ctime credential.username.future_time=ftime self.main_db_sess.commit() recovered = self.do_authentication_callback( credential.username.value, credential.password.value ) if recovered and self.config.stop_on_valid: break if recovered and self.config.stop_on_valid: break if recovered and self.config.stop_on_valid: self.logger.log( GENERAL_EVENTS, 'Valid credentials recovered. Exiting per ' 'stop_on_valid configuration.', ) self.shutdown() break sample_remaining = self.main_db_sess .query(sql.Username) .join(sql.Credential) .filter(sql.Username.recovered == False, sql.Credential.guessed == False) .first() if sample_remaining: if len(self.presults): outputs = self.monitor_processes() self.handle_outputs(outputs) sleep(.2) continue outputs = self.monitor_processes(ready_all=True) self.handle_outputs(outputs) self.logger.log(GENERAL_EVENTS,'Attack finished') self.shutdown() break except Exception as e: if e in self.config.exception_handlers: self.config.exception_handlers[e](self) else: self.logger.log( GENERAL_EVENTS, 'Unhandled exception occurred. Shutting down attack ' 'and returning control to the caller.' ) self.shutdown() raise e
Launch a horizontal brute force attack.

The arguments to `usernames` and `passwords` are expected to be
either a string, tuple, or list object. Should a string be provided,
it should represent a path to a file containing newline delimited
values of the corresponding input. Should a tuple or list be
provided, each element should be a value corresponding to the
appropriate input.
https://github.com/arch4ngel/bruteloops/blob/efc2990475094894bada9427247cd766cf69a145/BruteLoops/brute.py#L395-L628
from BruteLoops.logging import * from . import logging from .brute_time import BruteTime from . import sql from .config import Config from .db_manager import * from sqlalchemy.orm.session import close_all_sessions from pathlib import Path from uuid import uuid4 from collections import namedtuple from copy import deepcopy from time import sleep,time from types import FunctionType, MethodType import traceback import re import signal import logging from time import time UNKNOWN_PRIORITIZED_USERNAME_MSG = 'Prioritized username value supplied ' 'during configuration that does not a' 'ppear in the database. Insert this v' 'alue or remove it from the configura' 'tion: {username}' UNKNOWN_PRIORITIZED_PASSWORD_MSG = 'Prioritized password value supplied ' 'during configuration that does not a' 'ppear in the database. Insert this v' 'alue or remove it from the configura' 'tion: {password}' logger = None class BruteForcer: def __init__(self, config, use_billiard=False): if not config.validated: config.validate() self.main_db_sess = config.session_maker.new() self.handler_db_sess = config.session_maker.new() self.config = config self.presults = [] self.pool = None self.attack = None self.logger = getLogger('BruteLoops.BruteForcer', config.log_level, config.log_valid, config.log_invalid, config.log_general, config.log_file, config.log_stdout, config.log_stderr) self.logger.log( GENERAL_EVENTS, f'Initializing {config.process_count} process(es)' ) self.logger.log(GENERAL_EVENTS, 'Logging attack configuration parameters') config_attrs = [ 'authentication_jitter', 'max_auth_jitter', 'max_auth_tries', 'stop_on_valid', 'db_file', 'log_file', 'log_valid', 'log_invalid', 'log_general', 'log_stdout', 'log_stderr' ] for attr in config_attrs: self.logger.log(GENERAL_EVENTS, f'Config Parameter -- {attr}: '+str(getattr(self.config,attr))) if hasattr(self.config.authentication_callback, 'callback_name'): self.logger.log(GENERAL_EVENTS, f'Config Parameter -- callback_name: '+ getattr(self.config.authentication_callback, 'callback_name')) original_sigint_handler = signal.signal(signal.SIGINT,signal.SIG_IGN) if use_billiard: import billiard self.pool = billiard.Pool(processes=config.process_count) else: from multiprocessing.pool import Pool self.pool = Pool(processes=config.process_count) if not KeyboardInterrupt in self.config.exception_handlers: def handler(sig,exception): print('SIGINT Captured -- Shutting down ' 'attack\n') self.shutdown() print('Exiting') exit(sig) self.config.exception_handlers[KeyboardInterrupt] = handler if KeyboardInterrupt in self.config.exception_handlers: sigint_handler = config.exception_handlers[KeyboardInterrupt] sigint_class = sigint_handler.__class__ if sigint_class != MethodType and sigint_class != FunctionType: assert '__call__' in sigint_handler.__dict__, ( 'Exception handler must implement __call__' ) call_class = sigint_handler.__getattribute__('__call__').__class__ assert call_class == FunctionType or call_class == MethodType, ( '__call__ must be of type FunctionType or MethodType' ) signal.signal(signal.SIGINT, sigint_handler) else: signal.signal(signal.SIGINT, original_sigint_handler) current_time = BruteTime.current_time(format=str) self.logger.log(GENERAL_EVENTS, f'Beginning attack: {current_time}') self.attack = sql.Attack(start_time=BruteTime.current_time()) self.main_db_sess.add(self.attack) self.main_db_sess.commit() self.config = config self.realign_future_time() def handle_outputs(self, outputs): recovered = False for output in outputs: credential = self.handler_db_sess 
.query(sql.Credential) .join(sql.Username) .join(sql.Password) .filter( sql.Username.value == output[1], sql.Password.value == output[2], sql.Username.recovered == False) .first() if not credential: continue credential.guessed=True cred = f'{output[1]}:{output[2]}' if output[0]: recovered = True self.logger.log(VALID_CREDENTIALS,cred) credential.username.recovered=True credential.valid=True else: credential.valid=False self.logger.log(CREDENTIAL_EVENTS,cred) self.handler_db_sess.commit() return recovered def realign_future_time(self): usernames = self.main_db_sess.query(sql.Username) .filter( sql.Username.recovered == False, sql.Username.last_time > -1.0, ) for username in usernames: if self.config.max_auth_jitter: username.future_time = self.config.max_auth_jitter.get_jitter_future( current_time=username.last_time ) else: username.future_time = -1.0 self.main_db_sess.commit() def monitor_processes(self,ready_all=False): outputs = [] while True: for result in self.presults: if result.ready(): outputs.append( result.get() ) del( self.presults[ self.presults.index(result) ] ) if (ready_all and self.presults) or ( len(self.presults) == self.config.process_count): sleep(.1) continue else: return outputs def do_authentication_callback(self, username, password, stop_on_valid=False, *args, **kwargs): ''' When the maximum number of processes have been engaged to make authentication requests, call monitor_processes to watch each process until authentication finishes. Once completeds, the outputs are passed to handle_outputs, which is responsible for logging the outcome of the authentication request and updating the proper SQL record with the outcome. ''' recovered = False if len(self.presults) == self.config.process_count: outputs = self.monitor_processes() recovered = self.handle_outputs(outputs) if recovered and stop_on_valid: return recovered self.presults.append( self.pool.apply_async( self.config.authentication_callback, ( (username,password,) ) ) ) return recovered def shutdown(self): self.logger.log(GENERAL_EVENTS,'Shutting attack down') self.attack.complete = True self.attack.end_time = BruteTime.current_time() self.main_db_sess.commit() self.logger.log(GENERAL_EVENTS,'Closing/joining Processes') if self.pool: self.pool.close() self.pool.join() close_all_sessions()
MIT License
pyansys/pymapdl
ansys/mapdl/core/solution.py
Solution.vector_cnv
python
def vector_cnv(self):
    return self._mapdl.get_value("ACTIVE", 0, "SOLU", "VMCV")
Vector magnetic potential convergence value.

Examples
--------
>>> mapdl.solution.vector_cnv
0.0
https://github.com/pyansys/pymapdl/blob/e5cc21471c3a8fcef1f7b88359e38aa89cd63f73/ansys/mapdl/core/solution.py#L263-L271
import weakref from ansys.mapdl.core.mapdl import _MapdlCore class Solution: def __init__(self, mapdl): if not isinstance(mapdl, _MapdlCore): raise TypeError("Must be implemented from MAPDL class") self._mapdl_weakref = weakref.ref(mapdl) @property def _mapdl(self): return self._mapdl_weakref() def _set_log_level(self, level): self._mapdl.set_log_level(level) @property def _log(self): return self._mapdl._log @property def time_step_size(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "DTIME") @property def n_cmls(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "NCMLS") @property def n_cmss(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "NCMSS") @property def n_eqit(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "EQIT") @property def n_cmit(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "NCMIT") @property def converged(self): return bool(self._mapdl.get_value("ACTIVE", 0, "SOLU", "CNVG")) @property def mx_dof(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "MXDVL") @property def res_frq(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "RESFRQ") @property def res_eig(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "RESEIG") @property def decent_parm(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "DSPRM") @property def force_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "FOCV") @property def moment_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "MOCV") @property def heat_flow_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "HFCV") @property def magnetic_flux_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "MFCV") @property def current_segment_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "CSCV") @property def current_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "CUCV") @property def fluid_flow_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "FFCV") @property def displacement_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "DICV") @property def rotation_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "ROCV") @property def temperature_cnv(self): return self._mapdl.get_value("ACTIVE", 0, "SOLU", "TECV") @property
MIT License
facebookresearch/mtenv
local_dm_control_suite/humanoid.py
Humanoid.get_reward
python
def get_reward(self, physics):
    standing = rewards.tolerance(
        physics.head_height(),
        bounds=(_STAND_HEIGHT, float("inf")),
        margin=_STAND_HEIGHT / 4,
    )
    upright = rewards.tolerance(
        physics.torso_upright(),
        bounds=(0.9, float("inf")),
        sigmoid="linear",
        margin=1.9,
        value_at_margin=0,
    )
    stand_reward = standing * upright
    small_control = rewards.tolerance(
        physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic"
    ).mean()
    small_control = (4 + small_control) / 5
    if self._move_speed == 0:
        horizontal_velocity = physics.center_of_mass_velocity()[[0, 1]]
        dont_move = rewards.tolerance(horizontal_velocity, margin=2).mean()
        return small_control * stand_reward * dont_move
    else:
        com_velocity = np.linalg.norm(physics.center_of_mass_velocity()[[0, 1]])
        move = rewards.tolerance(
            com_velocity,
            bounds=(self._move_speed, float("inf")),
            margin=self._move_speed,
            value_at_margin=0,
            sigmoid="linear",
        )
        move = (5 * move + 1) / 6
        return small_control * stand_reward * move
Returns a reward to the agent.
https://github.com/facebookresearch/mtenv/blob/4a6d9d6fdfb321f1b51f890ef36b5161359e972d/local_dm_control_suite/humanoid.py#L204-L237
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from dm_control import mujoco from dm_control.rl import control from . import base from . import common from dm_control.suite.utils import randomizers from dm_control.utils import containers from dm_control.utils import rewards import numpy as np _DEFAULT_TIME_LIMIT = 25 _CONTROL_TIMESTEP = 0.025 _STAND_HEIGHT = 1.4 _WALK_SPEED = 1 _RUN_SPEED = 10 SUITE = containers.TaggedTasks() def get_model_and_assets(): return common.read_model("humanoid.xml"), common.ASSETS @SUITE.add("benchmarking") def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): physics = Physics.from_xml_string(*get_model_and_assets()) task = Humanoid(move_speed=0, pure_state=False, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP, **environment_kwargs ) @SUITE.add("benchmarking") def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): physics = Physics.from_xml_string(*get_model_and_assets()) task = Humanoid(move_speed=_WALK_SPEED, pure_state=False, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP, **environment_kwargs ) @SUITE.add("benchmarking") def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): physics = Physics.from_xml_string(*get_model_and_assets()) task = Humanoid(move_speed=_RUN_SPEED, pure_state=False, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP, **environment_kwargs ) @SUITE.add() def run_pure_state( time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None ): physics = Physics.from_xml_string(*get_model_and_assets()) task = Humanoid(move_speed=_RUN_SPEED, pure_state=True, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP, **environment_kwargs ) class Physics(mujoco.Physics): def torso_upright(self): return self.named.data.xmat["torso", "zz"] def head_height(self): return self.named.data.xpos["head", "z"] def center_of_mass_position(self): return self.named.data.subtree_com["torso"].copy() def center_of_mass_velocity(self): return self.named.data.sensordata["torso_subtreelinvel"].copy() def torso_vertical_orientation(self): return self.named.data.xmat["torso", ["zx", "zy", "zz"]] def joint_angles(self): return self.data.qpos[7:].copy() def extremities(self): torso_frame = self.named.data.xmat["torso"].reshape(3, 3) torso_pos = self.named.data.xpos["torso"] positions = [] for side in ("left_", "right_"): for limb in ("hand", "foot"): torso_to_limb = self.named.data.xpos[side + limb] - torso_pos positions.append(torso_to_limb.dot(torso_frame)) return np.hstack(positions) class Humanoid(base.Task): def __init__(self, move_speed, pure_state, random=None): self._move_speed = move_speed self._pure_state = pure_state super(Humanoid, self).__init__(random=random) def initialize_episode(self, physics): penetrating = True while penetrating: randomizers.randomize_limited_and_rotational_joints(physics, self.random) physics.after_reset() penetrating = physics.data.ncon > 0 super(Humanoid, self).initialize_episode(physics) def get_observation(self, physics): obs = 
collections.OrderedDict() if self._pure_state: obs["position"] = physics.position() obs["velocity"] = physics.velocity() else: obs["joint_angles"] = physics.joint_angles() obs["head_height"] = physics.head_height() obs["extremities"] = physics.extremities() obs["torso_vertical"] = physics.torso_vertical_orientation() obs["com_velocity"] = physics.center_of_mass_velocity() obs["velocity"] = physics.velocity() return obs
MIT License
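The reward in the record above is a product of bounded tolerance terms (head height, torso uprightness, control magnitude, and a velocity term). The sketch below uses a simplified linear-margin tolerance just to show how the standing and uprightness factors combine; it is not dm_control's rewards.tolerance, and the physics readings are made-up numbers.

import numpy as np

def linear_tolerance(x, lower, margin):
    # Simplified stand-in for rewards.tolerance(..., sigmoid='linear',
    # value_at_margin=0): 1.0 at or above `lower`, decaying linearly to 0
    # over `margin` below it.
    return float(np.clip(1.0 - (lower - x) / margin, 0.0, 1.0))

_STAND_HEIGHT = 1.4
head_height = 1.2      # hypothetical physics.head_height()
torso_zz = 0.95        # hypothetical physics.torso_upright()

standing = linear_tolerance(head_height, _STAND_HEIGHT, _STAND_HEIGHT / 4)
upright = linear_tolerance(torso_zz, 0.9, 1.9)
stand_reward = standing * upright
print(round(standing, 3), round(upright, 3), round(stand_reward, 3))
# 0.429 1.0 0.429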
lukaabra/subcrawl
ui/bindings.py
SubCrawl.on_click_remove_entry
python
def on_click_remove_entry(self):
    selected_rows = self.ScannedItems.selectionModel().selectedRows()
    rows_to_delete = dict()
    for row in selected_rows:
        rows_to_delete[row.row()] = (row, row.data())

    rows_to_delete = collections.OrderedDict(sorted(rows_to_delete.items(), reverse=True))

    for row in rows_to_delete:
        _, row_id = rows_to_delete[row]
        condition = ("id", row_id)
        self.interactor.delete_entry(condition)
        self.ScannedItems.removeRow(row)

    self.interactor.commit_and_renew_cursor()
Deletes rows from the highest index to the lowest, so as to avoid bugs
while iterating through the rows and deleting them at the same time.
https://github.com/lukaabra/subcrawl/blob/85da48f513dceed2ed9f9c60f09914996357e7f9/ui/bindings.py#L241-L259
from ui.gui import Ui_SubCrawl from PyQt5 import QtWidgets from PyQt5.QtCore import pyqtSlot import os import requests import winsound import json import collections from scanner import Scanner from db_interactor import _DBInteractor from subtitles import SubtitlePreference, SubtitleDownloader class SubCrawl(Ui_SubCrawl, QtWidgets.QMainWindow): def __init__(self): Ui_SubCrawl.__init__(self) QtWidgets.QMainWindow.__init__(self) self.setupUi(self) self.selection_confirmed = False self.program_dir = os.getcwd() self.total_files = 0 self.subtitle_preference = SubtitlePreference() self.interactor = _DBInteractor(self.program_dir) self.interactor.check_if_entries_exist() self._populate_table() self.subtitle_downloader = SubtitleDownloader(self.subtitle_preference, self.PromptLabel, self.ProgressBar, self.interactor) @pyqtSlot() def _populate_table(self, db_table="all_movies", condition=None): self.ScannedItems.setRowCount(0) table_row = self.ScannedItems.rowCount() for entry in self.interactor.retrieve(db_table, condition): self.ScannedItems.insertRow(table_row) self._set_items_in_table(table_row, entry) table_row = self.ScannedItems.rowCount() if self.SelectAllRadio.isChecked(): self.select_all_movies(True) def _set_items_in_table(self, table_row: int, entry: tuple): entry_id, _, entry_location, __, entry_title, entry_year, entry_rating, entry_subs, __ = entry self.ScannedItems.setItem(table_row, 0, QtWidgets.QTableWidgetItem(str(entry_id))) self.ScannedItems.setItem(table_row, 1, QtWidgets.QTableWidgetItem(entry_title)) self.ScannedItems.setItem(table_row, 2, QtWidgets.QTableWidgetItem(entry_rating)) self.ScannedItems.setItem(table_row, 3, QtWidgets.QTableWidgetItem(entry_year)) self.ScannedItems.setItem(table_row, 4, QtWidgets.QTableWidgetItem(entry_location)) self.ScannedItems.setItem(table_row, 5, QtWidgets.QTableWidgetItem(entry_subs)) def bind_browse_button(self): self.BrowseButton.clicked.connect(self.on_click_browse) @pyqtSlot() def on_click_browse(self): self.PromptLabel.setText("Browsing...") directory = os.path.join(os.environ["HOMEPATH"], "Desktop") selected_dir = QtWidgets.QFileDialog.getExistingDirectory(self, "Open a folder", directory, QtWidgets.QFileDialog.ShowDirsOnly) if selected_dir == "": selected_dir = directory self.SelectedFolderDisplay.setText(selected_dir) self.PromptLabel.setText("Folder selected") def bind_clear_button(self): self.ClearDBButton.clicked.connect(self.on_click_clear_db) @pyqtSlot() def on_click_clear_db(self): self.interactor.clear_db() self.interactor.clear_db("selected_movies") self.interactor.clear_db("search_subs") self.interactor.clear_db("download_subs") self.ScannedItems.setRowCount(0) self.interactor.commit_and_renew_cursor() self.PromptLabel.setText("Database cleared!") def bind_confirm_selection(self): self.ConfirmSelectionButton.clicked.connect(self.on_click_confirm_selection) def on_click_confirm_selection(self): self.ScannedItems.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection) self.CancelSelectionButton.setEnabled(True) self.ConfirmSelectionButton.setEnabled(False) self.DownloadButton.setEnabled(True) self.RemoveEntryButton.setEnabled(False) selected_rows = self.ScannedItems.selectionModel().selectedRows() for row in selected_rows: condition = ("id", str(row.data())) self.interactor.copy_to_table("all_movies", "selected_movies", condition) self.interactor.commit_and_renew_cursor() self.ScannedItems.setLineWidth(2) self.PromptLabel.setText("Selection confirmed!") def bind_cancel_selection(self): 
self.CancelSelectionButton.clicked.connect(self.on_click_cancel_selection) def on_click_cancel_selection(self): self.ScannedItems.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.CancelSelectionButton.setEnabled(False) self.ConfirmSelectionButton.setEnabled(True) self.RemoveEntryButton.setEnabled(False) self.DownloadButton.setEnabled(False) self.ScannedItems.setLineWidth(1) self.interactor.clear_db("selected_movies") self.PromptLabel.setText("Canceled selection") def bind_combo_box(self): self.LanguageComboBox.activated.connect(self.on_click_language_combo_box) def on_click_language_combo_box(self): selected_language = self.LanguageComboBox.currentText() self.LanguageLabel.setText("Language: {}".format(selected_language)) self.subtitle_preference.add_language(selected_language) self.PromptLabel.setText("Subtitle language changed to {}".format(selected_language)) def bind_download_button(self): self.DownloadButton.clicked.connect(self.on_click_download) def on_click_download(self): self.PromptLabel.setText("Commencing download ...") self.subtitle_downloader.download_from_opensubtitles() self.on_click_scan() if self.SelectAllRadio.isChecked(): self.SelectAllRadio.setChecked(False) def bind_radio_buttons(self): self.ShowAllRadio.toggled.connect(self.view_radio_buttons) self.ShowNoSubsRadio.toggled.connect(self.view_radio_buttons) self.ShowSubsRadio.toggled.connect(self.view_radio_buttons) self.SelectAllRadio.toggled.connect(self.select_all_movies) @pyqtSlot() def view_radio_buttons(self): if self.ShowAllRadio.isChecked(): self.ScannedItems.setRowCount(0) self._populate_table("all_movies") elif self.ShowNoSubsRadio.isChecked(): self.ScannedItems.setRowCount(0) self._populate_table("all_movies", ("subtitles", str(False))) elif self.ShowSubsRadio.isChecked(): self.ScannedItems.setRowCount(0) self._populate_table("all_movies", ("subtitles", str(True))) def select_all_movies(self, checked: bool): table_range = QtWidgets.QTableWidgetSelectionRange(0, 0, self.ScannedItems.rowCount() - 1, self.ScannedItems.columnCount() - 1) if checked: self.ScannedItems.setRangeSelected(table_range, True) else: self.ScannedItems.setRangeSelected(table_range, False) def bind_remove_entry(self): self.RemoveEntryButton.clicked.connect(self.on_click_remove_entry)
MIT License
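The handler in the record above removes table rows in descending index order, since removing a lower row first would shift the indices of the rows still queued for deletion. A self-contained PyQt5 sketch of the same pattern outside the SubCrawl class follows; the table contents and selection here are made up.

from PyQt5 import QtWidgets

app = QtWidgets.QApplication([])
table = QtWidgets.QTableWidget(5, 1)
table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
for row in range(5):
    table.setItem(row, 0, QtWidgets.QTableWidgetItem("entry %d" % row))
table.selectRow(1)
table.selectRow(3)

# Highest index first, so earlier removals do not invalidate later ones.
for row in sorted((idx.row() for idx in table.selectionModel().selectedRows()),
                  reverse=True):
    table.removeRow(row)

print(table.rowCount())  # 3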
jagter/python-netbox
netbox/virtualization.py
Virtualization.delete_cluster_type_by_id
python
def delete_cluster_type_by_id(self, cluster_type_id):
    return self.netbox_con.delete('/virtualization/cluster-types/', cluster_type_id)
Delete a cluster type

:param cluster_type_id: cluster type to delete
:return: bool True if successful, otherwise delete exception
https://github.com/jagter/python-netbox/blob/fc68c2f40938d7686984151dfef690fd4e6aea5e/netbox/virtualization.py#L126-L132
import netbox.exceptions as exceptions class Virtualization(object): def __init__(self, netbox_con): self.netbox_con = netbox_con def get_choices(self, choice_id=None): return self.netbox_con.get('/virtualization/_choices/', choice_id) def get_clusters(self, **kwargs): return self.netbox_con.get('/virtualization/clusters/', **kwargs) def create_cluster(self, name, type, **kwargs): try: cluster_type_id = self.get_cluster_types(name=type)[0]['id'] except IndexError: raise exceptions.NotFoundException({"detail": "cluster-type: {}".format(name)}) from None required_fields = {"name": name, "type": cluster_type_id} return self.netbox_con.post('/virtualization/clusters/', required_fields, **kwargs) def delete_cluster(self, name): try: cluster_id = self.get_clusters(name=name)[0]['id'] except IndexError: raise exceptions.NotFoundException({"detail": "cluster {}".format(name)}) from None return self.netbox_con.delete('/virtualization/clusters/', cluster_id) def delete_cluster_by_id(self, cluster_id): return self.netbox_con.delete('/virtualization/clusters/', cluster_id) def update_cluster(self, name, **kwargs): try: cluster_id = self.get_clusters(name=name)[0]['id'] except IndexError: raise exceptions.NotFoundException({"detail": "cluster: {}".format(name)}) from None return self.netbox_con.patch('/virtualization/clusters/', cluster_id, **kwargs) def update_cluster_by_id(self, cluster_id, **kwargs): return self.netbox_con.patch('/virtualization/clusters/', cluster_id, **kwargs) def get_cluster_types(self, **kwargs): return self.netbox_con.get('/virtualization/cluster-types/', **kwargs) def create_cluster_type(self, name, slug): required_fields = {"name": name, "slug": slug} return self.netbox_con.post('/virtualization/cluster-types/', required_fields) def update_cluster_type(self, name, **kwargs): try: type_id = self.get_cluster_types(name=name)[0]['id'] except IndexError: raise exceptions.NotFoundException({"detail": "cluster-type: {}".format(name)}) from None return self.netbox_con.patch('/virtualization/cluster-types/', type_id, **kwargs) def update_cluster_type_by_id(self, cluster_type_id, **kwargs): return self.netbox_con.patch('/virtualization/cluster-types/', cluster_type_id, **kwargs) def delete_cluster_type(self, name): try: cluster_type_id = self.get_cluster_types(name=name)[0]['id'] except IndexError: raise exceptions.NotFoundException({"detail": "cluster-type: {}".format(name)}) from None return self.netbox_con.delete('/virtualization/cluster-types/', cluster_type_id)
Apache License 2.0
kaituoxu/conv-tasnet
src/utils.py
remove_pad
python
def remove_pad(inputs, inputs_lengths):
    results = []
    dim = inputs.dim()
    if dim == 3:
        C = inputs.size(1)
    for input, length in zip(inputs, inputs_lengths):
        if dim == 3:
            results.append(input[:, :length].view(C, -1).cpu().numpy())
        elif dim == 2:
            results.append(input[:length].view(-1).cpu().numpy())
    return results
Args:
    inputs: torch.Tensor, [B, C, T] or [B, T], B is batch size
    inputs_lengths: torch.Tensor, [B]
Returns:
    results: a list containing B items, each item is [C, T], T varies
https://github.com/kaituoxu/conv-tasnet/blob/94eac1023eaaf11ca1bf3c8845374f7e4cd0ef4c/src/utils.py#L49-L66
import math

import torch


def overlap_and_add(signal, frame_step):
    outer_dimensions = signal.size()[:-2]
    frames, frame_length = signal.size()[-2:]

    subframe_length = math.gcd(frame_length, frame_step)
    subframe_step = frame_step // subframe_length
    subframes_per_frame = frame_length // subframe_length
    output_size = frame_step * (frames - 1) + frame_length
    output_subframes = output_size // subframe_length

    subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)

    frame = torch.arange(0, output_subframes).unfold(0, subframes_per_frame, subframe_step)
    frame = signal.new_tensor(frame).long()
    frame = frame.contiguous().view(-1)

    result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)
    result.index_add_(-2, frame, subframe_signal)
    result = result.view(*outer_dimensions, -1)
    return result
MIT License
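A small usage sketch for remove_pad on a padded batch; it assumes the function above is importable (the import path src.utils is taken from the record's function_path, and the tensors are made up).

import torch
from src.utils import remove_pad  # path taken from the record above

# Batch of 2 items, 2 sources each, padded to T=5; the second item has only
# 3 valid timesteps.
estimates = torch.arange(2 * 2 * 5, dtype=torch.float32).view(2, 2, 5)
lengths = torch.tensor([5, 3])

trimmed = remove_pad(estimates, lengths)
print([t.shape for t in trimmed])  # [(2, 5), (2, 3)]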
academysoftwarefoundation/opencue
pycue/opencue/wrappers/host.py
Host.lock
python
def lock(self):
    self.stub.Lock(host_pb2.HostLockRequest(host=self.data), timeout=Cuebot.Timeout)
Locks the host so that it no longer accepts new frames
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/pycue/opencue/wrappers/host.py#L64-L66
import enum import os import time from opencue import Cuebot from opencue.compiled_proto import comment_pb2 from opencue.compiled_proto import host_pb2 import opencue.wrappers.comment import opencue.wrappers.proc class Host(object): class HardwareState(enum.IntEnum): UP = host_pb2.UP DOWN = host_pb2.DOWN REBOOTING = host_pb2.REBOOTING REBOOT_WHEN_IDLE = host_pb2.REBOOT_WHEN_IDLE REPAIR = host_pb2.REPAIR class HostTagType(enum.IntEnum): MANUAL = host_pb2.MANUAL HARDWARE = host_pb2.HARDWARE ALLOC = host_pb2.ALLOC HOSTNAME = host_pb2.HOSTNAME class LockState(enum.IntEnum): OPEN = host_pb2.OPEN LOCKED = host_pb2.LOCKED NIMBY_LOCKED = host_pb2.NIMBY_LOCKED class ThreadMode(enum.IntEnum): AUTO = host_pb2.AUTO ALL = host_pb2.ALL VARIABLE = host_pb2.VARIABLE def __init__(self, host=None): self.data = host self.__id = host.id self.stub = Cuebot.getStub('host')
Apache License 2.0
ali5h/rules_pip
third_party/py/pip/_vendor/requests/api.py
request
python
def request(method, url, **kwargs):
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
Constructs and sends a :class:`Request <Request>`.

:param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
    in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
    object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
    ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
    or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
    defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
    to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
    before giving up, as a float, or a :ref:`(connect timeout, read
    timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
    the server's TLS certificate, or a string, in which case it must be a path
    to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response

Usage::

    >>> import requests
    >>> req = requests.request('GET', 'https://httpbin.org/get')
    >>> req
    <Response [200]>
https://github.com/ali5h/rules_pip/blob/fb02cb7bf5c03bc8cd4269679e4aea2e1839b501/third_party/py/pip/_vendor/requests/api.py#L16-L61
from . import sessions
MIT License
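Since every convenience helper in requests.api (get, post, and so on) funnels into this request() function, the two calls below are equivalent. This is a usage sketch built from the docstring above; the URL is the docstring's own httpbin example.

import requests

# Calling request() directly vs. using the helper that fixes the method.
resp = requests.request('GET', 'https://httpbin.org/get',
                        params={'q': 'test'}, timeout=(3.05, 27))
same = requests.get('https://httpbin.org/get',
                    params={'q': 'test'}, timeout=(3.05, 27))
print(resp.status_code, resp.json()['args'])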
dbrattli/oslash
oslash/state.py
State.get
python
def get(cls) -> "State[TState, TState]":
    return State(lambda state: (state, state))
r"""get = state $ \s -> (s, s)
https://github.com/dbrattli/oslash/blob/c271c7633daf9d72393b419cfc9229e427e6a42a/oslash/state.py#L58-L60
from typing import Callable, Tuple, Any, TypeVar, Generic from .util import Unit from .typing import Functor from .typing import Monad TState = TypeVar("TState") TSource = TypeVar("TSource") TResult = TypeVar("TResult") class State(Generic[TSource, TState]): def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None: self._fn = fn @classmethod def unit(cls, value: TSource) -> "State[TSource, TState]": return cls(lambda state: (value, state)) def map(self, mapper: Callable[[TSource], TResult]) -> "State[TResult, TState]": def _(a: Any, state: Any) -> Tuple[Any, Any]: return mapper(a), state return State(lambda state: _(*self.run(state))) def bind(self, fn: Callable[[TSource], "State[TState, TResult]"]) -> "State[TResult, TState]": def _(result: Any, state: Any) -> Tuple[Any, Any]: return fn(result).run(state) return State(lambda state: _(*self.run(state))) @classmethod
MIT License
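A small illustration of State.get in use, assuming the module path mirrors oslash/state.py and that the class exposes the usual run accessor that applies the wrapped state function (bind and map in the context above already call self.run). The numbers are made up.

from oslash.state import State

# Read the current state, then return twice its value while leaving
# the state untouched; run(...) yields a (value, state) pair.
doubled = State.get().bind(lambda s: State.unit(s * 2))
print(doubled.run(21))  # expected (42, 21)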
balena-io/balena-sdk-python
balena/models/device.py
Device.get_display_name
python
def get_display_name(self, device_type_slug):
    device_types = self.config.get_device_types()
    display_name = [device['name'] for device in device_types if device['slug'] == device_type_slug]
    if display_name:
        return display_name[0]
    else:
        raise exceptions.InvalidDeviceType(device_type_slug)
Get display name for a device. Args: device_type_slug (str): device type slug. Returns: str: device display name. Raises: InvalidDeviceType: if device type slug is not supported. Examples: >>> balena.models.device.get_display_name('intel-edison') u'Intel Edison' >>> balena.models.device.get_display_name('raspberry-pi') u'Raspberry Pi'
https://github.com/balena-io/balena-sdk-python/blob/29772fa523b15d597b0ddcd8178f9395cdda4450/balena/models/device.py#L575-L602
import sys import binascii import os from datetime import datetime import json from collections import defaultdict import semver try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin from ..base_request import BaseRequest from .config import Config from .device_os import DeviceOs from ..settings import Settings from ..auth import Auth from .. import exceptions from ..resources import Message from .application import Application from .release import Release from .hup import Hup LOCAL_MODE_MIN_OS_VERSION = '2.0.0' LOCAL_MODE_MIN_SUPERVISOR_VERSION = '4.0.0' LOCAL_MODE_ENV_VAR = 'RESIN_SUPERVISOR_LOCAL_MODE' OVERRIDE_LOCK_ENV_VAR = 'RESIN_OVERRIDE_LOCK' class DeviceStatus(object): IDLE = "Idle" CONFIGURING = "Configuring" UPDATING = "Updating" OFFLINE = "Offline" POST_PROVISIONING = "Post Provisioning" INACTIVE = "Inactive" class Device(object): def __init__(self): self.base_request = BaseRequest() self.config = Config() self.settings = Settings() self.application = Application() self.auth = Auth() self.release = Release() self.device_os = DeviceOs() self.hup = Hup() def __upsert_device_config_variable(self, device, name, value): try: data = { 'device': device['id'], 'name': name, 'value': value } return self.base_request.request( 'device_config_variable', 'POST', data=data, endpoint=self.settings.get('pine_endpoint') ) except exceptions.RequestError as e: if 'Unique key constraint violated' in e.message or 'must be unique' in e.message: params = { 'filters': { 'device': device['id'], 'name': name } } data = { 'value': value } return self.base_request.request( 'device_config_variable', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) raise e def __get_applied_device_config_variable_value(self, uuid, name): raw_query = "$filter=uuid%20eq%20'{uuid}'&$expand=device_config_variable($select=value&$filter=name%20eq%20'{name}'),belongs_to__application($select=id&$expand=application_config_variable($select=value&$filter=name%20eq%20'{name}'))".format(name=name, uuid=uuid) raw_data = self.base_request.request( 'device', 'GET', raw_query=raw_query, endpoint=self.settings.get('pine_endpoint') )['d'] if len(raw_data) > 0: device_config = raw_data[0]['device_config_variable'] app_config = raw_data[0]['belongs_to__application'][0]['application_config_variable'] if device_config: return device_config[0]['value'] elif app_config: return app_config[0]['value'] return None else: raise exceptions.DeviceNotFound(uuid) def get(self, uuid): params = { 'filter': 'uuid', 'eq': uuid } try: devices = self.base_request.request( 'device', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] if len(devices) > 1: raise exceptions.AmbiguousDevice(uuid) return devices[0] except IndexError: raise exceptions.DeviceNotFound(uuid) def get_all(self): return self.base_request.request( 'device', 'GET', endpoint=self.settings.get('pine_endpoint'))['d'] def get_all_by_application(self, name): params = { 'filter': 'app_name', 'eq': name } app = self.base_request.request( 'application', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] if app: params = { 'filter': 'belongs_to__application', 'eq': app[0]['id'] } return self.base_request.request( 'device', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] def get_all_by_application_id(self, appid): params = { 'filter': 'belongs_to__application', 'eq': appid } return self.base_request.request( 'device', 'GET', params=params, 
endpoint=self.settings.get('pine_endpoint') )['d'] def get_by_name(self, name): params = { 'filter': 'device_name', 'eq': name } return self.base_request.request( 'device', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] def __get_single_install_summary(self, raw_data): image = raw_data['image'][0] service = image['is_a_build_of__service'][0] release = None if 'is_provided_by__release' in raw_data: release = raw_data['is_provided_by__release'][0] install = { 'service_name': service['service_name'], 'image_id': image['id'], 'service_id': service['id'], } if release: install['commit'] = release['commit'] raw_data.pop('is_provided_by__release', None) raw_data.pop('image', None) install.update(raw_data) return install def get_with_service_details(self, uuid, expand_release=True): release = '' if expand_release: release = ",is_provided_by__release($select=id,commit)" raw_query = "$filter=uuid%20eq%20'{uuid}'&$expand=image_install($select=id,download_progress,status,install_date&$filter=status%20ne%20'deleted'&$expand=image($select=id&$expand=is_a_build_of__service($select=id,service_name)){release}),gateway_download($select=id,download_progress,status&$filter=status%20ne%20'deleted'&$expand=image($select=id&$expand=is_a_build_of__service($select=id,service_name)))".format(uuid=uuid, release=release) raw_data = self.base_request.request( 'device', 'GET', raw_query=raw_query, endpoint=self.settings.get('pine_endpoint') )['d'] if len(raw_data) == 0: raise exceptions.DeviceNotFound(uuid) else: raw_data = raw_data[0] groupedServices = defaultdict(list) for obj in [self.__get_single_install_summary(i) for i in raw_data['image_install']]: groupedServices[obj.pop('service_name', None)].append(obj) raw_data['current_services'] = dict(groupedServices) raw_data['current_gateway_downloads'] = [self.__get_single_install_summary(i) for i in raw_data['gateway_download']] raw_data.pop('image_install', None) raw_data.pop('gateway_download', None) return raw_data def get_name(self, uuid): return self.get(uuid)['device_name'] def get_application_name(self, uuid): app_id = self.get(uuid)['belongs_to__application']['__id'] params = { 'filter': 'id', 'eq': app_id } return self.base_request.request( 'application', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'][0]['app_name'] def has(self, uuid): params = { 'filter': 'uuid', 'eq': uuid } return len( self.base_request.request( 'device', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] ) > 0 def is_online(self, uuid): return self.get(uuid)['is_online'] def get_local_ip_address(self, uuid): if self.is_online(uuid): device = self.get(uuid) return list(set(device['ip_address'].split()) - set(device['vpn_address'].split())) else: raise exceptions.DeviceOffline(uuid) def deactivate(self, uuid): if self.has(uuid): params = { 'filter': 'uuid', 'eq': uuid } data = { 'is_active': False } return self.base_request.request( 'device', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) else: raise exceptions.DeviceNotFound(uuid) def remove(self, uuid): params = { 'filter': 'uuid', 'eq': uuid } return self.base_request.request( 'device', 'DELETE', params=params, endpoint=self.settings.get('pine_endpoint'), login=True ) def identify(self, uuid): data = { 'uuid': uuid } return self.base_request.request( 'blink', 'POST', data=data, endpoint=self.settings.get('api_endpoint'), login=True ) def rename(self, uuid, new_name): if self.has(uuid): params = { 'filter': 'uuid', 'eq': uuid } 
data = { 'device_name': new_name } return self.base_request.request( 'device', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) else: raise exceptions.DeviceNotFound(uuid) def note(self, uuid, note): if self.has(uuid): params = { 'filter': 'uuid', 'eq': uuid } data = { 'note': note } return self.base_request.request( 'device', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) else: raise exceptions.DeviceNotFound(uuid)
Apache License 2.0
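The docstring's examples translate into a defensive lookup like the one below. This is a hedged sketch that assumes a configured SDK instance created via the package's Balena entry point; the slug values come from the docstring itself.

from balena import Balena, exceptions

balena = Balena()
try:
    name = balena.models.device.get_display_name('raspberry-pi')
    print(name)  # e.g. "Raspberry Pi"
except exceptions.InvalidDeviceType as err:
    print('Unsupported device type slug:', err)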
airbnb/streamalert
streamalert/shared/firehose.py
FirehoseClient.generate_firehose_name
python
def generate_firehose_name(cls, prefix, log_stream_name):
    if prefix:
        prefix += '_'

    stream_name = cls.sanitized_value(cls.DEFAULT_FIREHOSE_FMT.format(prefix, log_stream_name))
    if len(stream_name) <= cls.AWS_FIREHOSE_NAME_MAX_LEN:
        return stream_name

    base_name = stream_name[:cls.AWS_FIREHOSE_NAME_MAX_LEN - cls.FIREHOSE_NAME_MIN_HASH_LEN]
    if not base_name.endswith('_'):
        base_name = '{}_'.format(
            base_name[:-1]
        ) if base_name[-2] != '_' else '{}_'.format(base_name[:-2])

    return '{}{}'.format(
        base_name, hashlib.md5(stream_name.encode()).hexdigest()
    )[:cls.AWS_FIREHOSE_NAME_MAX_LEN]
Generate a Firehose stream name compliant with the Firehose naming restriction, i.e. no longer than 64 characters. Args: prefix (str): The prefix defined in conf/global.json for the firehose stream name log_stream_name (str): The name of the log from conf/logs.json or conf/schemas/*.json Returns: str: the generated stream name
https://github.com/airbnb/streamalert/blob/ceb680b7b821ebf6b6800adf164977e57c886fef/streamalert/shared/firehose.py#L305-L335
from collections import defaultdict import json import hashlib import re import backoff import boto3 from botocore.exceptions import ClientError, HTTPClientError from botocore.exceptions import ConnectionError as BotocoreConnectionError from streamalert.shared import CLASSIFIER_FUNCTION_NAME import streamalert.shared.helpers.boto as boto_helpers from streamalert.shared.logger import get_logger from streamalert.shared.metrics import MetricLogger from streamalert.shared.backoff_handlers import ( backoff_handler, giveup_handler, success_handler ) LOGGER = get_logger(__name__) class FirehoseClient: SPECIAL_CHAR_REGEX = re.compile(r'\W') SPECIAL_CHAR_SUB = '_' MAX_BACKOFF_ATTEMPTS = 10 MAX_BACKOFF_FIBO_VALUE = 8 MAX_BATCH_COUNT = 500 MAX_BATCH_SIZE = 4000 * 1000 MAX_RECORD_SIZE = 1000 * 1000 - 2 DEFAULT_FIREHOSE_FMT = '{}streamalert_{}' EXCEPTIONS_TO_BACKOFF = (ClientError, BotocoreConnectionError, HTTPClientError) _ENABLED_LOGS = dict() AWS_FIREHOSE_NAME_MAX_LEN = 64 FIREHOSE_NAME_MIN_HASH_LEN = 8 def __init__(self, prefix, firehose_config=None, log_sources=None): self._prefix = prefix if firehose_config and firehose_config.get('use_prefix', True) else '' self._client = boto3.client('firehose', config=boto_helpers.default_config()) self.load_enabled_log_sources(firehose_config, log_sources, force_load=True) @classmethod def _records_to_json_list(cls, records): return [ json.dumps(record, separators=(',', ':')) + '\n' for record in records ] @classmethod def _record_batches(cls, records, function_name): records_json = cls._records_to_json_list(records) current_batch_size = 0 current_batch = [] for record in records_json: line_len = len(record) if ((len(current_batch) == cls.MAX_BATCH_COUNT) or (current_batch_size + line_len > cls.MAX_BATCH_SIZE)) and current_batch: yield current_batch[:] current_batch_size = 0 del current_batch[:] if line_len > cls.MAX_RECORD_SIZE: LOGGER.error('Record too large (%d) to send to Firehose:\n%s', line_len, record) cls._log_failed(1, function_name) continue current_batch_size += line_len current_batch.append(record) if current_batch: yield current_batch @classmethod def sanitized_value(cls, key): return re.sub(cls.SPECIAL_CHAR_REGEX, cls.SPECIAL_CHAR_SUB, key) @classmethod def sanitize_keys(cls, record): new_record = {} for key, value in record.items(): sanitized_key = cls.sanitized_value(key) if isinstance(value, dict): new_record[sanitized_key] = cls.sanitize_keys(record[key]) else: new_record[sanitized_key] = record[key] return new_record @staticmethod def _strip_successful_records(batch, response): success_indices = [idx for idx, rec in enumerate(response['RequestResponses']) if rec.get('RecordId')] for idx in sorted(success_indices, reverse=True): del batch[idx] def _categorize_records(self, payloads): categorized_records = defaultdict(list) for payload in payloads: if not self.enabled_log_source(payload.log_schema_type): continue categorized_records[payload.log_schema_type].extend(payload.parsed_records) return categorized_records @classmethod def _finalize(cls, response, stream_name, size, function_name): if not response: return if response.get('FailedPutCount'): failed_records = [ failed for failed in response['RequestResponses'] if failed.get('ErrorCode') ] cls._log_failed(response['FailedPutCount'], function_name) LOGGER.error( 'Failed to put the following records to firehose %s: %s', stream_name, json.dumps(failed_records[:1024], indent=2) ) return MetricLogger.log_metric(function_name, MetricLogger.FIREHOSE_RECORDS_SENT, size) LOGGER.info( 'Successfully 
sent %d message(s) to firehose %s with RequestId \'%s\'', size, stream_name, response.get('ResponseMetadata', {}).get('RequestId', '') ) @classmethod def _log_failed(cls, count, function_name): MetricLogger.log_metric(function_name, MetricLogger.FIREHOSE_FAILED_RECORDS, count) def _send_batch(self, stream_name, record_batch, function_name): @backoff.on_predicate(backoff.fibo, lambda resp: resp['FailedPutCount'] > 0, max_tries=self.MAX_BACKOFF_ATTEMPTS, max_value=self.MAX_BACKOFF_FIBO_VALUE, on_backoff=backoff_handler(debug_only=False), on_success=success_handler(), on_giveup=giveup_handler()) @backoff.on_exception(backoff.fibo, self.EXCEPTIONS_TO_BACKOFF, max_tries=self.MAX_BACKOFF_ATTEMPTS, on_backoff=backoff_handler(debug_only=False), on_success=success_handler(), on_giveup=giveup_handler()) def _firehose_request_helper(data): LOGGER.debug('Sending %d records to firehose %s', len(data), stream_name) response = self._client.put_record_batch(DeliveryStreamName=stream_name, Records=data) if response['FailedPutCount'] > 0: LOGGER.warning('Received non-zero FailedPutCount: %d', response['FailedPutCount']) self._strip_successful_records(data, response) return response records_data = [ {'Data': record} for record in record_batch ] try: return _firehose_request_helper(records_data) except self.EXCEPTIONS_TO_BACKOFF: LOGGER.exception('Firehose request failed') self._log_failed(len(records_data), function_name) @classmethod
Apache License 2.0
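A worked, self-contained sketch of the truncation-plus-hash rule the method above implements. The length constants are copied from the class; the log name is hypothetical.

import hashlib

MAX_LEN, HASH_LEN = 64, 8
stream_name = 'prefix_streamalert_' + 'some_extremely_long_log_type_name_with_many_parts'

if len(stream_name) > MAX_LEN:
    # Keep the first 56 characters, force a trailing underscore, then
    # append the md5 of the full name and cut back to 64 characters.
    base = stream_name[:MAX_LEN - HASH_LEN]
    if not base.endswith('_'):
        base = base[:-1] + '_' if base[-2] != '_' else base[:-2] + '_'
    stream_name = (base + hashlib.md5(stream_name.encode()).hexdigest())[:MAX_LEN]

print(len(stream_name) <= 64, stream_name)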
linrongc/solution_youtube8m_v3
parallel_eval.py
evaluation_loop
python
def evaluation_loop(fetches, saver, summary_writer, evl_metrics, last_global_step_val):
    global_step_val = -1
    with tf.Session(
        config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True), allow_soft_placement=True)) as sess:
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.train_dir)
        if latest_checkpoint:
            logging.info("Loading checkpoint for eval: %s", latest_checkpoint)
            saver.restore(sess, latest_checkpoint)
            global_step_val = os.path.basename(latest_checkpoint).split("-")[-1]
            saver.save(
                sess,
                os.path.join(FLAGS.train_dir, "inference_model", "inference_model"))
        else:
            logging.info("No checkpoint file found.")
            return global_step_val

        if global_step_val == last_global_step_val:
            logging.info(
                "skip this checkpoint global_step_val=%s "
                "(same as the previous one).", global_step_val)
            return global_step_val

        sess.run([tf.local_variables_initializer()])

        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess, coord=coord, daemon=True, start=True))
            logging.info("enter eval_once loop global_step_val = %s. ", global_step_val)

            evl_metrics.clear()

            examples_processed = 0
            while not coord.should_stop():
                batch_start_time = time.time()
                output_data_dict = sess.run(fetches)
                seconds_per_batch = time.time() - batch_start_time
                labels_val = output_data_dict["labels"]
                summary_val = output_data_dict["summary"]
                example_per_second = labels_val.shape[0] / seconds_per_batch
                examples_processed += labels_val.shape[0]

                predictions = output_data_dict["predictions"]
                if FLAGS.segment_labels:
                    predictions *= output_data_dict["label_weights"]
                iteration_info_dict = evl_metrics.accumulate(predictions, labels_val,
                                                             output_data_dict["loss"])
                iteration_info_dict["examples_per_second"] = example_per_second

                iterinfo = utils.AddGlobalStepSummary(
                    summary_writer,
                    global_step_val,
                    iteration_info_dict,
                    summary_scope="Eval")
                logging.info("examples_processed: %d | %s", examples_processed, iterinfo)

        except tf.errors.OutOfRangeError as e:
            logging.info(
                "Done with batched inference. Now calculating global performance "
                "metrics.")

            epoch_info_dict = evl_metrics.get()
            epoch_info_dict["epoch_id"] = global_step_val

            summary_writer.add_summary(summary_val, global_step_val)
            epochinfo = utils.AddEpochSummary(
                summary_writer,
                global_step_val,
                epoch_info_dict,
                summary_scope="Eval")
            logging.info(epochinfo)
            evl_metrics.clear()
        except Exception as e:
            logging.info("Unexpected exception: %s", str(e))
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)

        logging.info("Total: examples_processed: %d", examples_processed)
        return global_step_val
Run the evaluation loop once. Args: fetches: a dict of tensors to be run within Session. saver: a tensorflow saver to restore the model. summary_writer: a tensorflow summary_writer evl_metrics: an EvaluationMetrics object. last_global_step_val: the global step used in the previous evaluation. Returns: The global_step used in the latest model.
https://github.com/linrongc/solution_youtube8m_v3/blob/7d71bde4386e698cbba83931e2e3424238a2363a/parallel_eval.py#L228-L334
import json import os import time from absl import logging import eval_util import frame_level_models import nextvlad import losses import readers import tensorflow as tf import tensorflow.contrib.slim as slim from tensorflow import app from tensorflow import flags from tensorflow import gfile from tensorflow.python.lib.io import file_io from tensorflow.python.client import device_lib import utils import video_level_models import numpy as np FLAGS = flags.FLAGS if __name__ == "__main__": flags.DEFINE_string( "train_dir", "/tmp/yt8m_model/", "The directory to load the model files from. " "The tensorboard metrics files are also saved to this " "directory.") flags.DEFINE_string( "eval_data_pattern", "", "File glob defining the evaluation dataset in tensorflow.SequenceExample " "format. The SequenceExamples are expected to have an 'rgb' byte array " "sequence feature as well as a 'labels' int64 context feature.") flags.DEFINE_bool( "segment_labels", False, "If set, then --eval_data_pattern must be frame-level features (but with" " segment_labels). Otherwise, --eval_data_pattern must be aggregated " "video-level features. The model must also be set appropriately (i.e. to " "read 3D batches VS 4D batches.") flags.DEFINE_integer( "num_gpu", 1, "The maximum number of GPU devices to use for training. " ) flags.DEFINE_float("final_temperature", 1.0, "the temperature of final online learning") flags.DEFINE_float("final_lambda", 1.0, "the lambda of final online learning") flags.DEFINE_integer("batch_size", 1024, "How many examples to process per batch.") flags.DEFINE_integer("num_readers", 8, "How many threads to use for reading input files.") flags.DEFINE_boolean("run_once", False, "Whether to run eval only once.") flags.DEFINE_integer("top_k", 20, "How many predictions to output per video.") def find_class_by_name(name, modules): modules = [getattr(module, name, None) for module in modules] return next(a for a in modules if a) def get_input_evaluation_tensors(reader, data_pattern, batch_size=1024, num_readers=1): logging.info("Using batch size of %d for evaluation.", batch_size) with tf.name_scope("eval_input"): files = gfile.Glob(data_pattern) if not files: raise IOError("Unable to find the evaluation files.") logging.info("number of evaluation files: %d", len(files)) filename_queue = tf.train.string_input_producer( files, shuffle=False, num_epochs=1) eval_data = [ reader.prepare_reader(filename_queue) for _ in range(num_readers) ] return tf.train.batch_join( eval_data, batch_size=batch_size, capacity=3 * batch_size, allow_smaller_final_batch=True, enqueue_many=True) def build_graph(reader, model, eval_data_pattern, label_loss_fn, batch_size=1024, num_readers=1): global_step = tf.Variable(0, trainable=False, name="global_step") input_data_dict = get_input_evaluation_tensors( reader, eval_data_pattern, batch_size=batch_size, num_readers=num_readers) video_id_batch = input_data_dict["video_ids"] model_input_raw = input_data_dict["video_matrix"] labels_batch = input_data_dict["labels"] num_frames = input_data_dict["num_frames"] tf.summary.histogram("model_input_raw", model_input_raw) local_device_protos = device_lib.list_local_devices() gpus = [x.name for x in local_device_protos if x.device_type == "GPU"] gpus = gpus[:FLAGS.num_gpu] num_gpus = len(gpus) if num_gpus > 0: logging.info("Using the following GPUs to train: " + str(gpus)) num_towers = num_gpus device_string = "/gpu:%d" else: logging.info("No GPUs found. 
Training on CPU.") num_towers = 1 device_string = "/cpu:%d" print("flags!!!", device_string) if FLAGS.segment_labels: label_weights = input_data_dict["label_weights"] else: label_weights = None offset = np.array([4. / 512] * 1024 + [0] * 128) offset = tf.constant(offset, dtype=tf.float32) eigen_val = tf.constant(np.sqrt(np.load("yt8m_pca/eigenvals.npy")[:1024, 0]), dtype=tf.float32) model_input = tf.multiply(model_input_raw - offset, tf.pad(eigen_val + 1e-4, [[0, 128]], constant_values=1.)) tower_logits = [] for i in range(num_towers): with tf.device(device_string % i): with tf.variable_scope("tower_%d" % i, reuse=False): result = model.create_model( model_input, num_frames=num_frames, vocab_size=reader.num_classes, labels=labels_batch, is_training=False) logits = result["logits"] tower_logits.append(logits) with tf.device(device_string % 0): with tf.variable_scope("ensemble"): ftr_mean = tf.reduce_mean(model_input, axis=1) print("ftr mean shape: ", ftr_mean.get_shape().as_list()) ftr_mean = slim.batch_norm(ftr_mean, center=True, scale=True, fused=False, is_training=False, scope="mix_weights_bn") mix_weights = slim.fully_connected(ftr_mean, num_towers, activation_fn=None, weights_initializer=slim.variance_scaling_initializer(), scope="mix_weights") mix_weights = tf.nn.softmax(mix_weights, axis=-1) tf.summary.histogram("mix_weights", mix_weights) logits = tf.stack(tower_logits, axis=1) final_logit = tf.reduce_sum(tf.multiply(logits, tf.expand_dims(mix_weights, axis=-1)), axis=1, keepdims=False) final_predictions = tf.nn.sigmoid(final_logit) final_label_loss = label_loss_fn.calculate_loss(final_predictions, labels_batch, label_weights=label_weights) tf.summary.scalar("label_loss", final_label_loss) tf.add_to_collection("global_step", global_step) tf.add_to_collection("loss", final_label_loss) tf.add_to_collection("predictions", final_predictions) tf.add_to_collection("input_batch", model_input) tf.add_to_collection("input_batch_raw", model_input_raw) tf.add_to_collection("video_id_batch", video_id_batch) tf.add_to_collection("num_frames", num_frames) tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32)) if FLAGS.segment_labels: tf.add_to_collection("label_weights", input_data_dict["label_weights"]) tf.add_to_collection("summary_op", tf.summary.merge_all())
Apache License 2.0
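The surrounding eval script presumably calls evaluation_loop in a polling loop, feeding back the returned global step so already-evaluated checkpoints are skipped. The driver itself is not shown in this excerpt, so the sketch below is a guess at the wiring; the variable names follow the script above and the sleep interval is made up.

last_step = -1
while True:
    last_step = evaluation_loop(fetches, saver, summary_writer,
                                evl_metrics, last_step)
    if FLAGS.run_once:
        break
    time.sleep(60)  # wait before checking for a newer checkpoint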
googleapis/artman
artman/pipelines/gapic_generation.py
GapicTaskFactory._get_grpc_codegen_tasks
python
def _get_grpc_codegen_tasks(self, language, **kw):
    grpc_factory = grpc_gen.ProtoGenTaskFactory(gen_grpc=True,
                                                language=language,
                                                **kw)
    return grpc_factory.get_grpc_codegen_tasks(**kw)
Return the code generation tasks for making a GRPC package. Args: language (str): The language code is being generated in. kw (dict): Additional keyword arguments passed through to the grpc codegen task factory. Returns: list: A list of Task subclasses defined by the GRPC task factory.
https://github.com/googleapis/artman/blob/ded22a6640a0e59d02fbe7716dcafd530b754e6d/artman/pipelines/gapic_generation.py#L182-L196
from __future__ import absolute_import from artman.pipelines import code_generation as code_gen from artman.pipelines import grpc_generation as grpc_gen from artman.tasks import emit_success from artman import tasks from artman.utils import task_utils _GAPIC_REQUIRED = ['service_yaml', 'gapic_yaml', 'language', 'aspect'] _DISCOGAPIC_REQUIRED = ['gapic_yaml', 'language'] class GapicConfigPipeline(code_gen.CodeGenerationPipelineBase): def __init__(self, **kwargs): super(GapicConfigPipeline, self).__init__( GapicConfigTaskFactory(), **kwargs) class DiscoGapicConfigPipeline(code_gen.CodeGenerationPipelineBase): def __init__(self, **kwargs): super(DiscoGapicConfigPipeline, self).__init__( DiscoGapicConfigTaskFactory(), **kwargs) class GapicConfigTaskFactory(code_gen.TaskFactoryBase): def get_tasks(self, **kwargs): return task_utils.instantiate_tasks([ tasks.protoc.ProtoDescGenTask, tasks.gapic.GapicConfigGenTask, tasks.gapic.GapicConfigMoveTask ], kwargs) def get_validate_kwargs(self): return code_gen.COMMON_REQUIRED def get_invalid_kwargs(self): return ['language'] class DiscoGapicConfigTaskFactory(code_gen.TaskFactoryBase): def get_tasks(self, **kwargs): return task_utils.instantiate_tasks([ tasks.gapic.DiscoGapicConfigGenTask, tasks.gapic.GapicConfigMoveTask ], kwargs) def get_validate_kwargs(self): return code_gen.COMMON_DISCO_REQUIRED def get_invalid_kwargs(self): return ['language'] class GapicOnlyClientPipeline(code_gen.CodeGenerationPipelineBase): def __init__(self, language, **kwargs): super(GapicOnlyClientPipeline, self).__init__( GapicOnlyTaskFactory(), language=language, **kwargs ) class GapicClientPipeline(code_gen.CodeGenerationPipelineBase): def __init__(self, language, **kwargs): super(GapicClientPipeline, self).__init__( GapicTaskFactory(), language=language, **kwargs ) class DiscoGapicClientPipeline(code_gen.CodeGenerationPipelineBase): def __init__(self, language, **kwargs): super(DiscoGapicClientPipeline, self).__init__( DiscoGapicTaskFactory(), language=language, **kwargs ) class CSharpPackagingTaskFactory(code_gen.TaskFactoryBase): def get_tasks(self, **kwargs): return [ tasks.gapic.CSharpGapicPackagingTask ] def get_validate_kwargs(self): return ['gapic_code_dir', 'grpc_code_dir', 'proto_code_dir', 'gapic_yaml'] def get_invalid_kwargs(self): return [] PACKAGING_TASK_FACTORY_DICT = { 'csharp': CSharpPackagingTaskFactory } class GapicTaskFactory(code_gen.TaskFactoryBase): def get_tasks(self, **kwargs): answer = [] if 'gapic_code_dir' in kwargs: answer = self._get_gapic_codegen_tasks(**kwargs) for grpc_task in self._get_grpc_codegen_tasks(**kwargs): if grpc_task not in answer: answer.append(grpc_task) for packaging_task in self._get_packaging_tasks(**kwargs): if packaging_task not in answer: answer.append(packaging_task) answer += emit_success.TASKS return task_utils.instantiate_tasks(answer, kwargs) def _get_gapic_codegen_tasks(self, language, **kwargs): return [ tasks.protoc.ProtoDescGenTask, tasks.descriptor.get_descriptor_set_task(language), tasks.package_metadata.PackageMetadataConfigGenTask, tasks.gapic.GapicCodeGenTask, tasks.format.get_format_task(language), ]
Apache License 2.0
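GapicTaskFactory.get_tasks combines the GAPIC task list with the GRPC and packaging tasks returned by helpers like the one above, skipping anything already present. A simplified sketch of that order-preserving merge, with hypothetical gapic_tasks / grpc_tasks / packaging_tasks lists standing in for the real factory output:

# GAPIC tasks first, then any GRPC or packaging task not already present.
answer = list(gapic_tasks)
for task in grpc_tasks + packaging_tasks:
    if task not in answer:
        answer.append(task)
# answer is later passed to task_utils.instantiate_tasks(answer, kwargs).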
googleapis/python-talent
google/cloud/talent_v4beta1/services/company_service/client.py
CompanyServiceClient.get_company
python
def get_company(
    self,
    request: Union[company_service.GetCompanyRequest, dict] = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> company.Company:
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    if not isinstance(request, company_service.GetCompanyRequest):
        request = company_service.GetCompanyRequest(request)
        if name is not None:
            request.name = name
    rpc = self._transport._wrapped_methods[self._transport.get_company]
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    return response
r"""Retrieves specified company. Args: request (Union[google.cloud.talent_v4beta1.types.GetCompanyRequest, dict]): The request object. Request for getting a company by name. name (str): Required. The resource name of the company to be retrieved. The format is "projects/{project_id}/tenants/{tenant_id}/companies/{company_id}", for example, "projects/api-test-project/tenants/foo/companies/bar". If tenant id is unspecified, the default tenant is used, for example, "projects/api-test-project/companies/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.talent_v4beta1.types.Company: A Company resource represents a company in the service. A company is the entity that owns job postings, that is, the hiring entity responsible for employing applicants for the job position.
https://github.com/googleapis/python-talent/blob/a9324567ba2757dfc0c3b688c968210862f2c8d1/google/cloud/talent_v4beta1/services/company_service/client.py#L444-L525
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.cloud.talent_v4beta1.services.company_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import company from google.cloud.talent_v4beta1.types import company as gct_company from google.cloud.talent_v4beta1.types import company_service from .transports.base import CompanyServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import CompanyServiceGrpcTransport from .transports.grpc_asyncio import CompanyServiceGrpcAsyncIOTransport class CompanyServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = CompanyServiceGrpcTransport _transport_registry["grpc_asyncio"] = CompanyServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[CompanyServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class CompanyServiceClient(metaclass=CompanyServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> CompanyServiceTransport: return self._transport @staticmethod def company_path(project: str, tenant: str, company: str,) -> str: return "projects/{project}/tenants/{tenant}/companies/{company}".format( project=project, tenant=tenant, company=company, ) @staticmethod def parse_company_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/companies/(?P<company>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, CompanyServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) 
client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) if isinstance(transport, CompanyServiceTransport): if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, ) def create_company( self, request: Union[company_service.CreateCompanyRequest, dict] = None, *, parent: str = None, company: gct_company.Company = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_company.Company: has_flattened_params = any([parent, company]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, company_service.CreateCompanyRequest): request = company_service.CreateCompanyRequest(request) if parent is not None: request.parent = parent if company is not None: request.company = company rpc = self._transport._wrapped_methods[self._transport.create_company] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response
Apache License 2.0
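Typical client-side usage of get_company, assuming application default credentials are available in the environment. The project, tenant, and company IDs are hypothetical; company_path is the helper defined on the client in the context above.

from google.cloud import talent_v4beta1

client = talent_v4beta1.CompanyServiceClient()
name = client.company_path('my-project', 'my-tenant', 'my-company')  # hypothetical IDs
company = client.get_company(name=name)
print(company.display_name)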
ouranosinc/xclim
xclim/indices/fwi.py
initial_spread_index
python
def initial_spread_index(ws, ffmc):
    mo = 147.2 * (101.0 - ffmc) / (59.5 + ffmc)
    ff = 19.1152 * np.exp(mo * -0.1386) * (1.0 + (mo ** 5.31) / 49300000.0)
    isi = ff * np.exp(0.05039 * ws)
    return isi
Compute the initial spread index. Parameters ---------- ws : array Noon wind speed [km/h]. ffmc : array Fine fuel moisture code. Returns ------- array Initial spread index.
https://github.com/ouranosinc/xclim/blob/7d5a2ea03af433bd088ea72d9402e1e090531c39/xclim/indices/fwi.py#L430-L448
from collections import OrderedDict from typing import Mapping, Optional, Sequence, Union import numpy as np import xarray as xr from numba import jit, vectorize from xclim.core.units import convert_units_to, declare_units from . import run_length as rl default_params = dict( temp_start_thresh=(12, "degC"), temp_end_thresh=(5, "degC"), snow_thresh=(0.01, "m"), temp_condition_days=3, snow_condition_days=3, carry_over_fraction=0.75, wetting_efficiency_fraction=0.75, dc_start=15, dmc_start=6, ffmc_start=85, prec_thresh=(1.0, "mm/d"), dc_dry_factor=5, dmc_dry_factor=2, snow_cover_days=60, snow_min_cover_frac=0.75, snow_min_mean_depth=(0.1, "m"), ) DAY_LENGTHS = np.array( [ [11.5, 10.5, 9.2, 7.9, 6.8, 6.2, 6.5, 7.4, 8.7, 10, 11.2, 11.8], [10.1, 9.6, 9.1, 8.5, 8.1, 7.8, 7.9, 8.3, 8.9, 9.4, 9.9, 10.2], 12 * [9], [7.9, 8.4, 8.9, 9.5, 9.9, 10.2, 10.1, 9.7, 9.1, 8.6, 8.1, 7.8], [6.5, 7.5, 9, 12.8, 13.9, 13.9, 12.4, 10.9, 9.4, 8, 7, 6], ] ) DAY_LENGTH_FACTORS = np.array( [ [6.4, 5.0, 2.4, 0.4, -1.6, -1.6, -1.6, -1.6, -1.6, 0.9, 3.8, 5.8], 12 * [1.39], [-1.6, -1.6, -1.6, 0.9, 3.8, 5.8, 6.4, 5.0, 2.4, 0.4, -1.6, -1.6], ] ) @jit def _day_length(lat: Union[int, float], mth: int): if -30 > lat >= -90: dl = DAY_LENGTHS[0, :] elif -15 > lat >= -30: dl = DAY_LENGTHS[1, :] elif 15 > lat >= -15: return 9 elif 30 > lat >= 15: dl = DAY_LENGTHS[3, :] elif 90 >= lat >= 30: dl = DAY_LENGTHS[4, :] elif lat > 90 or lat < -90: raise ValueError("Invalid lat specified.") else: raise ValueError return dl[mth - 1] @jit def _day_length_factor(lat: float, mth: int): if -15 > lat >= -90: dlf = DAY_LENGTH_FACTORS[0, :] elif 15 > lat >= -15: return 1.39 elif 90 >= lat >= 15: dlf = DAY_LENGTH_FACTORS[2, :] elif lat > 90 or lat < -90: raise ValueError("Invalid lat specified.") else: raise ValueError return dlf[mth - 1] @vectorize def _fine_fuel_moisture_code(t, p, w, h, ffmc0): mo = (147.2 * (101.0 - ffmc0)) / (59.5 + ffmc0) if p > 0.5: rf = p - 0.5 if mo > 150.0: mo = ( mo + 42.5 * rf * np.exp(-100.0 / (251.0 - mo)) * (1.0 - np.exp(-6.93 / rf)) ) + (0.0015 * (mo - 150.0) ** 2) * np.sqrt(rf) elif mo <= 150.0: mo = mo + 42.5 * rf * np.exp(-100.0 / (251.0 - mo)) * ( 1.0 - np.exp(-6.93 / rf) ) if mo > 250.0: mo = 250.0 ed = ( 0.942 * (h ** 0.679) + (11.0 * np.exp((h - 100.0) / 10.0)) + 0.18 * (21.1 - t) * (1.0 - 1.0 / np.exp(0.1150 * h)) ) if mo < ed: ew = ( 0.618 * (h ** 0.753) + (10.0 * np.exp((h - 100.0) / 10.0)) + 0.18 * (21.1 - t) * (1.0 - 1.0 / np.exp(0.115 * h)) ) if mo < ew: kl = 0.424 * (1.0 - ((100.0 - h) / 100.0) ** 1.7) + ( 0.0694 * np.sqrt(w) ) * (1.0 - ((100.0 - h) / 100.0) ** 8) kw = kl * (0.581 * np.exp(0.0365 * t)) m = ew - (ew - mo) / 10.0 ** kw elif mo > ew: m = mo elif mo == ed: m = mo else: kl = 0.424 * (1.0 - (h / 100.0) ** 1.7) + (0.0694 * np.sqrt(w)) * ( 1.0 - (h / 100.0) ** 8 ) kw = kl * (0.581 * np.exp(0.0365 * t)) m = ed + (mo - ed) / 10.0 ** kw ffmc = (59.5 * (250.0 - m)) / (147.2 + m) if ffmc > 101.0: ffmc = 101.0 elif ffmc <= 0.0: ffmc = 0.0 return ffmc @vectorize def _duff_moisture_code(t, p, h, mth: int, lat: float, dmc0: float): if np.isnan(dmc0): return np.nan dl = _day_length(lat, mth) if t < -1.1: rk = 0 else: rk = 1.894 * (t + 1.1) * (100.0 - h) * dl * 0.0001 if p > 1.5: ra = p rw = 0.92 * ra - 1.27 wmi = 20.0 + 280.0 / np.exp( 0.023 * dmc0 ) if dmc0 <= 33.0: b = 100.0 / (0.5 + 0.3 * dmc0) else: if dmc0 <= 65.0: b = 14.0 - 1.3 * np.log(dmc0) else: b = 6.2 * np.log(dmc0) - 17.2 wmr = wmi + (1000 * rw) / (48.77 + b * rw) pr = 43.43 * (5.6348 - np.log(wmr - 20.0)) else: pr = dmc0 if pr < 0.0: pr = 
0.0 dmc = pr + rk if dmc < 0: dmc = 0.0 return dmc @vectorize def _drought_code(t, p, mth, lat, dc0): fl = _day_length_factor(lat, mth) if t < -2.8: t = -2.8 pe = (0.36 * (t + 2.8) + fl) / 2 if pe < 0.0: pe = 0.0 if p > 2.8: ra = p rw = 0.83 * ra - 1.27 smi = 800.0 * np.exp(-dc0 / 400.0) dr = dc0 - 400.0 * np.log(1.0 + ((3.937 * rw) / smi)) if dr > 0.0: dc = dr + pe elif np.isnan(dc0): dc = np.NaN else: dc = pe else: dc = dc0 + pe return dc
Apache License 2.0
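A tiny numeric check of the same ISI formula outside the module; the input values are arbitrary and only illustrate how the function is driven by wind speed and FFMC.

import numpy as np

ws, ffmc = 10.0, 85.0  # noon wind speed [km/h], fine fuel moisture code
mo = 147.2 * (101.0 - ffmc) / (59.5 + ffmc)
ff = 19.1152 * np.exp(mo * -0.1386) * (1.0 + (mo ** 5.31) / 49300000.0)
isi = ff * np.exp(0.05039 * ws)
print(round(float(isi), 2))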
timsusa/aptly_api_cli
aptly_cli/api/api.py
AptlyApiRequests.file_list_directories
python
def file_list_directories(self):
    r = requests.get(self.cfg['route_file'], headers=self.headers)
    resp_data = json.loads(r.content)
    return resp_data
LIST DIRECTORIES GET /api/files List all directories. Response: list of directory names. Example: $ curl http://localhost:8080/api/files
https://github.com/timsusa/aptly_api_cli/blob/011ba8e7f464726b336b53f6b2cbdc4490b5180c/aptly_cli/api/api.py#L396-L410
import json import requests import os from ConfigParser import ConfigParser class AptlyApiRequests(object): def __init__(self): self.configfile = None cfg_file = self.get_config_from_file() if cfg_file is not None: basic_url = cfg_file['basic_url'] port = cfg_file['port'] else: basic_url = 'http://localhost' port = ':9003' print "No Config file found, take default values" self.headers = {'content-type': 'application/json'} url = basic_url + port self.cfg = { 'route_snap': url + '/api/snapshots/', 'route_repo': url + '/api/repos/', 'route_file': url + '/api/files/', 'route_pack': url + '/api/packages/', 'route_pub': url + '/api/publish/', 'route_graph': url + '/api/graph/', 'route_vers': url + '/api/version/', } @staticmethod def _out(arg_list): for y in arg_list: print json.dumps(y, indent=2) @staticmethod def get_config_from_file(): home = os.path.expanduser("~") name = home + '/aptly-cli.conf' config_file = ConfigParser() if not config_file.read(name): cfg_file = None else: cfg_file = { 'basic_url': config_file.get('general', 'basic_url'), 'port': config_file.get('general', 'port'), 'prefixes_mirrors': config_file.get('general', 'prefixes_mirrors'), 'save_last_snap': config_file.get('general', 'save_last_snap'), 'save_last_pkg': config_file.get('general', 'save_last_pkg'), 'repos_to_clean': config_file.get('general', 'repos_to_clean'), 'package_prefixes': config_file.get('general', 'package_prefixes'), 'repos': config_file.get('3rd_party', 'repos'), 'staging_snap_pre_post': config_file.get('3rd_party', 'staging_snap_pre_post') } return cfg_file def repo_create(self, repo_name, data=None): if data is None: post_data = { 'Name': repo_name } else: post_data = { 'Name': repo_name, 'Comment': data.comment, 'DefaultDistribution': data.default_distribution, 'DefaultComponent': data.default_component } r = requests.post(self.cfg['route_repo'][:-1], data=json.dumps(post_data), headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_show(self, repo_name): r = requests.get(self.cfg['route_repo'] + repo_name, headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_show_packages(self, repo_name, pkg_to_search=None, with_deps=0, detail='compact'): if pkg_to_search is None: param = { 'withDeps': with_deps, 'format': detail } else: param = { 'q': pkg_to_search, 'withDeps': with_deps, 'format': detail } url = str(self.cfg['route_repo']) + str(repo_name) + '/packages' r = requests.get(url, params=param, headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_edit(self, repo_name, data=None): if data is None: data = {} else: data = { 'Comment': data.comment, 'DefaultDistribution': data.default_distribution, 'DefaultComponent': data.default_component } r = requests.put(self.cfg['route_repo'] + repo_name, data=json.dumps(data), headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_list(self): r = requests.get(self.cfg['route_repo'], headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_delete(self, repo_name): r = requests.delete(self.cfg['route_repo'] + repo_name, headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_add_package_from_upload(self, repo_name, dir_name, file_name=None, params=None): if file_name is None: url = self.cfg['route_repo'] + repo_name + '/file/' + dir_name else: url = self.cfg['route_repo'] + repo_name + '/file/' + dir_name + '/' + file_name if params is not None: query_param = { 'noRemove': params.no_remove, 'forceReplace': 
params.force_replace } else: query_param = { 'noRemove': 0, 'forceReplace': 0 } r = requests.post(url, params=query_param, headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_add_packages_by_key(self, repo_name, package_key_list): if len(package_key_list) <= 0: print 'No packages were given... aborting' return url = self.cfg['route_repo'] + repo_name + '/packages' param = { 'PackageRefs': package_key_list } r = requests.post(url, data=json.dumps(param), headers=self.headers) resp_data = json.loads(r.content) return resp_data def repo_delete_packages_by_key(self, repo_name, package_key_list): url = self.cfg['route_repo'] + repo_name + '/packages' data = { 'PackageRefs': package_key_list } r = requests.delete(url, data=json.dumps(data), headers=self.headers) resp_data = json.loads(r.content) return resp_data
MIT License
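The same call from Python rather than curl, assuming a local aptly API is running (or an ~/aptly-cli.conf pointing at one); the module path follows the record's function_path.

from aptly_cli.api.api import AptlyApiRequests

api = AptlyApiRequests()            # falls back to http://localhost:9003 when no config file exists
print(api.file_list_directories())  # e.g. a list of upload directory names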
google/uncertainty-baselines
uncertainty_baselines/datasets/base.py
BaseDataset.__init__
python
def __init__(
    self,
    name: str,
    dataset_builder: tfds.core.DatasetBuilder,
    split: Union[float, str, tfds.Split],
    seed: Optional[Union[int, tf.Tensor]] = None,
    is_training: Optional[bool] = None,
    shuffle_buffer_size: Optional[int] = None,
    num_parallel_parser_calls: int = tf.data.experimental.AUTOTUNE,
    drop_remainder: bool = False,
    fingerprint_key: Optional[str] = None,
    download_data: bool = False,
    decoders: Optional[Dict[str, tfds.decode.Decoder]] = None,
    cache: bool = False):
    self.name = name
    self._split = split

    if seed is None:
        self._seed = tf.random.uniform((2,), maxval=int(1e10), dtype=tf.int32)
    elif isinstance(seed, int):
        self._seed = (seed, seed + 1)
    elif isinstance(seed, tf.Tensor) and tf.shape(seed).shape == 0:
        self._seed = tf.stack([seed, seed+1])
    else:
        self._seed = seed

    self._num_parallel_parser_calls = num_parallel_parser_calls
    self._drop_remainder = drop_remainder
    self._download_data = download_data
    self._decoders = decoders
    self._cache = cache

    known_splits = [
        'train', 'validation', 'test', tfds.Split.TRAIN, tfds.Split.VALIDATION,
        tfds.Split.TEST
    ]
    if is_training is None:
        if split in known_splits:
            is_training = split in ['train', tfds.Split.TRAIN]
        else:
            raise ValueError(
                'Received ambiguous split {}, must set is_training for splits other '
                'than "train", "validation", "test".'.format(split))
    self._is_training = is_training

    if 'train' in dataset_builder.info.splits and shuffle_buffer_size is None:
        num_train_examples = dataset_builder.info.splits['train'].num_examples
        self._shuffle_buffer_size = num_train_examples
    else:
        self._shuffle_buffer_size = shuffle_buffer_size

    super().__init__(
        dataset_builder=dataset_builder,
        fingerprint_key=fingerprint_key,
        split=self._split,
        label_key='label')

    self._enumerate_id_key = '_enumerate_added_per_step_id'

    self._add_fingerprint_key = False
    if self._fingerprint_key is None:
        self._fingerprint_key = '_enumerate_added_example_id'
        self._add_fingerprint_key = True
Create a tf.data.Dataset builder. Args: name: the name of this dataset. dataset_builder: the TFDS dataset builder used to read examples given a split. split: a dataset split, either a custom tfds.Split or one of the tfds.Split enums [TRAIN, VALIDAITON, TEST] or their lowercase string names. For Criteo it can also be a float to represent the level of data augmentation. For speech commands it can be a tuple of a string and float for shifted data splits. seed: the seed used as a source of randomness. is_training: whether or not `split` is the training split. This is necessary because tf.data subsplits can sometimes be derived from the training split, such as when using part of the training split as a validation set, and this complicates calculating `is_training` in these cases. Only required when the passed split is not one of ['train', 'validation', 'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST], otherwise it is set automatically. shuffle_buffer_size: the number of example to use in the shuffle buffer for tf.data.Dataset.shuffle(). num_parallel_parser_calls: the number of parallel threads to use while preprocessing in tf.data.Dataset.map(). drop_remainder: whether or not to drop the last batch of data if the number of points is not exactly equal to the batch size. fingerprint_key: The name of the feature holding a string that will be used to create an element id using a fingerprinting function. If None, then `ds.enumerate()` is added before the `ds.map(preprocessing_fn)` is called and an `id` field is added to the example Dict. download_data: Whether or not to download data before loading. decoders: Optional TFDS decoders to provide to `dataset_builder.as_dataset`, the same as passed to `tfds.load`. cache: Whether or not to cache the dataset after it is returned from dataset_builder.as_dataset(...) (before preprocessing is applied).
https://github.com/google/uncertainty-baselines/blob/d37c17c4b08a88d6546bbf299b59127a03398404/uncertainty_baselines/datasets/base.py#L87-L187
import logging from typing import Callable, Dict, Optional, Sequence, Type, TypeVar, Union from robustness_metrics.common import ops from robustness_metrics.common import types from robustness_metrics.datasets import tfds as robustness_metrics_base import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds PreProcessFn = Callable[ [Union[int, tf.Tensor, Sequence[tf.Tensor], types.Features]], types.Features] _EnumeratedPreProcessFn = Callable[ [int, Union[int, tf.Tensor, Sequence[tf.Tensor], types.Features]], types.Features] def get_validation_percent_split( dataset_builder, validation_percent, split, test_split=tfds.Split.TEST): if validation_percent < 0.0 or validation_percent >= 1.0: raise ValueError( 'validation_percent must be in [0, 1), received {}.'.format( validation_percent)) if validation_percent == 0.: train_split = tfds.Split.TRAIN validation_split = tfds.Split.VALIDATION else: num_train_examples = dataset_builder.info.splits['train'].num_examples num_validation_examples = int(num_train_examples * validation_percent) train_split = tfds.core.ReadInstruction( 'train', to=-num_validation_examples, unit='abs') validation_split = tfds.core.ReadInstruction( 'train', from_=-num_validation_examples, unit='abs') if split in ['train', tfds.Split.TRAIN]: new_split = train_split elif split in ['validation', tfds.Split.VALIDATION]: new_split = validation_split elif split in ['test', tfds.Split.TEST]: new_split = test_split elif isinstance(split, str): raise ValueError( 'Invalid string name for split, must be one of ["train", "validation"' ', "test"], received {}.'.format(split)) else: new_split = split return new_split class BaseDataset(robustness_metrics_base.TFDSDataset):
Apache License 2.0
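The seed argument described in the docstring is normalized into a length-2 seed inside the constructor. The standalone helper below mirrors that branch of BaseDataset.__init__ for illustration only; it is not part of the library.

import tensorflow.compat.v2 as tf

def normalize_seed(seed):
    # Mirrors the seed handling in BaseDataset.__init__: always end up
    # with a pair usable by the tf.random.stateless_* ops.
    if seed is None:
        return tf.random.uniform((2,), maxval=int(1e10), dtype=tf.int32)
    if isinstance(seed, int):
        return (seed, seed + 1)
    if isinstance(seed, tf.Tensor) and tf.shape(seed).shape == 0:
        return tf.stack([seed, seed + 1])
    return seed

print(normalize_seed(17))  # (17, 18)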
izgzhen/nightly
src/launcher.py
Launcher.process_job_to_launch
python
def process_job_to_launch(self, job):
    job_name = job["name"]
    assert "enabled" in job, job
    if not job["enabled"]:
        logger.info(f"Job {job_name} is not enabled, thus skipped")
        return
    if "cwd" not in job:
        job["cwd"] = None
    if "env" not in job:
        job["env"] = {}
    if job["schedule"] in ["nightly"]:
        last_run = self.db.get_last_run_started(job)
        if last_run is not None:
            now = datetime.datetime.now()
            interval = schedule_to_interval(job["schedule"])
            if last_run + interval > now:
                return None
    elif job["schedule"] == "daemon":
        jobs = self.db.fetch_running_jobs_of(job)
        if len(jobs) > 0:
            assert len(jobs) == 1
            return None
    elif job["schedule"] == "once":
        pass
    else:
        raise Exception("Unknown schedule: " + job["schedule"])

    storage = self.get_storage(job["storage_type"])
    if storage is None:
        return None

    if "host" in job:
        compute = self.get_compute_by_host(job["host"])
    else:
        compute = self.get_compute(job["compute_type"])
    if compute is None:
        logger.warn("Can't find compute resource for " + str(job))
        return None

    self.launch_job(job, compute, storage)
    self.db.commit()
Process job for further scheduling needs
https://github.com/izgzhen/nightly/blob/83c4aec63b450f4479b7813a61e5807e169a2e58/src/launcher.py#L104-L149
import yaml import json import datetime import tempfile import time import sys import os from msbase.utils import getenv, datetime_str from msbase.logging import logger from common import get_jobs_config from model import DB from resource import Resource from notif import send_text def schedule_to_interval(sched): if sched == "nightly": return datetime.timedelta(days=1) raise Exception("Unknown schedule: " + sched) class Launcher(object): def __init__(self): super().__init__() self.resources_config = yaml.safe_load(open(getenv("CONFIG_RESOURCES"), "r")) self.db = DB() def get_storage(self, type_: str): for s in self.resources_config["storage"]: if s["type"] == type_: return s def get_master_as_compute(self): return self.get_compute_by_host(self.resources_config["master"]["host"]) def get_master_as_storage(self): return self.get_storage_by_host(self.resources_config["master"]["host"]) def get_compute(self, type_: str): for s in self.resources_config["compute"]: if s["type"] == type_: return s def get_compute_by_host(self, host: str): for s in self.resources_config["compute"]: if s["host"] == host: return s def get_storage_by_host(self, host: str): for s in self.resources_config["storage"]: if s["host"] == host: return s def create_new_job_row(self, job, compute, storage): job_started = datetime.datetime.now() return self.db.insert_row_get_id({ "job_name": job["name"], "cwd": job["cwd"], "env": json.dumps(job["env"]), "job_steps": json.dumps(job["steps"]), "job_persisted": json.dumps(job["persisted"]), "job_started": job_started, "job_status": "running", "compute": json.dumps(compute), "storage": json.dumps(storage) }, "log") def launch_job(self, job, compute, storage): job_id = self.create_new_job_row(job, compute, storage) task_file = tempfile.NamedTemporaryFile(mode="w", delete=False) task_file.write(json.dumps({ "cwd": job["cwd"], "env": job["env"], "steps": job["steps"] })) task_file.close() resource = Resource(compute, storage) runner_dir = resource.compute["nightly_tmp"] resource.scp_to(task_file.name, runner_dir + "/%s-input.json" % job_id, resource.compute) resource.scp_to("src/runner.py", runner_dir + "/runner.py", resource.compute) resource.ssh_exec_on_node("cd " + runner_dir + "; nohup python3 runner.py %s > /dev/null 2>&1 &" % job_id, resource.compute) try: pid = int(resource.ssh_exec_on_node("sleep 3; cat " + runner_dir + "/" + str(job_id) + "-pid.txt", resource.compute).strip()) except Exception: import pdb, traceback extype, value, tb = sys.exc_info() traceback.print_exc() send_text("Exception -- in PDB now") pdb.post_mortem(tb) self.db.update_pid(job_id, pid) logger.info("Launched job (job_id: %s, PID: %s): %s" % (job_id, pid, job["name"])) time.sleep(1)
Apache License 2.0
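The record above documents the scheduling gate in process_job_to_launch: nightly jobs are skipped until a full interval has elapsed since their last run, daemon jobs are skipped while an instance is already running, and "once" jobs always pass. Below is a minimal standalone sketch of just the nightly/once part of that gate; schedule_to_interval mirrors the helper in the record's context, while is_due and the 26-hour example timestamp are invented for illustration.

import datetime
from typing import Optional


def schedule_to_interval(sched: str) -> datetime.timedelta:
    # Mirrors the helper in the record's context: only "nightly" maps to an interval.
    if sched == "nightly":
        return datetime.timedelta(days=1)
    raise Exception("Unknown schedule: " + sched)


def is_due(schedule: str, last_run: Optional[datetime.datetime]) -> bool:
    """Return True if a job with this schedule should be launched now."""
    if schedule == "nightly":
        if last_run is None:
            return True  # never ran before
        # Skip if less than one full interval has passed since the last run started.
        return last_run + schedule_to_interval(schedule) <= datetime.datetime.now()
    if schedule == "once":
        return True
    raise Exception("Unknown schedule: " + schedule)


# A nightly job that last started 26 hours ago is due again.
print(is_due("nightly", datetime.datetime.now() - datetime.timedelta(hours=26)))  # True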
luci/recipes-py
recipe_modules/swarming/api.py
TaskResult.url
python
def url(self):
    return 'https://cas-viewer.appspot.com/{0}/blobs/{1}/tree'.format(
        self.instance,
        self.digest,
    )
The URL of the associated CAS UI page.
https://github.com/luci/recipes-py/blob/32f0255a6910af47c6cb35546032ae4d60fe9a92/recipe_modules/swarming/api.py#L962-L967
from __future__ import absolute_import import base64 import collections import contextlib import copy from future.utils import iteritems from past.types import basestring import six from .state import TaskState from recipe_engine import recipe_api DEFAULT_CIPD_VERSION = 'git_revision:25296136e6cb014ce9ae1ff61d43728246ae43b8' class TaskRequest(object): ResultDBCfg = collections.namedtuple('ResultDBCfg', ['enable']) def __init__(self, api): self._api = api self._name = '' self._priority = 200 self._service_account = '' self._slices = [self.TaskSlice(api)] self._user = None self._tags = None self._realm = api.context.realm self._resultdb = self.ResultDBCfg(enable=False) def _copy(self): api = self._api self._api = None slices = self._slices self._slices = [] ret = copy.deepcopy(self) ret._api = api ret._slices.extend([s._copy() for s in slices]) self._api = api self._slices = slices return ret def __getitem__(self, idx): return self._slices[idx] def __len__(self): return len(self._slices) def add_slice(self, slice_obj): ret = self._copy() ret._slices.append(slice_obj) return ret def with_slice(self, idx, slice_obj): assert isinstance(slice_obj, self.TaskSlice) assert 0 <= idx < len(self._slices) ret = self._copy() ret._slices[idx] = slice_obj return ret @property def name(self): return self._name def with_name(self, name): assert isinstance(name, basestring) ret = self._copy() ret._name = name return ret @property def priority(self): return self._priority def with_priority(self, priority): assert isinstance(priority, int) ret = self._copy() ret._priority = priority return ret @property def realm(self): return self._realm def with_realm(self, realm): assert isinstance(realm, basestring) ret = self._copy() ret._realm = realm return ret @property def resultdb(self): return self._resultdb def with_resultdb(self): ret = self._copy() ret._resultdb = self.ResultDBCfg(enable=True) return ret @property def service_account(self): return self._service_account def with_service_account(self, account): assert isinstance(account, basestring) ret = self._copy() ret._service_account = account return ret @property def user(self): return self._user def with_user(self, user): assert isinstance(user, basestring) ret = self._copy() ret._user = user return ret @property def tags(self): return self._tags def with_tags(self, tags): assert isinstance(tags, dict) tags_list = [] for tag, values in iteritems(tags): assert isinstance(tag, basestring) assert isinstance(values, list) for value in values: assert isinstance(value, basestring) tags_list.append('%s:%s' % (tag, value)) ret = self._copy() ret._tags = sorted(tags_list) return ret def _from_jsonish(self, d): tags = collections.defaultdict(list) for tag in d.get('tags', ()): k, v = tag.split(':', 1) tags[k].append(v) ret = (self. with_name(d['name']). with_priority(int(d['priority'])). with_service_account(d['service_account']). 
with_tags(tags)) if 'user' in d: ret = ret.with_user(d['user']) if 'resultdb' in d: ret = ret.with_resultdb() if 'realm' in d: ret = ret.with_realm(d['realm']) ret._slices = [ self.TaskSlice(self._api)._from_jsonish(ts) for ts in d['task_slices'] ] return ret def to_jsonish(self): realm = self.realm if self.resultdb.enable and not realm: realm = self._api.buildbucket.builder_realm ret = { 'name': self.name, 'priority': str(self.priority), 'service_account': self.service_account, 'task_slices': [task_slice.to_jsonish() for task_slice in self._slices], } if self.resultdb.enable: ret['resultdb'] = self.resultdb._asdict() if self.user: ret['user'] = self.user if self.tags: ret['tags'] = self.tags if realm: ret['realm'] = realm return ret class TaskSlice(object): def __init__(self, api): self._cipd_ensure_file = api.cipd.EnsureFile() self._command = [] self._relative_cwd = "" self._dimensions = {} self._env_prefixes = {} self._env_vars = {} self._execution_timeout_secs = 1200 self._expiration_secs = 300 self._wait_for_capacity = False self._grace_period_secs = 30 self._idempotent = False self._io_timeout_secs = 60 self._named_caches = {} self._outputs = [] self._secret_bytes = b'' self._cas_input_root = '' self._lower_priority = False self._containment_type = 'NONE' self._limit_processes = 0 self._limit_total_committed_memory = 0 self._api = api def _copy(self): api = self._api self._api = None ret = copy.deepcopy(self) ret._api = api self._api = api return ret @property def command(self): return self._command[:] def with_command(self, cmd): assert isinstance(cmd, list) assert all(isinstance(s, basestring) for s in cmd) ret = self._copy() ret._command = cmd return ret @property def relative_cwd(self): return self._relative_cwd def with_relative_cwd(self, relative_cwd): assert isinstance(relative_cwd, basestring) ret = self._copy() ret._relative_cwd = relative_cwd return ret @property def cas_input_root(self): return self._cas_input_root def with_cas_input_root(self, digest): assert isinstance(digest, basestring) assert digest.count('/') == 1 ret = self._copy() ret._cas_input_root = digest return ret @property def dimensions(self): return copy.deepcopy(self._dimensions) def with_dimensions(self, **kwargs): ret = self._copy() ret._dimensions = self.dimensions for k, v in iteritems(kwargs): assert isinstance(k, basestring) and (isinstance(v, basestring) or v is None) if v is None: ret._dimensions.pop(k, None) else: ret._dimensions[k] = v return ret @property def cipd_ensure_file(self): return copy.deepcopy(self._cipd_ensure_file) def with_cipd_ensure_file(self, ensure_file): assert isinstance(ensure_file, self._api.cipd.EnsureFile) ret = self._copy() ret._cipd_ensure_file = ensure_file return ret @property def outputs(self): return copy.copy(self._outputs) def with_outputs(self, outputs): assert isinstance(outputs, list) assert all(isinstance(output, basestring) for output in outputs) ret = self._copy() ret._outputs = outputs return ret @property def env_vars(self): return copy.deepcopy(self._env_vars) def with_env_vars(self, **kwargs): ret = self._copy() ret._env_vars = self.env_vars for k, v in iteritems(kwargs): assert (isinstance(k, basestring) and (isinstance(v, basestring) or v is None)) if v is None: ret._env_vars.pop(k, None) else: ret._env_vars[k] = v return ret @property def env_prefixes(self): return copy.deepcopy(self._env_prefixes) def with_env_prefixes(self, **kwargs): ret = self._copy() ret._env_prefixes = self.env_prefixes for k, v in iteritems(kwargs): assert (isinstance(k, 
basestring) and (isinstance(v, list) or v is None)), ( '%r must be a string and %r None or a list of strings' % (k, v)) if v is None: ret._env_prefixes.pop(k, None) else: assert all(isinstance(prefix, basestring) for prefix in v) ret._env_prefixes.setdefault(k, []).extend(v) return ret @property def expiration_secs(self): return self._expiration_secs def with_expiration_secs(self, secs): assert isinstance(secs, int) and secs >= 0 ret = self._copy() ret._expiration_secs = secs return ret @property def wait_for_capacity(self): return self._wait_for_capacity def with_wait_for_capacity(self, b): assert isinstance(b, bool) ret = self._copy() ret._wait_for_capacity = b return ret @property def io_timeout_secs(self): return self._io_timeout_secs def with_io_timeout_secs(self, secs): assert isinstance(secs, int) and secs >= 0 ret = self._copy() ret._io_timeout_secs = secs return ret @property def execution_timeout_secs(self): return self._execution_timeout_secs def with_execution_timeout_secs(self, secs): assert isinstance(secs, int) and secs >= 0 ret = self._copy() ret._execution_timeout_secs = secs return ret @property def grace_period_secs(self): return self._grace_period_secs def with_grace_period_secs(self, secs): assert isinstance(secs, int) and secs >= 0 ret = self._copy() ret._grace_period_secs = secs return ret @property def idempotent(self): return self._idempotent def with_idempotent(self, idempotent): assert isinstance(idempotent, bool) ret = self._copy() ret._idempotent = idempotent return ret @property def secret_bytes(self): return self._secret_bytes def with_secret_bytes(self, data): assert isinstance(data, six.binary_type) ret = self._copy() ret._secret_bytes = data return ret @property def lower_priority(self): return self._lower_priority def with_lower_priority(self, lower_priority): assert isinstance(lower_priority, bool) ret = self._copy() ret._lower_priority = lower_priority return ret @property def containment_type(self): return self._containment_type def with_containment_type(self, containment_type): assert containment_type in ('NONE', 'AUTO', 'JOB_OBJECT') ret = self._copy() ret._containment_type = containment_type return ret @property def limit_processes(self): return self._limit_processes def with_limit_processes(self, limit_processes): assert isinstance(limit_processes, int) ret = self._copy() ret._limit_processes = limit_processes return ret @property def limit_total_committed_memory(self): return self._limit_total_committed_memory def with_limit_total_committed_memory(self, limit_total_committed_memory): assert isinstance(limit_total_committed_memory, int) ret = self._copy() ret._limit_total_committed_memory = limit_total_committed_memory return ret @property def named_caches(self): return self._named_caches def with_named_caches(self, named_caches): assert isinstance(named_caches, dict) ret = self._copy() ret._named_caches.update(named_caches) return ret def _from_jsonish(self, d): p = d['properties'] containment = p['containment'] def kv_list_to_dict(kv_list): ret = {} for kv in kv_list: ret[kv['key']] = kv['value'] return ret ret = (self. with_command(p['command']). with_relative_cwd(p['relative_cwd']). with_dimensions(**kv_list_to_dict(p['dimensions'])). with_outputs(p['outputs']). with_env_vars(**kv_list_to_dict(p['env'])). with_env_prefixes(**kv_list_to_dict(p['env_prefixes'])). with_execution_timeout_secs(int(p['execution_timeout_secs'])). with_grace_period_secs(int(p['grace_period_secs'])). with_idempotent(p['idempotent']). 
with_io_timeout_secs(int(p['io_timeout_secs'])). with_lower_priority(containment['lower_priority']). with_containment_type(containment['containment_type']). with_limit_processes(int(containment['limit_processes'])). with_limit_total_committed_memory( int(containment['limit_total_committed_memory']))) if 'cas_input_root' in p: digest = p['cas_input_root']['digest'] ret = ret.with_cas_input_root(digest['hash'] + '/' + digest['size_bytes']) if 'secret_bytes' in p: ret = ret.with_secret_bytes(base64.b64decode(p['secret_bytes'])) if 'cipd_input' in p: ensure_file = self._api.cipd.EnsureFile() for pkg in p['cipd_input']['packages']: ensure_file.add_package( pkg['package_name'], pkg['version'], subdir=pkg['path']) ret = ret.with_cipd_ensure_file(ensure_file) if 'caches' in p: ret = ret.with_named_caches({c['name']: c['path'] for c in p['caches']}) if 'wait_for_capacity' in d: ret = ret.with_wait_for_capacity(d['wait_for_capacity']) return ret.with_expiration_secs(int(d['expiration_secs'])) def to_jsonish(self): dims = self.dimensions assert len(dims) >= 1 and dims['pool'] properties = { 'command': self.command, 'relative_cwd': self.relative_cwd, 'dimensions': [{ 'key': k, 'value': v } for k, v in sorted(iteritems(dims))], 'outputs': self.outputs, 'env': [{ 'key': k, 'value': v } for k, v in sorted(iteritems(self.env_vars))], 'env_prefixes': [{ 'key': k, 'value': v } for k, v in sorted(iteritems(self.env_prefixes))], 'execution_timeout_secs': str(self.execution_timeout_secs), 'grace_period_secs': str(self.grace_period_secs), 'idempotent': self.idempotent, 'io_timeout_secs': str(self.io_timeout_secs), 'containment': { 'lower_priority': self.lower_priority, 'containment_type': self.containment_type, 'limit_processes': str(self.limit_processes), 'limit_total_committed_memory': str(self.limit_total_committed_memory), }, } if self.cas_input_root: h, b = self.cas_input_root.split('/') properties['cas_input_root'] = { 'cas_instance': self._api.cas.instance, 'digest': { 'hash': h, 'size_bytes': b, }, } if self.secret_bytes: properties['secret_bytes'] = base64.b64encode( self.secret_bytes).decode() if self.cipd_ensure_file.packages: properties['cipd_input'] = { 'packages': [{ 'package_name': pkg.name, 'path': path or '.', 'version': pkg.version, } for path in sorted(self.cipd_ensure_file.packages) for pkg in self.cipd_ensure_file.packages[path]] } if self._named_caches: properties['caches'] = [{ 'name': name, 'path': path } for name, path in sorted(iteritems(self.named_caches))] return { 'expiration_secs': str(self.expiration_secs), 'wait_for_capacity': self.wait_for_capacity, 'properties': properties, } class TaskRequestMetadata(object): def __init__(self, swarming_server, task_json): self._task_json = task_json self._swarming_server = swarming_server @property def name(self): return self._task_json['request']['name'] @property def id(self): return self._task_json['task_id'] @property def task_ui_link(self): return '%s/task?id=%s' % (self._swarming_server, self.id) @property def invocation(self): return self._task_json.get('task_result', {}).get('resultdb_info', {}).get('invocation') class TaskResult(object): class CasOutputs(object): def __init__(self, digest, instance): self._digest = digest self._instance = instance @property def digest(self): return self._digest @property def instance(self): return self._instance @property
Apache License 2.0
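The property above is plain string formatting over the task result's CAS instance and digest. Here is a small standalone sketch of the same formatting; the instance and digest values are placeholders, and the hash/size_bytes digest shape is an assumption based on how the record's context builds cas_input_root.

def cas_viewer_url(instance: str, digest: str) -> str:
    # Same format string as TaskResult.url in the record above.
    return 'https://cas-viewer.appspot.com/{0}/blobs/{1}/tree'.format(instance, digest)


# Placeholder values purely for illustration.
print(cas_viewer_url(
    'projects/example-project/instances/default_instance',
    '0f1e2d3c4b5a69788796a5b4c3d2e1f00f1e2d3c4b5a69788796a5b4c3d2e1f0/142'))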
ndf-zz/asfv1
asfv1.py
fv1parse.parsewarn
python
def parsewarn(self, msg, line=None):
    if line is None:
        line = self.prevline
    self.dowarn('warning: {} on line {}'.format(msg, line))
Emit parse warning.
https://github.com/ndf-zz/asfv1/blob/0a1c7badf9d970d785c267c8bf2d1e5c5df3aac1/asfv1.py#L872-L876
from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import argparse import sys import shlex import struct VERSION = '1.2.7' PROGLEN = 128 DELAYSIZE = 32767 MAXERR = 10 REF_S1_14 = 2.0**14 MIN_S1_14 = -2.0**1 MAX_S1_14 = (2.0**(1+14)-1.0)/REF_S1_14 REF_S1_9 = 2.0**9 MIN_S1_9 = -2.0**1 MAX_S1_9 = (2.0**(1+9)-1.0)/REF_S1_9 REF_S_10 = 2.0**10 MIN_S_10 = -2.0**0 MAX_S_10 = (2.0**(0+10)-1.0)/REF_S_10 REF_S_15 = 2.0**15 MIN_S_15 = -2.0**0 MAX_S_15 = (2.0**(0+15)-1.0)/REF_S_15 REF_S4_6 = 2.0**6 MIN_S4_6 = -2.0**4 MAX_S4_6 = (2.0**(4+6)-1.0)/REF_S4_6 REF_S_23 = 2.0**23 MIN_S_23 = -2.0**0 MAX_S_23 = (2.0**(0+23)-1.0)/REF_S_23 M1 = 0x01 M2 = 0x03 M5 = 0x1f M6 = 0x3f M8 = 0xff M9 = 0x1ff M11 = 0x7ff M14 = 0x3fff M15 = 0x7fff M16 = 0xffff M24 = 0xffffff M27 = 0x7ffffff M32 = 0xffffffff def quiet(msg): pass def warning(msg): print(msg, file=sys.stderr) def error(msg): print(msg, file=sys.stderr) def bintoihex(buf, spos=0x0000, width=4): c = 0 olen = len(buf) ret = "" while(c < olen): rem = olen-c if rem > width: rem = width sum = rem adr = c + spos l = ':{0:02X}{1:04X}00'.format(rem,adr) sum += ((adr>>8)&M8)+(adr&M8) for j in range(0,rem): nb = buf[c+j] l += '{0:02X}'.format(nb) sum = (sum + nb)&M8 l += '{0:02X}'.format((~sum+1)&M8) ret += l + '\n' c += rem ret += ':00000001FF\n' return ret op_tbl = { 'RDA': [0b00000, (M15,5),(M11,21)], 'RMPA': [0b00001, (M11,21)], 'WRA': [0b00010, (M15,5),(M11,21)], 'WRAP': [0b00011, (M15,5),(M11,21)], 'RDAX': [0b00100, (M6,5),(M16,16)], 'RDFX': [0b00101, (M6,5),(M16,16)], 'LDAX': [0b00101, (M6,5)], 'WRAX': [0b00110, (M6,5),(M16,16)], 'WRHX': [0b00111, (M6,5),(M16,16)], 'WRLX': [0b01000, (M6,5),(M16,16)], 'MAXX': [0b01001, (M6,5),(M16,16)], 'ABSA': [0b01001, ], 'MULX': [0b01010, (M6,5)], 'LOG': [0b01011, (M16,16),(M11,5)], 'EXP': [0b01100, (M16,16),(M11,5)], 'SOF': [0b01101, (M16,16),(M11,5)], 'AND': [0b01110, (M24,8)], 'CLR': [0b01110, ], 'OR' : [0b01111, (M24,8)], 'XOR': [0b10000, (M24,8)], 'NOT': [0b10000, (M24,8)], 'SKP': [0b10001, (M5,27),(M6,21)], 'JMP': [0b10001, (M5,27),(M6,21)], 'NOP': [0b10001, ], 'WLDS': [0b10010, (M1,29),(M9,20),(M15,5)], 'WLDR': [0b10010, (M2,29),(M16,13),(M2,5)], 'JAM': [0b10011, (M2,6)], 'CHO': [0b10100, (M2,30),(M2,21),(M6,24),(M16,5)], 'RAW': [0b00000, (M32,0)], } def op_gen(mcode): gen = op_tbl[mcode[0]] ret = gen[0] nargs = len(gen) i = 1 while i < nargs: if i < len(mcode): ret |= (mcode[i]&gen[i][0]) << gen[i][1] i += 1 return ret class fv1parse(object): def __init__(self, source=None, clamp=True, spinreals=False, wfunc=None, efunc=None): self.program = bytearray(512) self.doclamp = clamp self.spinreals = spinreals self.dowarn = wfunc self.doerror = efunc self.delaymem = 0 self.prevline = 0 self.sline = 0 self.icnt = 0 self.sym = None self.ecount = 0 self.source = source.split('\n') self.linebuf = [] self.pl = [] self.mem = {} self.jmptbl = { } self.symtbl = { 'SIN0_RATE': 0x00, 'SIN0_RANGE': 0x01, 'SIN1_RATE': 0x02, 'SIN1_RANGE': 0x03, 'RMP0_RATE': 0x04, 'RMP0_RANGE': 0x05, 'RMP1_RATE': 0x06, 'RMP1_RANGE': 0x07, 'POT0': 0x10, 'POT1': 0x11, 'POT2': 0x12, 'ADCL': 0x14, 'ADCR': 0x15, 'DACL': 0x16, 'DACR': 0x17, 'ADDR_PTR': 0x18, 'REG0': 0x20, 'REG1': 0x21, 'REG2': 0x22, 'REG3': 0x23, 'REG4': 0x24, 'REG5': 0x25, 'REG6': 0x26, 'REG7': 0x27, 'REG8': 0x28, 'REG9': 0x29, 'REG10': 0x2a, 'REG11': 0x2b, 'REG12': 0x2c, 'REG13': 0x2d, 'REG14': 0x2e, 'REG15': 0x2f, 'REG16': 0x30, 'REG17': 0x31, 'REG18': 0x32, 'REG19': 0x33, 'REG20': 0x34, 'REG21': 
0x35, 'REG22': 0x36, 'REG23': 0x37, 'REG24': 0x38, 'REG25': 0x39, 'REG26': 0x3a, 'REG27': 0x3b, 'REG28': 0x3c, 'REG29': 0x3d, 'REG30': 0x3e, 'REG31': 0x3f, 'SIN0': 0x00, 'SIN1': 0x01, 'RMP0': 0x02, 'RMP1': 0x03, 'RDA': 0x00, 'SOF': 0x02, 'RDAL': 0x03, 'SIN': 0x00, 'COS': 0x01, 'REG': 0x02, 'COMPC': 0x04, 'COMPA': 0x08, 'RPTR2': 0x10, 'NA': 0x20, 'RUN': 0x10, 'ZRC': 0x08, 'ZRO': 0x04, 'GEZ': 0x02, 'NEG': 0x01, } def __mkopcodes__(self): proglen = len(self.pl) self.dowarn('info: Read {} instructions from input'.format( proglen)) icnt = proglen while icnt < PROGLEN: self.pl.append({'cmd':['SKP',0x00,0x00], 'addr':icnt, 'target':None}) icnt += 1 oft = 0 for i in self.pl: struct.pack_into('>I', self.program, oft, op_gen(i['cmd'])) oft += 4 def __register__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic reg = self.__expression__() if int(reg) == reg: reg = int(reg) if reg < 0 or reg > 63: self.parseerror('Register {0:#x} out of range'.format(reg) + xtra) reg = 0 else: self.parseerror('Invalid register {}'.format(reg) + xtra) reg = 0 return reg def __d_15__(self,mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic oft = self.__expression__() if oft < MIN_S_15 or oft > MAX_S_15: oft = int(round(oft)) if oft < -0x8000 or oft > M15: if self.doclamp: if oft < -0x8000: oft = -0x8000 elif oft > M15: oft = M15 self.parsewarn('Address clamped to {0:#x}'.format(oft) + xtra) else: self.parseerror('Invalid address {0:#x}'.format(oft) + xtra) oft = 0 else: oft = int(round(oft * REF_S_15)) return oft def __offset__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic oft = self.__expression__() if int(oft) == oft: oft = int(oft) if oft < 0 or oft > M6: self.parseerror('Offset {} out of range'.format(oft) + xtra) oft = 0 else: self.parseerror('Invalid offset {}'.format(oft) + xtra) oft = 0 return oft def __condition__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic cond = self.__expression__() if int(cond) == cond: cond = int(cond) if cond < 0 or cond > M5: self.parseerror('Condition {0:#x} out of range'.format( cond) + xtra) cond = 0 else: self.parseerror('Invalid condition {}'.format(cond) + xtra) cond = 0 return cond def __chotype__(self): chotype = self.sym['stxt'] self.__next__() if chotype in ['RDA','SOF','RDAL']: chotype = self.symtbl[chotype] else: self.parseerror('Invalid CHO type {}'.format(chotype)) chotype = 0 return chotype def __choflags__(self, lfo=None): flags = self.__expression__() if int(flags) == flags: flags = int(flags) if flags < 0 or flags > M6: self.parseerror('Invalid flags {0:#x} for CHO'.format(flags)) flags = 0 else: self.parseerror('Invalid flags {} for CHO'.format(flags)) flags = 0 oflags = flags if lfo&0x02: flags = oflags & 0x3e if oflags != flags: self.parsewarn('RMP flags set to {0:#x} for CHO'.format( flags)) else: flags = oflags & 0x0f if oflags != flags: self.parsewarn('SIN flags set to {0:#x} for CHO'.format( flags)) return flags def __s1_14__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M16: if self.doclamp: if arg < 0: arg = 0 elif arg > M16: arg = M16 self.parsewarn('S1_14 arg clamped to {0:#x}'.format(arg) + xtra) else: self.parseerror('S1_14 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S1_14 or arg > MAX_S1_14: if self.doclamp: if arg < MIN_S1_14: arg = MIN_S1_14 elif arg > MAX_S1_14: arg = MAX_S1_14 self.parsewarn('S1_14 arg clamped to {}'.format(arg) + xtra) else: 
self.parseerror('S1_14 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S1_14)) return arg def __s_10__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M11: if self.doclamp: if arg < 0: arg = 0 elif arg > M11: arg = M11 self.parsewarn('S_10 arg clamped to {0:#x}'.format( arg) + xtra) else: self.parseerror('S_10 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S_10 or arg > MAX_S_10: if self.doclamp: if arg < MIN_S_10: arg = MIN_S_10 elif arg > MAX_S_10: arg = MAX_S_10 self.parsewarn('S_10 arg clamped to {}'.format(arg) + xtra) else: self.parseerror('S_10 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S_10)) return arg def __s_15a__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if self.spinreals and arg == int(arg): arg = int(arg) if isinstance(arg, int): if arg < 0 or arg > M16: if self.doclamp: if arg < 0: arg = 0 elif arg > M16: arg = M16 self.parsewarn('S_15 arg clamped to {0:#x}'.format( arg) + xtra) else: self.parseerror('S_15 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S_15 or arg > MAX_S_15: if self.doclamp: if arg < MIN_S_15: arg = MIN_S_15 elif arg > MAX_S_15: arg = MAX_S_15 self.parsewarn('S_15 arg clamped to {}'.format(arg) + xtra) else: self.parseerror('S_15 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S_15)) return arg def __u_32__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M32: if self.doclamp: if arg < 0: arg = 0 elif arg > M32: arg = M32 self.parsewarn('U_32 arg clamped to {0:#x}'.format(arg) + xtra) else: self.parseerror('U_32 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: self.parseerror('Invalid U_32 arg {}'.format(arg) + xtra) arg = 0 return arg def __s_23__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M24: if self.doclamp: if arg < 0: arg = 0 elif arg > M24: arg = M24 self.parsewarn('S_23 arg clamped to {0:#x}'.format( arg) + xtra) else: self.parseerror('S_23 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S_23 or arg > MAX_S_23: if self.doclamp: if arg < MIN_S_23: arg = MIN_S_23 elif arg > MAX_S_23: arg = MAX_S_23 self.parsewarn('S_23 arg clamped to {}'.format(arg) + xtra) else: self.parseerror('S_23 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S_23)) return arg def __s1_9__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M11: if self.doclamp: if arg < 0: arg = 0 elif arg > M11: arg = M11 self.parsewarn('S1_9 arg clamped to {0:#x}'.format( arg) + xtra) else: self.parseerror('S1_9 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S1_9 or arg > MAX_S1_9: if self.doclamp: if arg < MIN_S1_9: arg = MIN_S1_9 elif arg > MAX_S1_9: arg = MAX_S1_9 self.parsewarn('S1_9 arg clamped to {}'.format(arg) + xtra) else: self.parseerror('S1_9 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S1_9)) return arg def __s4_6__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic arg = self.__expression__() if isinstance(arg, int): if arg < 0 or arg > M11: if self.doclamp: if arg < 0: arg = 0 elif arg 
> M11: arg = M11 self.parsewarn('S4_6 arg clamped to {0:#x}'.format( arg) + xtra) else: self.parseerror('S4_6 arg {0:#x} out of range'.format( arg) + xtra) arg = 0 else: if arg < MIN_S4_6 or arg > MAX_S4_6: if self.doclamp: if arg < MIN_S4_6: arg = MIN_S4_6 elif arg > MAX_S4_6: arg = MAX_S4_6 self.parsewarn('S4_6 arg clamped to {}'.format(arg) + xtra) else: self.parseerror('S4_6 arg {} out of range'.format(arg) + xtra) arg = 0 arg = int(round(arg * REF_S4_6)) return arg def __lfo__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic lfo = self.__expression__() if int(lfo) == lfo: lfo = int(lfo) if lfo < 0 or lfo > 3: self.parseerror('Invalid LFO {0:#x}'.format(lfo) + xtra) lfo = 0 else: self.parseerror('Invalid LFO {}'.format(lfo) + xtra) lfo = 0 return lfo def __lfo_sinfreq__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic freq = self.__expression__() if int(freq) == freq: freq = int(freq) if freq < 0 or freq > M9: if self.doclamp: if freq < 0: freq = 0 elif freq > M9: freq = M9 self.parsewarn('Frequency clamped to {0:#x}'.format(freq) + xtra) else: self.parseerror('Invalid frequency {0:#x}'.format(freq) + xtra) freq = 0 else: self.parseerror('Invalid frequency {}'.format(freq) + xtra) freq = 0 return freq def __lfo_rampfreq__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic freq = self.__expression__() if freq < -0.5 or freq > MAX_S_15: freq = int(round(freq)) if freq < -0x8000 or freq > M15: if self.doclamp: if freq < -0x8000: freq = -0x8000 elif freq > M15: freq = M15 self.parsewarn('Frequency clamped to {0:#x}'.format(freq) + xtra) else: self.parseerror('Invalid frequency {0:#x}'.format(freq) + xtra) freq = 0 else: freq = int(round(freq * REF_S_15)) return freq def __lfo_rampamp__(self, mnemonic=''): xtra = '' if mnemonic: xtra = ' for ' + mnemonic amp = self.__expression__() rampamps = {4096:0, 2048:1, 1024:2, 512:3, 0:0, 1:1, 2:2, 3:3} if int(amp) == amp: amp = int(amp) if amp in rampamps: amp = rampamps[amp] else: self.parseerror('Invalid amplitude {}'.format(amp) + xtra) amp = 0 else: self.parseerror('Invalid amplitude {}'.format(amp) + xtra) amp = 0 return amp def __next__(self): self.sym = None self.prevline = self.sline while self.sym is None: if len(self.linebuf) == 0: if len(self.source) > 0: self.sline += 1 llex = shlex.shlex(self.source.pop(0)) llex.commenters = ';' self.linebuf = [t for t in llex] else: self.sym = {'type': 'EOF', 'txt':None, 'stxt':None, 'val':None} if len(self.linebuf) > 0: stxt = self.linebuf[0].upper() if stxt in op_tbl: self.sym = {'type': 'MNEMONIC', 'txt': self.linebuf.pop(0), 'stxt': stxt, 'val': None} elif stxt in ['EQU', 'MEM']: self.sym = {'type': 'ASSEMBLER', 'txt': self.linebuf.pop(0), 'stxt': stxt, 'val': None} elif stxt in ['<','>','*','/']: optxt = self.linebuf.pop(0) if len(self.linebuf) > 0: if self.linebuf[0] == optxt: optxt += self.linebuf.pop(0) if optxt in ['<','>']: self.scanerror('Invalid operator {}'.format(optxt)) optxt += optxt self.sym = {'type': 'OPERATOR', 'txt': optxt, 'stxt': optxt, 'val': None} elif stxt in ['|','^','&','+','-','~','!','(',')','INT']: self.sym = {'type': 'OPERATOR', 'txt': self.linebuf.pop(0), 'stxt': stxt, 'val': None} elif stxt[0] in ['%', '$']: pref = self.linebuf.pop(0) base = 2 if pref == '$': base = 16 if len(self.linebuf) > 0: ht = self.linebuf.pop(0) ival = 0 try: ival = int(ht.replace('_',''),base) except: self.scanerror('Invalid integer literal {}'.format( pref+ht)) self.sym = {'type': 'INTEGER', 'txt': pref+ht, 'stxt': pref+ht, 'val': ival} 
else: self.scanerror('End of line scanning for integer') self.sym = {'type': 'INTEGER', 'txt': pref, 'stxt': pref, 'val': 0} elif stxt[0].isdigit(): intpart = self.linebuf.pop(0).lower() ival = 0.0 if len(self.linebuf) > 0 and self.linebuf[0] == '.': self.linebuf.pop(0) if len(self.linebuf) > 0: frac = self.linebuf.pop(0) if frac.endswith('e') and len(self.linebuf) > 0: epart = self.linebuf.pop(0) if epart in ['+','-'] and len(self.linebuf) > 0: epart += self.linebuf.pop(0) frac = frac+epart try: ival = float(intpart+'.'+frac) except: self.scanerror( 'Invalid numeric literal {}'.format( intpart+'.'+frac)) self.sym = {'type': 'FLOAT', 'txt': intpart+'.'+frac, 'stxt': intpart+'.'+frac, 'val': ival} else: self.scanerror('Invalid numeric literal') self.sym = {'type': 'FLOAT', 'txt': intpart+'.0', 'stxt': intpart+'.0', 'val': ival} elif self.spinreals and intpart in ['2', '1']: ival = float(intpart) self.sym = {'type': 'FLOAT', 'stxt': intpart+'.0', 'txt': intpart, 'val': ival} else: ival = 0 base = 10 if intpart.startswith('0x'): base = 16 elif intpart.startswith('0b'): base = 2 try: ival = int(intpart, base) except: self.scanerror('Invalid integer literal {}'.format( intpart)) self.sym = {'type': 'INTEGER', 'txt': intpart, 'stxt': intpart, 'val': ival} elif stxt[0].isalpha(): lbl = self.linebuf.pop(0) if len(self.linebuf) > 0 and self.linebuf[0] == ':': self.sym = {'type': 'TARGET', 'txt': lbl, 'stxt': stxt, 'val': None} self.linebuf.pop(0) else: mod = '' if len(self.linebuf) > 0 and self.linebuf[0] in [ '^','#']: if stxt+self.linebuf[0] in self.symtbl: mod = self.linebuf.pop(0) self.sym = {'type': 'LABEL', 'txt': lbl+mod, 'stxt': stxt+mod, 'val': None} elif stxt == ',': self.sym = {'type': 'ARGSEP', 'txt': self.linebuf.pop(0), 'stxt': stxt, 'val': None} elif self.linebuf[0] == '\ufeff': self.linebuf.pop(0) else: self.scanerror('Unrecognised input {}'.format( self.linebuf.pop(0))) def scanerror(self, msg): self.doerror('scan error: ' + msg + ' on line {}'.format(self.sline)) self.ecount += 1 if self.ecount > MAXERR: self.doerror('too many errors, aborting.') sys.exit(-1)
MIT License
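parsewarn only formats a message with a line number, falling back to the previously scanned line when none is given, and hands it to the dowarn callback. A small self-contained sketch of that fallback behaviour; the sample message imitates the clamp warnings seen in the record's context, and the line number is arbitrary.

from typing import Optional


def format_parse_warning(msg: str, line: Optional[int] = None, prevline: int = 0) -> str:
    # Same fallback-to-previous-line logic as fv1parse.parsewarn.
    if line is None:
        line = prevline
    return 'warning: {} on line {}'.format(msg, line)


print(format_parse_warning('S1_14 arg clamped to 1.9999', prevline=12))
# warning: S1_14 arg clamped to 1.9999 on line 12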
markvdw/gpflow-inter-domain
GPflow/param.py
Parameterized.make_tf_array
python
def make_tf_array(self, X):
    count = 0
    for dh in self.data_holders:
        dh.make_tf_array()
    for p in self.sorted_params:
        count += p.make_tf_array(X[count:])
    return count
Distribute a flat tensorflow array amongst all the child parameters of this instance. X is a tensorflow placeholder. It gets passed to all the children of this class (that are Parameterized or Param objects), which then construct their tf_array variables from consecutive sections.
https://github.com/markvdw/gpflow-inter-domain/blob/0cf621e1896a3e1996f863b586c6cd2f795dd9f0/GPflow/param.py#L756-L769
from __future__ import absolute_import from contextlib import contextmanager from functools import wraps import numpy as np import pandas as pd import tensorflow as tf from . import transforms, session from ._settings import settings from .scoping import NameScoped float_type = settings.dtypes.float_type np_float_type = np.float32 if float_type is tf.float32 else np.float64 recompile_keys = ['prior', 'transform', 'fixed'] class Parentable(object): def __init__(self): self._parent = None @property def highest_parent(self): if self._parent is None: return self else: return self._parent.highest_parent @property def name(self): if self._parent is None: return 'unnamed' if isinstance(self._parent, ParamList): return 'item%i' % self._parent._list.index(self) matches = [key for key, value in self._parent.__dict__.items() if value is self] if len(matches) == 0: raise ValueError("mis-specified parent. This Param's\ _parent does not contain a reference to it.") if len(matches) > 1: raise ValueError("This Param appears to be doubly\ referenced by a parent") return matches[0] @property def long_name(self): if self._parent is None: return self.name return self._parent.long_name + '.' + self.name def __getstate__(self): d = self.__dict__.copy() d.pop('_parent') return d def __setstate__(self, d): self.__dict__.update(d) self._parent = None class Param(Parentable): def __init__(self, array, transform=transforms.Identity()): Parentable.__init__(self) self._array = np.asarray(np.atleast_1d(array), dtype=np_float_type) self.transform = transform self._tf_array = None self._log_jacobian = None self.prior = None self.fixed = False @property def value(self): return self._array.copy() def get_parameter_dict(self, d): d[self.long_name] = self.value def set_parameter_dict(self, d): self._array[...] = d[self.long_name] def get_samples_df(self, samples): if self.fixed: return pd.Series([self.value for _ in range(samples.shape[0])], name=self.long_name) start, _ = self.highest_parent.get_param_index(self) end = start + self.size samples = samples[:, start:end] samples = samples.reshape((samples.shape[0],) + (self.transform.free_state_size(self.shape),)) samples = np.atleast_1d(np.concatenate( [self.transform.forward(s).reshape((1,) + self.shape) for s in samples], 0)) return pd.Series([v for v in samples], name=self.long_name) def make_tf_array(self, free_array): if self.fixed: self._tf_array = tf.placeholder(dtype=float_type, shape=self._array.shape, name=self.name) self._log_jacobian = 0.0 return 0 free_size = self.transform.free_state_size(self.shape) x_free = free_array[:free_size] mapped_array = self.transform.tf_forward(x_free) self._tf_array = tf.reshape(mapped_array, self.shape) self._log_jacobian = self.transform.tf_log_jacobian(x_free) return free_size def get_free_state(self): if self.fixed: return np.empty((0,), np_float_type) return self.transform.backward(self.value.flatten()) def get_feed_dict_keys(self): d = {} if self.fixed: d[self] = self._tf_array return d def update_feed_dict(self, key_dict, feed_dict): if self.fixed: feed_dict[key_dict[self]] = self.value def set_state(self, x): if self.fixed: return 0 free_size = self.transform.free_state_size(self.shape) new_array = self.transform.forward(x[:free_size]).reshape(self.shape) assert new_array.shape == self.shape self._array[...] 
= new_array return free_size def randomize(self, distributions={}, skipfixed=True): if not (skipfixed and self.fixed): if self in distributions.keys(): self._array = distributions[self].sample(self.shape) else: try: self._array = self.prior.sample(self.shape) except AttributeError: randn = np.random.randn( self.transform.free_state_size(self.shape)) self._array = self.transform.forward(randn).reshape(self.shape) def build_prior(self): if self.prior is None: return tf.constant(0.0, float_type) elif self._tf_array is None: raise ValueError("tensorflow array has not been initialized") else: return self.prior.logp(self._tf_array) + self._log_jacobian def __setattr__(self, key, value): object.__setattr__(self, key, value) if key in recompile_keys: self.highest_parent._needs_recompile = True def __str__(self, prepend=''): return prepend + '\033[1m' + self.name + '\033[0m' + ' transform:' + str(self.transform) + ' prior:' + str(self.prior) + (' [FIXED]' if self.fixed else '') + '\n' + str(self.value) @property def size(self): return self._array.size @property def shape(self): return self._array.shape def _html_table_rows(self, name_prefix=''): html = "<tr>" html += "<td>{0}</td>".format(name_prefix + self.name) html += "<td>{0}</td>".format(str(self._array).replace('\n', '</br>')) html += "<td>{0}</td>".format(str(self.prior)) html += "<td>{0}</td>".format('[FIXED]' if self.fixed else str(self.transform)) html += "</tr>" return html def __getstate__(self): d = Parentable.__getstate__(self) for key in ['_tf_array', '_log_jacobian']: d.pop(key, None) return d def __setstate__(self, d): Parentable.__setstate__(self, d) self._log_jacobian = None self.fixed = self.fixed class DataHolder(Parentable): def __init__(self, array, on_shape_change='raise'): Parentable.__init__(self) dt = self._get_type(array) self._array = np.asarray(array, dtype=dt) assert on_shape_change in ['raise', 'pass', 'recompile'] self.on_shape_change = on_shape_change def _get_type(self, array): if any([array.dtype == np.dtype(t) for t in [np.float32, np.float64]]): return np_float_type elif any([array.dtype == np.dtype(t) for t in [np.int16, np.int32, np.int64]]): return np.int32 else: raise NotImplementedError("unknown dtype") def get_feed_dict_keys(self): return {self: self._tf_array} def update_feed_dict(self, key_dict, feed_dict): feed_dict[key_dict[self]] = self._array def __getstate__(self): d = Parentable.__getstate__(self) try: d.pop('_tf_array') except KeyError: pass return d def make_tf_array(self): self._tf_array = tf.placeholder(dtype=self._get_type(self._array), shape=[None] * self._array.ndim, name=self.name) def set_data(self, array): if self.shape == array.shape: self._array[...] = array else: if self.on_shape_change == 'raise': raise ValueError("The shape of this data must not change. 
\ (perhaps make the model again from scratch?)") elif self.on_shape_change == 'recompile': self._array = array.copy() self.highest_parent._needs_recompile = True elif self.on_shape_change == 'pass': self._array = array.copy() else: raise ValueError('invalid option') @property def value(self): return self._array.copy() @property def size(self): return self._array.size @property def shape(self): return self._array.shape def __str__(self, prepend='Data:'): return prepend + '\033[1m' + self.name + '\033[0m' + '\n' + str(self.value) class AutoFlow: def __init__(self, *tf_arg_tuples): self.tf_arg_tuples = tf_arg_tuples def __call__(self, tf_method): @wraps(tf_method) def runnable(instance, *np_args): storage_name = '_' + tf_method.__name__ + '_AF_storage' if hasattr(instance, storage_name): storage = getattr(instance, storage_name) else: storage = {} setattr(instance, storage_name, storage) storage['graph'] = tf.Graph() storage['session'] = session.get_session( graph=storage['graph'], output_file_name=settings.profiling.output_file_name + "_" + tf_method.__name__, output_directory=settings.profiling.output_directory, each_time=settings.profiling.each_time ) with storage['graph'].as_default(): storage['tf_args'] = [tf.placeholder(*a) for a in self.tf_arg_tuples] storage['free_vars'] = tf.placeholder(float_type, [None]) instance.make_tf_array(storage['free_vars']) with instance.tf_mode(): storage['tf_result'] = tf_method(instance, *storage['tf_args']) storage['feed_dict_keys'] = instance.get_feed_dict_keys() feed_dict = {} instance.update_feed_dict(storage['feed_dict_keys'], feed_dict) storage['session'].run(tf.global_variables_initializer(), feed_dict=feed_dict) feed_dict = dict(zip(storage['tf_args'], np_args)) feed_dict[storage['free_vars']] = instance.get_free_state() instance.update_feed_dict(storage['feed_dict_keys'], feed_dict) return storage['session'].run(storage['tf_result'], feed_dict=feed_dict) return runnable class Parameterized(Parentable): def __init__(self): Parentable.__init__(self) self.scoped_keys = [] self._tf_mode = False def get_parameter_dict(self, d=None): if d is None: d = {} for p in self.sorted_params: p.get_parameter_dict(d) return d def set_parameter_dict(self, d): for p in self.sorted_params: p.set_parameter_dict(d) def get_samples_df(self, samples): d = pd.DataFrame() for p in self.sorted_params: d = pd.concat([d, p.get_samples_df(samples)], axis=1) return d def __getattribute__(self, key): o = object.__getattribute__(self, key) try: if not object.__getattribute__(self, '_tf_mode'): return o except AttributeError: return o if isinstance(o, (Param, DataHolder)): return o._tf_array elif key in object.__getattribute__(self, 'scoped_keys'): return NameScoped(self.long_name + '.' + key)(o) return o def __setattr__(self, key, value): if key in self.__dict__.keys(): p = getattr(self, key) if isinstance(p, Param) and isinstance(value, (np.ndarray, float, int)): p._array[...] 
= value return if isinstance(p, (Param, Parameterized)) and isinstance(value, (Param, Parameterized)): p._parent = None if hasattr(self.highest_parent, '_needs_recompile'): self.highest_parent._needs_recompile = True if isinstance(p, DataHolder) and isinstance(value, np.ndarray): p.set_data(value) return if key is not '_parent' and isinstance(value, (Param, Parameterized)): if not hasattr(self, key) or not self.__getattribute__(key) is value: def _raise_for_existing_param(node): if node is value: raise ValueError('The Param(eterized) object {0} is already present in the tree'.format(value)) if isinstance(node, Parameterized): for child in node.sorted_params: _raise_for_existing_param(child) root = self.highest_parent _raise_for_existing_param(root) object.__setattr__(self, key, value) if isinstance(value, Parentable) and key is not '_parent': value._parent = self if key == '_needs_recompile': self._kill_autoflow() def _kill_autoflow(self): for key in list(self.__dict__.keys()): if key[0] == '_' and key[-11:] == '_AF_storage': if 'session' in getattr(self, key): getattr(self, key)['session'].close() delattr(self, key) [p._kill_autoflow() for p in self.sorted_params if isinstance(p, Parameterized)] def __getstate__(self): d = Parentable.__getstate__(self) for key in list(d.keys()): if key[0] == '_' and key[-11:] == '_AF_storage': d.pop(key) return d
Apache License 2.0
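The interesting part of make_tf_array is the bookkeeping: each child consumes a consecutive slice of the flat vector, reports how many entries it used, and the parent advances by that count. Below is a toy NumPy model of that slicing scheme, not GPflow code and with no TensorFlow placeholders; the class names are invented for the sketch.

import numpy as np


class ToyParam:
    """Stand-in for a Param: consumes `size` entries of the flat vector."""
    def __init__(self, shape):
        self.shape = shape
        self.size = int(np.prod(shape))
        self.array = None

    def make_array(self, x):
        self.array = x[:self.size].reshape(self.shape)
        return self.size


class ToyParameterized:
    """Stand-in for Parameterized: hands out consecutive slices to its children."""
    def __init__(self, params):
        self.params = params

    def make_array(self, x):
        count = 0
        for p in self.params:
            count += p.make_array(x[count:])
        return count


container = ToyParameterized([ToyParam((2, 2)), ToyParam((3,))])
used = container.make_array(np.arange(7.0))
print(used)                       # 7
print(container.params[0].array)  # [[0. 1.] [2. 3.]]
print(container.params[1].array)  # [4. 5. 6.]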
aoikuiyuyou/aoikrocketchaterrbot
src/aoikrocketchaterrbot/backends/aoikrocketchaterrbot.py
RocketChatUser.person
python
def person(self):
    return self._person
Get user name.

:return: User name.
https://github.com/aoikuiyuyou/aoikrocketchaterrbot/blob/dc225e984f40e9c0b6ce98158387a0174b923e36/src/aoikrocketchaterrbot/backends/aoikrocketchaterrbot.py#L216-L223
from __future__ import absolute_import import logging import os from pprint import pformat from threading import Event import time from traceback import format_exc from MeteorClient import CollectionData from MeteorClient import MeteorClient from errbot.backends.base import OFFLINE from errbot.backends.base import ONLINE from errbot.backends.base import Identifier from errbot.backends.base import Message from errbot.backends.base import Person from errbot.backends.base import Presence from errbot.backends.base import Room from errbot.core import ErrBot def metaclass(meta): def class_decorator(cls): attrs_dict = cls.__dict__.copy() attrs_dict.pop('__dict__', None) attrs_dict.pop('__weakref__', None) return meta(cls.__name__, cls.__bases__, attrs_dict) return class_decorator class KeyAsValueMeta(type): def __init__(cls, name, bases, attrs): super(KeyAsValueMeta, cls).__init__(name, bases, attrs) for key, _ in attrs.items(): setattr(cls, key, key) key_as_value = metaclass(KeyAsValueMeta) @key_as_value class CONFIG_KEYS(object): SERVER_URI = '' LOGIN_USERNAME = '' LOGIN_PASSWORD = '' PATCH_METEOR_CLIENT = '' RECONNECT_ENABLED = '' HEARTBEAT_ENABLED = '' HEARTBEAT_INTERVAL = '' HEARTBEAT_FUNC = '' BOT_LOG_LEVEL = '' _CONFIG_OBJ_KEY = 'AOIKROCKETCHATERRBOT_CONFIG' _ENV_VAR_NAME_PREFIX = 'AOIKROCKETCHATERRBOT_' class RocketChatUser(Person): def __init__(self, person, client=None, nick=None, fullname=None): self._person = person self._client = client self._nick = nick or self._person self._fullname = fullname or self._person @property
MIT License
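person is a plain read-only property over the value passed to the constructor, which the backend also uses as the default nick and full name. A tiny illustrative sketch of that pattern; the User class here is invented for the example and is not part of the backend.

class User:
    """Minimal analogue of RocketChatUser's read-only `person` property."""
    def __init__(self, person, nick=None, fullname=None):
        self._person = person
        # Same fallbacks as the record's context: nick/fullname default to the user name.
        self._nick = nick or person
        self._fullname = fullname or person

    @property
    def person(self):
        return self._person


u = User('alice')
print(u.person)  # alice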
argoproj-labs/argo-client-python
argo/workflows/client/models/v1alpha1_http_artifact.py
V1alpha1HTTPArtifact.__init__
python
def __init__(self, headers=None, url=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    self._headers = None
    self._url = None
    self.discriminator = None

    if headers is not None:
        self.headers = headers
    self.url = url
V1alpha1HTTPArtifact - a model defined in OpenAPI
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/models/v1alpha1_http_artifact.py#L45-L57
import pprint
import re

import six

from argo.workflows.client.configuration import Configuration


class V1alpha1HTTPArtifact(object):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {
        'headers': 'list[V1alpha1Header]',
        'url': 'str'
    }

    attribute_map = {
        'headers': 'headers',
        'url': 'url'
    }
Apache License 2.0
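A hedged usage sketch for the generated model above. It assumes the Argo workflows client package is installed and that the class is importable from the module path shown in this record; the URL value is a placeholder.

# Assumption: the module path mirrors the file path in this record.
from argo.workflows.client.models.v1alpha1_http_artifact import V1alpha1HTTPArtifact

artifact = V1alpha1HTTPArtifact(url='https://example.com/artifacts/input.tar.gz')
print(artifact.url)      # https://example.com/artifacts/input.tar.gz
print(artifact.headers)  # None - optional field left unset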
google/cloud-forensics-utils
libcloudforensics/providers/gcp/internal/compute.py
GoogleCloudCompute.CreateInstanceFromRequest
python
def CreateInstanceFromRequest(
    self,
    request_body: Dict[str, str],
    zone: Optional[str] = None) -> 'GoogleComputeInstance':
  instance_name = request_body['name']
  if not common.COMPUTE_RFC1035_REGEX.match(instance_name):
    raise errors.InvalidNameError(
        'Instance name {0:s} does not comply with {1:s}.'.format(
            instance_name, common.COMPUTE_RFC1035_REGEX.pattern), __name__)
  gce_instance_client = self.GceApi().instances()
  compute_zone = zone if zone else self.default_zone
  params = {
      'project': self.project_id,
      'zone': compute_zone,
      'body': request_body
  }
  try:
    response = common.ExecuteRequest(gce_instance_client, 'insert', params)[0]
    self.BlockOperation(response, zone=compute_zone)
  except HttpError as e:
    if e.resp.status == 409:
      msg = (
          'An instance with the name {0:s} already exists '
          'in project {1:s}').format(instance_name, self.project_id)
      raise errors.ResourceAlreadyExistsError(msg, __name__) from e
    msg = 'Error while creating instance {0:s}'.format(instance_name)
    raise errors.ResourceCreationError(msg, __name__) from e

  return GoogleComputeInstance(
      project_id=self.project_id, zone=compute_zone, name=instance_name)
Creates an instance from an instance.insert request body.

Args:
  request_body: Insert instance request body at https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert#request-body  # pylint: disable=line-too-long
  zone: Compute zone to start the instance in, default is self.default_zone.

Returns:
  Compute instance object.

Raises:
  ResourceAlreadyExistsError: If an instance with the same name already exists.
  InvalidNameError: If instance name is invalid.
https://github.com/google/cloud-forensics-utils/blob/38142cf3e00f70d976aa42aa2f9a1981c0240b19/libcloudforensics/providers/gcp/internal/compute.py#L510-L551
import os import subprocess import time from collections import defaultdict from typing import Dict, Tuple, List, TYPE_CHECKING, Union, Optional, Any from googleapiclient.errors import HttpError from libcloudforensics.providers.gcp.internal import build from libcloudforensics.providers.gcp.internal import common from libcloudforensics.providers.gcp.internal import compute_base_resource from libcloudforensics.scripts import utils from libcloudforensics import logging_utils from libcloudforensics import errors if TYPE_CHECKING: import googleapiclient logging_utils.SetUpLogger(__name__) logger = logging_utils.GetLogger(__name__) DEFAULT_MACHINE_TYPE = 'e2-standard' E2_STANDARD_CPU_CORES = [2, 4, 8, 16, 32] NON_HIERARCHICAL_FW_POLICY_LEVEL = 999 class GoogleCloudCompute(common.GoogleCloudComputeClient): def __init__( self, project_id: str, default_zone: Optional[str] = None) -> None: self.project_id = project_id self.default_zone = default_zone or 'us-central1-f' self.default_region = self.default_zone.rsplit('-', 1)[0] self._instances = {} self._disks = {} self._region_disks = {} super().__init__(self.project_id) def Instances(self, refresh: bool = True) -> Dict[str, 'GoogleComputeInstance']: if not refresh and self._instances: return self._instances self._instances = self.ListInstances() return self._instances def Disks(self, refresh: bool = True) -> Dict[str, 'GoogleComputeDisk']: if not refresh and self._disks: return self._disks self._disks = self.ListDisks() return self._disks def RegionDisks( self, refresh: Optional[bool] = True) -> Dict[str, 'GoogleRegionComputeDisk']: if not refresh and self._region_disks: return self._region_disks self._region_disks = self.ListRegionDisks() return self._region_disks def ListInstances(self) -> Dict[str, 'GoogleComputeInstance']: instances = {} gce_instance_client = self.GceApi().instances() responses = common.ExecuteRequest( gce_instance_client, 'aggregatedList', {'project': self.project_id}) for response in responses: for zone in response['items']: try: for instance in response['items'][zone]['instances']: _, zone = instance['zone'].rsplit('/', 1) name = instance['name'] deletion_protection = instance.get('deletionProtection', False) instances[name] = GoogleComputeInstance( self.project_id, zone, name, labels=instance.get('labels'), deletion_protection=deletion_protection) except KeyError: pass return instances def ListMIGSByInstanceName(self, zone: str) -> Dict[str, str]: groups = self.ListMIGS(zone) groups_by_instance = {} for group_id, instances in groups.items(): for instance in instances: if instance.name in groups_by_instance: raise RuntimeError('Multiple managed instance groups for instance') groups_by_instance[instance.name] = group_id return groups_by_instance def ListMIGS(self, zone: str) -> Dict[str, List['GoogleComputeInstance']]: groups_client = self.GceApi().instanceGroupManagers() responses = common.ExecuteRequest( groups_client, 'list', { 'project': self.project_id, 'zone': zone, }) groups = defaultdict(list) for response in responses: for group in response.get('items', []): instances = self._ListInstancesForMIG(zone, group['name']) groups[group['name']].extend(instances) return groups def _ListInstancesForMIG(self, zone: str, group_id: str) -> List['GoogleComputeInstance']: groups_client = self.GceApi().instanceGroupManagers() responses = common.ExecuteRequest( groups_client, 'listManagedInstances', { 'project': self.project_id, 'zone': zone, 'instanceGroupManager': group_id, }) managed_instances = [] for response in responses: for 
instance in response.get('managedInstances', []): name = instance['instance'].split('/')[-1] instance = GoogleComputeInstance(self.project_id, zone, name) managed_instances.append(instance) return managed_instances def ListDisks(self) -> Dict[str, 'GoogleComputeDisk']: disks = {} gce_disk_client = self.GceApi().disks() responses = common.ExecuteRequest( gce_disk_client, 'aggregatedList', {'project': self.project_id}) for response in responses: for zone in response['items']: try: for disk in response['items'][zone]['disks']: _, zone = disk['zone'].rsplit('/', 1) name = disk['name'] disks[name] = GoogleComputeDisk( self.project_id, zone, name, labels=disk.get('labels')) except KeyError: pass return disks def ListComputeRegions(self) -> List[str]: gce_regions_client = self.GceApi().regions() responses = common.ExecuteRequest( gce_regions_client, 'list', {'project': self.project_id}) regions = [] for response in responses: regions.extend(response.get('items', [])) return [region['name'] for region in regions] def ListRegionDisks(self) -> Dict[str, 'GoogleRegionComputeDisk']: region_disks = {} gce_region_disk_client = self.GceApi().regionDisks() for region in self.ListComputeRegions(): responses = common.ExecuteRequest( gce_region_disk_client, 'list', { 'project': self.project_id, 'region': region }) for response in responses: disks = response.get('items', []) for disk in disks: name = disk['name'] region_disks[name] = GoogleRegionComputeDisk( self.project_id, region, name, labels=disk.get('labels')) return region_disks def GetRegionDisk(self, disk_name: str) -> 'GoogleRegionComputeDisk': region_disks = self.RegionDisks() try: return region_disks[disk_name] except KeyError as e: raise errors.ResourceNotFoundError( 'Regional disk {0:s} was not found in project {1:s}'.format( disk_name, self.project_id), __name__) from e def GetInstance(self, instance_name: str) -> 'GoogleComputeInstance': instances = self.Instances() instance = instances.get(instance_name) if not instance: raise errors.ResourceNotFoundError( 'Instance {0:s} was not found in project {1:s}'.format( instance_name, self.project_id), __name__) return instance def GetDisk(self, disk_name: str) -> 'GoogleComputeDisk': disks = self.Disks() try: return disks[disk_name] except KeyError as e: raise errors.ResourceNotFoundError( 'Disk {0:s} was not found in project {1:s}'.format( disk_name, self.project_id), __name__) from e def CreateDiskFromSnapshot( self, snapshot: 'GoogleComputeSnapshot', disk_name: Optional[str] = None, disk_name_prefix: Optional[str] = None, disk_type: str = 'pd-standard') -> 'GoogleComputeDisk': if not disk_name: disk_name = common.GenerateDiskName(snapshot, disk_name_prefix) body = { 'name': disk_name, 'sourceSnapshot': snapshot.GetSourceString(), 'type': 'projects/{0:s}/zones/{1:s}/diskTypes/{2:s}'.format( self.project_id, self.default_zone, disk_type) } try: gce_disks_client = self.GceApi().disks() request = gce_disks_client.insert( project=self.project_id, zone=self.default_zone, body=body) response = request.execute() except HttpError as exception: if exception.resp.status == 409: raise errors.ResourceAlreadyExistsError( 'Disk {0:s} already exists: {1!s}'.format(disk_name, exception), __name__) from exception raise errors.ResourceCreationError( 'Unknown error occurred when creating disk from Snapshot:' ' {0!s}'.format(exception), __name__) from exception self.BlockOperation(response, zone=self.default_zone) return GoogleComputeDisk( project_id=self.project_id, zone=self.default_zone, name=disk_name) def 
GetMachineTypes(self, machine_type: str, zone: Optional[str] = None) -> Dict[str, Any]: compute_zone = zone if zone else self.default_zone machine_types_client = self.GceApi().machineTypes() params = { 'project': self.project_id, 'zone': compute_zone, 'machineType': machine_type } return common.ExecuteRequest(machine_types_client, 'get', params)[0] def GetDiskTypes(self, disk_type: str, zone: Optional[str] = None) -> Dict[str, Any]: compute_zone = zone if zone else self.default_zone disk_types_client = self.GceApi().diskTypes() params = { 'project': self.project_id, 'zone': compute_zone, 'diskType': disk_type } return common.ExecuteRequest(disk_types_client, 'get', params)[0] def GetImageFamily(self, image_family: str, project: Optional[str] = None) -> Dict[str, Any]: images_client = self.GceApi().images() params = {'project': project, 'family': image_family} return common.ExecuteRequest(images_client, 'getFromFamily', params)[0] def GetNetwork(self, network_name: str) -> Dict[str, Any]: networks_client = self.GceApi().networks() params = { 'project': self.project_id, 'network': network_name, } return common.ExecuteRequest(networks_client, 'get', params)[0]
Apache License 2.0
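A hedged sketch of calling CreateInstanceFromRequest. It assumes libcloudforensics is installed and that credentials for the project are available; the project, zone, and request-body values are placeholders, and the body fields follow the public instances.insert API that the docstring links to rather than anything specific to this library.

from libcloudforensics.providers.gcp.internal.compute import GoogleCloudCompute

compute_client = GoogleCloudCompute('my-forensics-project', default_zone='us-central1-f')

# Minimal insert body; the name must satisfy COMPUTE_RFC1035_REGEX.
request_body = {
    'name': 'analysis-vm-1',
    'machineType': 'zones/us-central1-f/machineTypes/e2-standard-4',
    'disks': [{
        'boot': True,
        'autoDelete': True,
        'initializeParams': {
            'sourceImage': 'projects/debian-cloud/global/images/family/debian-11',
        },
    }],
    'networkInterfaces': [{'network': 'global/networks/default'}],
}

instance = compute_client.CreateInstanceFromRequest(request_body)
print(instance.name)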
conducto/conducto
image/internal_image.py
Image._build
python
async def _build(self, st: "HistoryEntry"):
    assert self.dockerfile is not None or self.dockerfile_text is not None

    if self.needs_cloning():
        gitroot = self._get_clone_dest()
        context = (
            self.context.to_docker_mount(gitroot=gitroot)
            if self.context
            else None
        )
        dockerfile = (
            self.dockerfile.to_docker_mount(gitroot=gitroot)
            if self.dockerfile
            else "-"
        )
    elif self._is_s3_url():
        s3root = self._get_s3_dest()
        context = (
            self.context.to_docker_mount(s3root=s3root) if self.context else None
        )
        dockerfile = (
            self.dockerfile.to_docker_mount(s3root=s3root)
            if self.dockerfile
            else "-"
        )
    else:
        context = (
            self.context.to_docker_mount(pipeline_id=self._pipeline_id)
            if self.context
            else None
        )
        dockerfile = (
            self.dockerfile.to_docker_mount(pipeline_id=self._pipeline_id)
            if self.dockerfile
            else "-"
        )

    dockerfile_args = ["-f", dockerfile]
    if context is not None:
        dockerfile_args.append(context)

    input = None if self.dockerfile_text is None else self.dockerfile_text.encode()

    pipeline_id = os.getenv("CONDUCTO_PIPELINE_ID", self._pipeline_id)

    build_args = []
    if self.docker_build_args is not None:
        for k, v in self.docker_build_args.items():
            build_args += ["--build-arg", f"{k}={v}"]

    await st.run(
        "docker",
        "build",
        "-t",
        self.name_built,
        "--label",
        "com.conducto.user",
        "--label",
        f"com.conducto.pipeline={pipeline_id}",
        *build_args,
        *dockerfile_args,
        env={**os.environ, "DOCKER_BUILDKIT": "1"},
        input=input,
    )
Build this Image's `dockerfile` or `dockerfile_text`. If `copy_*` or `install_*` are passed, then additional code or packages will be added in a later step.
https://github.com/conducto/conducto/blob/b480780905f5a25e8c803b60ca7cdf6976ce5ef6/image/internal_image.py#L853-L918
import asyncio import contextlib import concurrent.futures import functools import hashlib import json import os import subprocess import sys import time import traceback import typing from conducto.shared import ( async_utils, client_utils, constants, imagepath, types as t, log, ) import conducto from . import dockerfile as dockerfile_mod CONST_EE = constants.ExecutionEnv if sys.version_info >= (3, 7): asynccontextmanager = contextlib.asynccontextmanager else: from conducto.shared import async_backport asynccontextmanager = async_backport.asynccontextmanager class Status: PENDING = "pending" QUEUED = "queued" CLONING = "cloning" SYNCING_S3 = "syncing_s3" PULLING = "pulling" BUILDING = "building" INSTALLING = "installing" COPYING = "copying" EXTENDING = "extending" PUSHING = "pushing" DONE = "done" ERROR = "error" CANCELLED = "cancelled" order = [ PENDING, QUEUED, CLONING, PULLING, BUILDING, INSTALLING, COPYING, EXTENDING, PUSHING, DONE, ERROR, CANCELLED, ] class Image: _PULLED_IMAGES = set() _CONTEXT = None _CLONE_LOCKS = {} _COMPLETED_CLONES = set() _DOCKERHUB_LOGIN_ATTEMPTED = False AUTO = "__auto__" def __init__( self, image=None, *, instantiation_directory=None, dockerfile=None, dockerfile_text=None, docker_build_args=None, context=None, copy_repo=None, copy_dir=None, copy_url=None, copy_branch=None, docker_auto_workdir=True, install_pip=None, install_npm=None, install_packages=None, install_docker=False, path_map=None, shell=AUTO, name=None, git_urls=None, reqs_py=None, reqs_npm=None, reqs_packages=None, reqs_docker=False, **kwargs, ): kwargs.pop("pre_built", None) kwargs.pop("git_sha", None) if len(kwargs): EE = constants.ExecutionEnv if EE.value() not in (EE.manager_all | EE.agent): raise ValueError(f"unknown args: {','.join(kwargs)}") if name is None: raise Exception("Image has no name") self.name = name if bool(image) + bool(dockerfile) + bool(dockerfile_text) > 1: raise ValueError( f"Must specify at most 1: image={image} dockerfile={dockerfile} " f"dockerfile_text=({len(dockerfile_text or '')} chars)" ) if not image and not dockerfile and not dockerfile_text: image = f"python:{sys.version_info[0]}.{sys.version_info[1]}-slim" if not install_pip: install_pip = ["conducto"] if copy_dir and copy_url: raise ValueError( f"Must not specify copy_dir ({copy_dir}) and copy_url ({copy_url})." 
) if copy_url: self.copy_url = copy_url is_s3_url = self._is_s3_url() if is_s3_url and copy_branch: raise ValueError( f"If specifying an s3 copy_url ({copy_url}) must " f"not specify copy_branch ({copy_branch})" ) elif not is_s3_url and not copy_branch: raise ValueError( f"If specifying copy_url ({copy_url}) must " f"also specify copy_branch ({copy_branch})" ) self.image = image self.instantiation_directory = instantiation_directory self.dockerfile = dockerfile self.dockerfile_text = dockerfile_text self.docker_build_args = docker_build_args self.context = context self.copy_repo = copy_repo self.copy_dir = copy_dir self.copy_dir_original = copy_dir self.copy_url = copy_url self.copy_branch = copy_branch self.docker_auto_workdir = docker_auto_workdir self.install_pip = install_pip or reqs_py self.install_npm = install_npm or reqs_npm self.install_packages = install_packages or reqs_packages self.install_docker = install_docker or reqs_docker self.path_map = path_map or {} self.shell = shell if self.path_map: def auto_detect_deserialize(e): if e.find("pathsep") >= 0: return imagepath.Path.from_dockerhost_encoded(e) return e self.path_map = { auto_detect_deserialize(external): internal for external, internal in self.path_map.items() } if self.copy_repo: repo_root: typing.Optional[imagepath.Path] = None if CONST_EE.value() in CONST_EE.manager_all: pm = self.path_map elif CONST_EE.value() in CONST_EE.worker_all: pm = _get_path_map() elif CONST_EE.value() == CONST_EE.EXTERNAL: pm = {} else: raise EnvironmentError( f"Don't know what path_map to use for ExecutionEnvironment={CONST_EE.value()}" ) for k, v in pm.items(): if v == constants.ConductoPaths.COPY_LOCATION: repo_root = k if not repo_root: repo_root = self._detect_repo_root() mark = repo_root.get_git_marks()[-1] repo_url = mark.get("url") repo_branch = mark.get("branch") if git_urls: for url in git_urls: sanitized = imagepath.Path._sanitize_git_url(url) repo_root = repo_root.mark_named_share(sanitized, "") if self.copy_branch and repo_branch and self.copy_branch != repo_branch: raise ValueError( f"copy_branch ({self.copy_branch}) does not match branch inherited " f"from path_map ({repo_branch}" ) self.copy_branch = self.copy_branch or repo_branch if self.copy_branch: self.copy_url = repo_url else: self.copy_dir = repo_root repo_root = self._get_contextual_path_helper(repo_root) self.path_map[repo_root] = constants.ConductoPaths.COPY_LOCATION if not self.context: self.context = repo_root if ( repo_url and CONST_EE.value() == CONST_EE.EXTERNAL and not CONST_EE.headless() ): sharename = imagepath.Path._sanitize_git_url(repo_url) self.share_directory(sharename, repo_root) if self.dockerfile: self.dockerfile = self._get_contextual_path_helper(self.dockerfile) if not self.context: one_level_up = os.path.dirname(self.dockerfile.to_docker_host()) self.context = self._get_contextual_path_helper(one_level_up) if self.context: self.context = self._get_contextual_path_helper(self.context) if self.copy_dir: self.copy_dir = self._get_contextual_path_helper(self.copy_dir) self.path_map[self.copy_dir] = constants.ConductoPaths.COPY_LOCATION if self.path_map: self.path_map = { self._get_contextual_path_helper(external): internal for external, internal in self.path_map.items() } self.history = [HistoryEntry(Status.PENDING)] self._make_fut: typing.Optional[asyncio.Future] = None self._cloud_tag_convert = None self._pipeline_id = os.getenv("CONDUCTO_PIPELINE_ID") if CONST_EE.headless() and not self.is_cloud_building(): raise ValueError( "If in headless mode 
(like from a Git webhook), image must be cloud-" "buildable. Cannot use copy_dir. If using dockerfile or copy_repo, " "then copy_branch must be set so that the contents come from Git " "instead of the local file system." ) def __eq__(self, other): if isinstance(other, Image): return self.to_dict() == other.to_dict() else: try: return Image(**other.to_dict()) == self except: return False def _detect_repo_root(self): if CONST_EE.headless(): git_root = { "type": "git", "url": self.copy_url, "branch": self.copy_branch, } return imagepath.Path.from_marked_root(git_root, "") else: outside_dir = self.instantiation_directory git_root = imagepath.Path._get_git_root(outside_dir) if not git_root: raise ValueError( f"copy_repo=True was specified, but could not find Git " f"root for {outside_dir}." ) git_url = imagepath.Path._get_git_origin(git_root) return self.get_contextual_path( git_root, branch=self.copy_branch, url=self.copy_url or git_url ).to_docker_host_path() @staticmethod def _serialize_path(p): return p._id() if isinstance(p, imagepath.Path) else p @staticmethod def _serialize_pathmap(pathmap): if pathmap: return {p.linear(): v for p, v in pathmap.items()} return None @property def id(self): try: return self.to_dict() except: print(traceback.format_exc()) raise def to_dict(self): return { "name": self.name, "image": self.image, "dockerfile": self._serialize_path(self.dockerfile), "dockerfile_text": self.dockerfile_text, "docker_build_args": self.docker_build_args, "docker_auto_workdir": self.docker_auto_workdir, "context": self._serialize_path(self.context), "copy_repo": self.copy_repo, "copy_dir": self._serialize_path(self.copy_dir), "copy_url": self.copy_url, "copy_branch": self.copy_branch, "install_pip": self.install_pip, "install_npm": self.install_npm, "install_packages": self.install_packages, "install_docker": self.install_docker, "path_map": self._serialize_pathmap(self.path_map), "shell": self.shell, } def to_update_dict(self): return {"shell": self.shell} def _get_image_tag(self): d = self.to_dict() del d["name"] del d["shell"] key = json.dumps(d).encode() return hashlib.md5(key).hexdigest() def _get_contextual_path_helper( self, p: typing.Union[imagepath.Path, dict, str], **kwargs ) -> imagepath.Path: return self.get_contextual_path( p, branch=self.copy_branch, url=self.copy_url, **kwargs ) def get_contextual_path( self, p: typing.Union[imagepath.Path, dict, str], *, named_shares=True, branch=None, url=None, ) -> imagepath.Path: if isinstance(p, imagepath.Path): return p if isinstance(p, dict) and "pathsep" in p: return imagepath.Path.from_dockerhost_encoded(p) if constants.ExecutionEnv.value() in constants.ExecutionEnv.manager_all: return imagepath.Path.from_localhost(p) op = os.path if not op.isabs(p): p = op.join(self.instantiation_directory, p) p = op.realpath(p) for external, internal in _get_path_map().items(): if p.startswith(internal): tail = p[len(internal) :].lstrip("/") return external.append(tail, sep="/") if op.exists(p): result = imagepath.Path.from_localhost(p) result = result.mark_git_root(branch=branch, url=url) else: result = imagepath.Path.from_localhost(p) result = result.to_docker_host_path() if named_shares: conf = conducto.api.Config() shares = conf.get_named_share_mapping(conf.default_profile) for name, paths in shares.items(): for path in paths: path = imagepath.Path.from_dockerhost(path) if result.is_subdir_of(path): result = result.mark_named_share(name, path.to_docker_host()) return result def share_directory(self, name, relative): path = 
self.get_contextual_path(relative, named_shares=False) config = conducto.api.Config() config.register_named_share(config.default_profile, name, path) register_directory = share_directory async def _sha1(self): if self.needs_cloning(): root = self._get_clone_dest() else: root = self.copy_dir.to_docker_mount(pipeline_id=self._pipeline_id) out, err = await async_utils.run_and_check( "git", "-C", root, "rev-parse", "HEAD" ) return out.decode().strip() @property def name_built(self): if self.needs_building(): return f"conducto_built:{self._pipeline_id}_{self._get_image_tag()}" else: return self.image @property def name_installed(self): if self.needs_installing(): return f"conducto_installed:{self._pipeline_id}_{self._get_image_tag()}" else: return self.name_built @property def name_copied(self): if self.needs_copying() or os.getenv("CONDUCTO_TEMPLATE_SOURCE"): return f"conducto_copied:{self._pipeline_id}_{self._get_image_tag()}" else: return self.name_installed @property def name_local_extended(self): return f"conducto_extended:{self._pipeline_id}_{self._get_image_tag()}" @property def name_cloud_extended(self): if self._pipeline_id is None: raise ValueError("Must specify pipeline_id before pushing to cloud") registry, repo_name = _get_repo_info() return f"{registry}/{repo_name}:{self._get_image_tag()}_{self._pipeline_id}" @property def status(self): return self.history[-1].status @property def build_error(self): if self.history[-1].status == Status.ERROR: h = self.history[-1] if h.stderr: return h.stderr else: return h.stdout return None async def make(self, push_to_cloud, callback=lambda: None): if self._make_fut is None: self._make_fut = asyncio.ensure_future(self._make(push_to_cloud, callback)) is_already_done = self._make_fut.done() try: await self._make_fut finally: if not is_already_done: callback() async def _make(self, push_to_cloud, callback): async for _ in self._make_generator(push_to_cloud, callback): pass async def _make_generator(self, push_to_cloud, callback, force_rebuild=False): if self.history and self.history[-1].end is None: await self.history[-1].finish() if callback: await callback(Status.PENDING, Status.DONE, self.history[-1]) if self.needs_cloning(): async with self._new_status(Status.CLONING, callback) as st: if force_rebuild or not self._clone_complete(): await callback() await self._clone(st) await st.finish() else: await st.finish("Using cached clone") yield if self._is_s3_url(): async with self._new_status(Status.SYNCING_S3, callback) as st: if force_rebuild or not self._sync_s3_complete(): await callback() await self._sync_s3(st) await st.finish() else: await st.finish("Using cached s3 copy") if await self.needs_pulling(): async with self._new_status(Status.PULLING, callback) as st: if ( force_rebuild or os.getenv("CONDUCTO_TEMPLATE_SOURCE") or not await self._image_exists(self.image) ): await callback() await self._pull(st) await st.finish() else: await st.finish("Image already pulled") yield if self.needs_building(): async with self._new_status(Status.BUILDING, callback) as st: if force_rebuild or not await self._image_exists(self.name_built): await callback() await self._build(st) await st.finish() else: await st.finish("Dockerfile already built") yield if self.needs_installing(): async with self._new_status(Status.INSTALLING, callback) as st: if force_rebuild or not await self._image_exists(self.name_installed): await callback() await self._install(st) await st.finish() else: await st.finish("Python libraries already installed.") yield if self.needs_copying(): 
async with self._new_status(Status.COPYING, callback) as st: if force_rebuild or not await self._image_exists(self.name_copied): await callback() await self._copy(st) await st.finish() else: await st.finish("Code already copied.") yield async with self._new_status(Status.EXTENDING, callback) as st: if force_rebuild or not await self._image_exists(self.name_local_extended): await callback() await self._extend(st) await st.finish() else: await st.finish("Conducto toolchain already added") yield if push_to_cloud: async with self._new_status(Status.PUSHING, callback) as st: await callback() await self._push(st) await st.finish() await self._mark_done() if callback: await callback(Status.DONE, Status.DONE, self.history[-1]) async def _mark_done(self): if self.history and self.history[-1].end is None: await self.history[-1].finish() if not self.history or self.history[-1].status != Status.DONE: self.history.append(HistoryEntry(Status.DONE, finish=True)) async def _image_exists(self, image): try: await async_utils.run_and_check("docker", "image", "inspect", image) except subprocess.CalledProcessError: return False else: return True @asynccontextmanager async def _new_status(self, status, callback): entry = HistoryEntry(status) self.history.append(entry) try: yield entry except subprocess.CalledProcessError: await entry.finish() if callback: await callback(status, Status.ERROR, entry) self.history.append(HistoryEntry(Status.ERROR, finish=True)) if callback: await callback(Status.ERROR, Status.ERROR, self.history[-1]) raise except (asyncio.CancelledError, concurrent.futures.CancelledError): await entry.finish(None, None) if callback: await callback(status, Status.CANCELLED, entry) self.history.append(HistoryEntry(Status.CANCELLED, finish=True)) if callback: await callback(Status.ERROR, Status.ERROR, self.history[-1]) raise except Exception: await entry.finish(None, "\r\n".join(traceback.format_exc().splitlines())) if callback: await callback(status, Status.ERROR, entry) self.history.append(HistoryEntry(Status.ERROR, finish=True)) if callback: await callback(Status.ERROR, Status.ERROR, self.history[-1]) raise else: if not entry.end: await entry.finish() if callback: await callback(status, None, entry) def needs_cloning(self): if os.getenv("CONDUCTO_TEMPLATE_SOURCE"): return False return bool(self.copy_branch) def _get_clone_dest(self): return constants.ConductoPaths.git_clone_dest( pipeline_id=self._pipeline_id, url=self.copy_url, branch=self.copy_branch ) def _get_s3_dest(self): return constants.ConductoPaths.s3_copy_dest( pipeline_id=self._pipeline_id, url=self.copy_url ) def _get_clone_lock(self, dest): if dest in Image._CLONE_LOCKS: lock = Image._CLONE_LOCKS[dest] else: lock = Image._CLONE_LOCKS[dest] = asyncio.Lock() return lock def _clone_complete(self): dest = self._get_clone_dest() return dest in Image._COMPLETED_CLONES def _sync_s3_complete(self): dest = self._get_s3_dest() return dest in Image._COMPLETED_CLONES async def _clone(self, st: "HistoryEntry"): from conducto.integrations import git dest = self._get_clone_dest() if constants.ExecutionEnv.images_only(): real_url = self.copy_url fake_url = self.copy_url else: real_url = git.clone_url(self.copy_url) fake_url = git.clone_url( self.copy_url, token=t.Token("{__conducto_token__}") ) async with self._get_clone_lock(dest): if os.path.exists(dest): await async_utils.run_and_check( "git", "-C", dest, "config", "remote.origin.url", real_url ) await st.run("git", "-C", dest, "fetch") await st.run( "git", "-C", dest, "reset", "--hard", 
f"origin/{self.copy_branch}", ) else: await st.run( "git", "clone", "--single-branch", "--branch", self.copy_branch, real_url, dest, ) await st.run("git", "-C", dest, "config", "remote.origin.url", fake_url) Image._COMPLETED_CLONES.add(dest) async def _sync_s3(self, st: "HistoryEntry"): dest = self._get_s3_dest() async with self._get_clone_lock(dest): creds = conducto.api.Auth().get_credentials() env_with_creds = { **os.environ, "AWS_ACCESS_KEY_ID": creds["AccessKeyId"], "AWS_SECRET_ACCESS_KEY": creds["SecretKey"], "AWS_SESSION_TOKEN": creds["SessionToken"], } await st.run( "aws", "s3", "sync", "--exclude", ".git*", "--exclude", "*/.git*", self.copy_url, dest, env=env_with_creds, ) Image._COMPLETED_CLONES.add(dest) async def _pull(self, st: "HistoryEntry"): if os.getenv("CONDUCTO_TEMPLATE_SOURCE"): t_id = os.getenv("CONDUCTO_TEMPLATE_SOURCE") registry, repo_name = _get_repo_info() tempsrc = f"{registry}/{repo_name}:template-{t_id}-{self._get_image_tag()}" log.log(f"building {self.name} based on template {tempsrc}") await st.run("docker", "pull", tempsrc) await st.run("docker", "tag", tempsrc, self.name_copied) return is_dockerhub_image = True if "/" in self.image: possible_domain = self.image.split("/", 1)[0] if "." in possible_domain or ":" in possible_domain: is_dockerhub_image = False if is_dockerhub_image and not Image._DOCKERHUB_LOGIN_ATTEMPTED: if ( not constants.ExecutionEnv.images_only() and not conducto.api.Auth().is_anonymous() ): secrets = await conducto.api.AsyncSecrets().get_user_secrets( include_org_secrets=True ) if "DOCKERHUB_USER" in secrets and "DOCKERHUB_PASSWORD" in secrets: await async_utils.run_and_check( "docker", "login", "-u", secrets["DOCKERHUB_USER"], "--password-stdin", input=secrets["DOCKERHUB_PASSWORD"].encode("utf8"), ) Image._DOCKERHUB_LOGIN_ATTEMPTED = True await st.run("docker", "pull", self.image)
Apache License 2.0
scikit-hep/uproot4
src/uproot/interpretation/numerical.py
AsFloat16.to_dtype
python
def to_dtype(self):
    return numpy.dtype((numpy.float32, self.to_dims))
The ``numpy.dtype`` of the output array. A shape (``dtype.shape``) can be used to construct a fixed-size array for each entry. (Not applicable to variable-length lists! See :doc:`uproot.interpretation.jagged.AsJagged`.) The finalized array's ``array.shape[1:] == dtype.shape``.
https://github.com/scikit-hep/uproot4/blob/e0db77a2a10d701cb48f72e9f0d7867e1589572d/src/uproot/interpretation/numerical.py#L648-L657
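As a quick aid to the docstring above, here is a hedged illustration (not from the uproot sources; the `(2, 3)` dims are a made-up stand-in for `to_dims`) of how a shaped `numpy.dtype` like the one returned by `AsFloat16.to_dtype` yields a fixed-size block per entry, so that `array.shape[1:] == dtype.shape`.

import numpy

dt = numpy.dtype((numpy.float32, (2, 3)))  # analogous to to_dtype with to_dims == (2, 3)
arr = numpy.zeros(5, dtype=dt)             # five entries, each a fixed 2x3 float32 block

print(dt.shape)   # (2, 3)
print(arr.shape)  # (5, 2, 3) -> arr.shape[1:] == dt.shape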
from __future__ import absolute_import import numpy import uproot def _dtype_shape(dtype): shape = () while dtype.subdtype is not None: dtype, s = dtype.subdtype shape = shape + s return dtype, shape class Numerical(uproot.interpretation.Interpretation): def _wrap_almost_finalized(self, array): return array def final_array( self, basket_arrays, entry_start, entry_stop, entry_offsets, library, branch ): self.hook_before_final_array( basket_arrays=basket_arrays, entry_start=entry_start, entry_stop=entry_stop, entry_offsets=entry_offsets, library=library, branch=branch, ) if entry_start >= entry_stop: output = library.empty((0,), self.to_dtype) else: length = 0 start = entry_offsets[0] for _, stop in enumerate(entry_offsets[1:]): if start <= entry_start and entry_stop <= stop: length += entry_stop - entry_start elif start <= entry_start < stop: length += stop - entry_start elif start <= entry_stop <= stop: length += entry_stop - start elif entry_start < stop and start <= entry_stop: length += stop - start start = stop output = library.empty((length,), self.to_dtype) start = entry_offsets[0] for basket_num, stop in enumerate(entry_offsets[1:]): if start <= entry_start and entry_stop <= stop: local_start = entry_start - start local_stop = entry_stop - start basket_array = basket_arrays[basket_num] output[:] = basket_array[local_start:local_stop] elif start <= entry_start < stop: local_start = entry_start - start local_stop = stop - start basket_array = basket_arrays[basket_num] output[: stop - entry_start] = basket_array[local_start:local_stop] elif start <= entry_stop <= stop: local_start = 0 local_stop = entry_stop - start basket_array = basket_arrays[basket_num] output[start - entry_start :] = basket_array[local_start:local_stop] elif entry_start < stop and start <= entry_stop: basket_array = basket_arrays[basket_num] output[start - entry_start : stop - entry_start] = basket_array start = stop self.hook_before_library_finalize( basket_arrays=basket_arrays, entry_start=entry_start, entry_stop=entry_stop, entry_offsets=entry_offsets, library=library, branch=branch, output=output, ) output = self._wrap_almost_finalized(output) output = library.finalize(output, branch, self, entry_start, entry_stop) self.hook_after_final_array( basket_arrays=basket_arrays, entry_start=entry_start, entry_stop=entry_stop, entry_offsets=entry_offsets, library=library, branch=branch, output=output, ) return output _numpy_byteorder_to_cache_key = { "!": "B", ">": "B", "<": "L", "|": "L", "=": "B" if numpy.dtype(">f8").isnative else "L", } _dtype_kind_itemsize_to_typename = { ("b", 1): "bool", ("i", 1): "int8_t", ("u", 1): "uint8_t", ("i", 2): "int16_t", ("u", 2): "uint16_t", ("i", 4): "int32_t", ("u", 4): "uint32_t", ("i", 8): "int64_t", ("u", 8): "uint64_t", ("f", 4): "float", ("f", 8): "double", } class AsDtype(Numerical): def __init__(self, from_dtype, to_dtype=None): self._from_dtype = numpy.dtype(from_dtype) if to_dtype is None: self._to_dtype = self._from_dtype.newbyteorder("=") else: self._to_dtype = numpy.dtype(to_dtype) def __repr__(self): if self._to_dtype == self._from_dtype.newbyteorder("="): return "AsDtype({0})".format(repr(str(self._from_dtype))) else: return "AsDtype({0}, {1})".format( repr(str(self._from_dtype)), repr(str(self._to_dtype)) ) def __eq__(self, other): return ( type(other) is AsDtype and self._from_dtype == other._from_dtype and self._to_dtype == other._to_dtype ) @property def from_dtype(self): return self._from_dtype @property def to_dtype(self): return self._to_dtype @property def 
itemsize(self): return self._from_dtype.itemsize @property def inner_shape(self): _, s = _dtype_shape(self._from_dtype) return s @property def numpy_dtype(self): return self._to_dtype def awkward_form( self, file, index_format="i64", header=False, tobject_header=True, breadcrumbs=(), ): awkward = uproot.extras.awkward() d, s = _dtype_shape(self._to_dtype) out = uproot._util.awkward_form( d, file, index_format, header, tobject_header, breadcrumbs ) for size in s[::-1]: out = awkward.forms.RegularForm(out, size) return out @property def cache_key(self): def form(dtype, name): d, s = _dtype_shape(dtype) return "{0}{1}{2}({3}{4})".format( _numpy_byteorder_to_cache_key[d.byteorder], d.kind, d.itemsize, ",".join(repr(x) for x in s), name, ) if self.from_dtype.names is None: from_dtype = form(self.from_dtype, "") else: from_dtype = ( "[" + ",".join( form(self.from_dtype[n], "," + repr(n)) for n in self.from_dtype.names ) + "]" ) if self.to_dtype.names is None: to_dtype = form(self.to_dtype, "") else: to_dtype = ( "[" + ",".join( form(self.to_dtype[n], "," + repr(n)) for n in self.to_dtype.names ) + "]" ) return "{0}({1},{2})".format(type(self).__name__, from_dtype, to_dtype) @property def typename(self): def form(dtype): d, s = _dtype_shape(dtype) return _dtype_kind_itemsize_to_typename[d.kind, d.itemsize] + "".join( "[" + str(dim) + "]" for dim in s ) if self.from_dtype.names is None: return form(self.from_dtype) else: return ( "struct {" + " ".join( "{0} {1};".format(form(self.from_dtype[n]), n) for n in self.from_dtype.names ) + "}" ) def basket_array( self, data, byte_offsets, basket, branch, context, cursor_offset, library ): self.hook_before_basket_array( data=data, byte_offsets=byte_offsets, basket=basket, branch=branch, context=context, cursor_offset=cursor_offset, library=library, ) dtype, shape = _dtype_shape(self._from_dtype) try: output = data.view(dtype).reshape((-1,) + shape) except ValueError: raise ValueError( """basket {0} in tree/branch {1} has the wrong number of bytes ({2}) """ """for interpretation {3} in file {4}""".format( basket.basket_num, branch.object_path, len(data), self, branch.file.file_path, ) ) self.hook_after_basket_array( data=data, byte_offsets=byte_offsets, basket=basket, branch=branch, context=context, output=output, cursor_offset=cursor_offset, library=library, ) return output def reshape(self, shape): d, s = _dtype_shape(self._from_dtype) self._from_dtype = numpy.dtype((d, shape)) d, s = _dtype_shape(self._to_dtype) self._to_dtype = numpy.dtype((d, shape)) class AsDtypeInPlace(AsDtype): def __init__(self): raise NotImplementedError class AsSTLBits(Numerical): def __init__(self): raise NotImplementedError @property def itemsize(self): return self._num_bytes + 4 class TruncatedNumerical(Numerical): @property def low(self): return self._low @property def high(self): return self._high @property def num_bits(self): return self._num_bits @property def from_dtype(self): if self.is_truncated: return numpy.dtype(({"exponent": (">u1", 0), "mantissa": (">u2", 1)}, ())) else: return numpy.dtype(">u4") @property def itemsize(self): return self.from_dtype.itemsize @property def to_dims(self): return self._to_dims @property def is_truncated(self): return self._low == 0.0 and self._high == 0.0 def __repr__(self): args = [repr(self._low), repr(self._high), repr(self._num_bits)] if self._to_dims != (): args.append("to_dims={0}".format(repr(self._to_dims))) return "{0}({1})".format(type(self).__name__, ", ".join(args)) def __eq__(self, other): return ( type(self) == 
type(other) and self._low == other._low and self._high == other._high and self._num_bits == other._num_bits and self._to_dims == other._to_dims ) @property def numpy_dtype(self): return self.to_dtype @property def cache_key(self): return "{0}({1},{2},{3},{4})".format( type(self).__name__, self._low, self._high, self._num_bits, self._to_dims ) def basket_array( self, data, byte_offsets, basket, branch, context, cursor_offset, library ): self.hook_before_basket_array( data=data, byte_offsets=byte_offsets, basket=basket, branch=branch, context=context, cursor_offset=cursor_offset, library=library, ) try: raw = data.view(self.from_dtype) except ValueError: raise ValueError( """basket {0} in tree/branch {1} has the wrong number of bytes ({2}) """ """for interpretation {3} (expecting raw array of {4}) in file {5}""".format( basket.basket_num, branch.object_path, len(data), self, repr(self._from_dtype), branch.file.file_path, ) ) if self.is_truncated: exponent = raw["exponent"].astype(numpy.int32) mantissa = raw["mantissa"].astype(numpy.int32) exponent <<= 23 exponent |= (mantissa & ((1 << (self.num_bits + 1)) - 1)) << ( 23 - self.num_bits ) sign = ((1 << (self.num_bits + 1)) & mantissa != 0) * -2 + 1 output = exponent.view(numpy.float32) * sign d, s = _dtype_shape(self.to_dtype) output = output.astype(d).reshape((-1,) + s) else: d, s = _dtype_shape(self.to_dtype) output = raw.astype(d).reshape((-1,) + s) numpy.multiply( output, float(self._high - self._low) / (1 << self._num_bits), out=output, ) numpy.add(output, self.low, out=output) self.hook_after_basket_array( data=data, byte_offsets=byte_offsets, basket=basket, branch=branch, context=context, cursor_offset=cursor_offset, library=library, raw=raw, output=output, ) return output class AsDouble32(TruncatedNumerical): def __init__(self, low, high, num_bits, to_dims=()): self._low = low self._high = high self._num_bits = num_bits self._to_dims = to_dims if not uproot._util.isint(num_bits) or not 2 <= num_bits <= 32: raise TypeError("num_bits must be an integer between 2 and 32 (inclusive)") if high <= low and not self.is_truncated: raise ValueError( "high ({0}) must be strictly greater than low ({1})".format(high, low) ) @property def to_dtype(self): return numpy.dtype((numpy.float64, self.to_dims)) @property def typename(self): return "Double32_t" + "".join("[" + str(dim) + "]" for dim in self._to_dims) def awkward_form( self, file, index_format="i64", header=False, tobject_header=True, breadcrumbs=(), ): awkward = uproot.extras.awkward() out = awkward.forms.NumpyForm( (), 8, "d", parameters={ "uproot": { "as": "Double32", "low": self._low, "high": self._high, "num_bits": self._num_bits, } }, ) for size in self._to_dims[::-1]: out = awkward.forms.RegularForm(out, size) return out class AsFloat16(TruncatedNumerical): def __init__(self, low, high, num_bits, to_dims=()): self._low = low self._high = high self._num_bits = num_bits self._to_dims = to_dims if not uproot._util.isint(num_bits) or not 2 <= num_bits <= 32: raise TypeError("num_bits must be an integer between 2 and 32 (inclusive)") if high <= low and not self.is_truncated: raise ValueError( "high ({0}) must be strictly greater than low ({1})".format(high, low) ) @property
BSD 3-Clause New or Revised License