Dataset columns (name, type, length range or class count):

    repository_name        stringlengths    7 to 107
    function_path          stringlengths    4 to 190
    function_identifier    stringlengths    1 to 236
    language               stringclasses    1 value
    function               stringlengths    9 to 647k
    docstring              stringlengths    5 to 488k
    function_url           stringlengths    71 to 285
    context                stringlengths    0 to 2.51M
    license                stringclasses    5 values
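The rows below are easier to inspect programmatically. A minimal sketch, assuming this dump corresponds to a dataset loadable with the Hugging Face datasets library and that it has a "train" split; the dataset path "org/code-docstring-corpus" is a hypothetical placeholder, not the real identifier:

# Minimal sketch: load a dataset with the schema above and inspect one row.
# "org/code-docstring-corpus" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("org/code-docstring-corpus", split="train")

row = ds[0]
for column in ("repository_name", "function_path", "function_identifier",
               "language", "docstring", "function_url", "license"):
    print(column, "->", row[column][:80])

# The "function" and "context" columns hold raw source code and can be large
# (up to ~647k and ~2.51M characters respectively), so print them selectively.
print(row["function"][:200])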
google-research/tensor2robot
research/qtopt/networks.py
GraspingModel.create_input_specifications
python
def create_input_specifications(cls):
    input_specifications = cls.create_default_input_specifications()
    return input_specifications
Creates the input_specifications for preprocessing for each model.

Returns:
  input_specifications: A dictionary to specify the input modalities.
https://github.com/google-research/tensor2robot/blob/f8fa5dde145ea1bf29c1d262052e20e0df6746eb/research/qtopt/networks.py#L118-L125
from absl import logging import gin from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import seq2seq as contrib_seq2seq NUM_LAYERS = 19 BATCH_SIZE = 64 NUM_SAMPLES = 100 class GraspingModel(object): def __init__(self, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, l2_regularization=0.00007): self._batch_norm_decay = batch_norm_decay self._batch_norm_epsilon = batch_norm_epsilon self._l2_regularization = l2_regularization @property def grasp_model_input_keys(self): return ['world_vector', 'vertical_rotation'] def create_grasp_params_input(self, model_input, concat_axis=1): return tf.concat([ model_input[grasp_input] for grasp_input in self.grasp_model_input_keys ], concat_axis) @classmethod def create_default_input_specifications(cls): grasp_param_sizes = { 'projected_vector': 2, 'tip_vectors_first_finger': 2, 'tip_vectors_second_finger': 2, 'vertical_rotation': 2, 'camera_vector': 3, 'world_vector': 3, 'wrist_vector': 3, } return { 'include_initial_image': True, 'include_main_images': True, 'include_wrist_images': False, 'include_depth_images': False, 'include_segmask_images': False, 'include_present_images': False, 'include_goal_images': False, 'include_target_object_id': True, 'include_next_image': False, 'include_placing_parameters': False, 'use_displacement_pantilt': False, 'end_to_end_grasping': False, 'end_to_end_grasping_gripper_status': False, 'end_to_end_grasping_height_to_bottom': False, 'end_to_end_grasping_workspace_deltas': False, 'end_to_end_grasping_async': False, 'grasp_param_sizes': grasp_param_sizes, 'include_hand_eye_calibration': False, 'nav_to_grasp': False, } @classmethod
Apache License 2.0
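The class method above just returns the default specification dictionary; subclasses override it to adjust modalities. A minimal, self-contained sketch of that pattern (plain Python, not the tensor2robot module itself, so the class and modality names here are illustrative):

class BaseModel(object):
    """Stand-in for GraspingModel: a defaults dict plus a per-model copy."""

    @classmethod
    def create_default_input_specifications(cls):
        return {
            'include_main_images': True,
            'include_wrist_images': False,
            'include_depth_images': False,
        }

    @classmethod
    def create_input_specifications(cls):
        # Copy the defaults so subclasses can tweak them without side effects.
        return dict(cls.create_default_input_specifications())


class WristCameraModel(BaseModel):

    @classmethod
    def create_input_specifications(cls):
        specs = super(WristCameraModel, cls).create_input_specifications()
        specs['include_wrist_images'] = True  # enable an extra input modality
        return specs


print(WristCameraModel.create_input_specifications())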
jggatc/pyjsdl
draw.py
aalines
python
def aalines(surface, color, closed, pointlist, blend=1):
    rect = lines(surface, color, closed, pointlist, blend)
    return rect
Calls lines() and returns the bounding Rect.
https://github.com/jggatc/pyjsdl/blob/c274ce2bc2099be4eeb2886a349fb3130231f307/draw.py#L308-L313
from math import pi as _pi from pyjsdl.rect import Rect from pyjsdl.color import Color __docformat__ = 'restructuredtext' __doc__ = 'Draw shapes' _return_rect = True def rect(surface, color, rect, width=0): if hasattr(rect, 'width'): _rect = rect else: _rect = Rect(rect) if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.strokeRect(_rect.x, _rect.y, _rect.width, _rect.height) else: if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fillRect(_rect.x, _rect.y, _rect.width, _rect.height) if not _return_rect: return None if surface._display: return surface._display._surface_rect.clip(_rect) else: return surface.get_rect().clip(_rect) def circle(surface, color, position, radius, width=0): surface.beginPath() surface.arc(position[0], position[1], radius, 0, 2*_pi, False) if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() else: if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fill() if not _return_rect: return None if surface._display: return surface._display._surface_rect.clip( Rect(position[0]-radius, position[1]-radius, 2*radius, 2*radius) ) else: return surface.get_rect().clip( Rect(position[0]-radius, position[1]-radius, 2*radius, 2*radius) ) def ellipse(surface, color, rect, width=0): if hasattr(rect, 'width'): _rect = rect else: _rect = Rect(rect) surface.saveContext() surface.translate(_rect.x+int(_rect.width/2), _rect.y+int(_rect.height/2)) if _rect.width >= _rect.height: surface.scale(_rect.width/(_rect.height*1.0), 1) radius = int(_rect.height/2) else: surface.scale(1, _rect.height/(_rect.width*1.0)) radius = int(_rect.width/2) surface.beginPath() surface.arc(0, 0, radius, 0, 2*_pi, False) if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() else: if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fill() surface.restoreContext() if not _return_rect: return None if surface._display: return surface._display._surface_rect.clip(_rect) else: return surface.get_rect().clip(_rect) def arc(surface, color, rect, start_angle, stop_angle, width=1): if hasattr(rect, 'width'): _rect = rect else: _rect = Rect(rect) if _rect.width == _rect.height: surface.beginPath() surface.arc(_rect.x+int(_rect.width/2), _rect.y+int(_rect.height/2), int(_rect.width/2), -start_angle, -stop_angle, True) if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() else: surface.closePath() if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fill() else: surface.saveContext() surface.translate(_rect.x+int(_rect.width/2), _rect.y+int(_rect.height/2)) if 
_rect.width >= _rect.height: surface.scale(_rect.width/(_rect.height*1.0), 1) radius = int(_rect.height/2) else: surface.scale(1, _rect.height/(_rect.width*1.0)) radius = int(_rect.width/2) surface.beginPath() surface.arc(0, 0, radius, -start_angle, -stop_angle, True) if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() else: surface.closePath() if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fill() surface.restoreContext() if not _return_rect: return None if surface._display: return surface._display._surface_rect.clip(_rect) else: return surface.get_rect().clip(_rect) def polygon(surface, color, pointlist, width=0): surface.beginPath() surface.moveTo(*pointlist[0]) for point in pointlist[1:]: surface.lineTo(*point) surface.closePath() if width: surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() else: if surface._fill_style != color: surface._fill_style = color if hasattr(color, 'a'): surface.setFillStyle(color) else: surface.setFillStyle(Color(color)) surface.fill() if not _return_rect: return None xpts = [pt[0] for pt in pointlist] ypts = [pt[1] for pt in pointlist] xmin, xmax = min(xpts), max(xpts) ymin, ymax = min(ypts), max(ypts) if surface._display: return surface._display._surface_rect.clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) else: return surface.get_rect().clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) def line(surface, color, point1, point2, width=1): surface.beginPath() surface.moveTo(*point1) surface.lineTo(*point2) surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() if not _return_rect: return None xpts = [pt[0] for pt in (point1,point2)] ypts = [pt[1] for pt in (point1,point2)] xmin, xmax = min(xpts), max(xpts) ymin, ymax = min(ypts), max(ypts) if surface._display: return surface._display._surface_rect.clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) else: return surface.get_rect().clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) def lines(surface, color, closed, pointlist, width=1): surface.beginPath() surface.moveTo(*pointlist[0]) for point in pointlist[1:]: surface.lineTo(*point) if closed: surface.closePath() surface.setLineWidth(width) if surface._stroke_style != color: surface._stroke_style = color if hasattr(color, 'a'): surface.setStrokeStyle(color) else: surface.setStrokeStyle(Color(color)) surface.stroke() if not _return_rect: return None xpts = [pt[0] for pt in pointlist] ypts = [pt[1] for pt in pointlist] xmin, xmax = min(xpts), max(xpts) ymin, ymax = min(ypts), max(ypts) if surface._display: return surface._display._surface_rect.clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) else: return surface.get_rect().clip( Rect(xmin, ymin, xmax-xmin+1, ymax-ymin+1) ) def aaline(surface, color, point1, point2, blend=1): rect = line(surface, color, point1, point2, blend) return rect
MIT License
speedify/speedify-py
speedify.py
adapter_ratelimit
python
def adapter_ratelimit(adapterID, ratelimit=0):
    args = ['adapter', "ratelimit"]
    args.append(str(adapterID))
    args.append(str(ratelimit))
    resultjson = _run_speedify_cmd(args)
    return resultjson
adapter_ratelimit(adapterID, ratelimit=0)

Sets the ratelimit in bps on the adapter whose adapterID is provided
(show_adapters is where you find the adapterIDs).

:param adapterID: The interface adapterID
:type adapterID: str
:param ratelimit: The ratelimit in bps
:type ratelimit: int
:returns: dict -- :ref:`JSON adapter response <adapter-datalimit-daily>` from speedify.
https://github.com/speedify/speedify-py/blob/db3210f48a9234f20f28d29463934c061df2f9e0/speedify.py#L431-L447
import json import logging import subprocess import os from enum import Enum from functools import wraps from utils import use_shell logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class State(Enum): LOGGED_OUT = 0 LOGGING_IN = 1 LOGGED_IN = 2 AUTO_CONNECTING = 3 CONNECTING = 4 DISCONNECTING = 5 CONNECTED = 6 OVERLIMIT = 7 UNKNOWN = 8 class Priority(Enum): ALWAYS='always' BACKUP='backup' SECONDARY='secondary' NEVER='never' class SpeedifyError(Exception): def __init__(self, message): self.message = message class SpeedifyAPIError(SpeedifyError): def __init__(self, error_code, error_type, error_message): self.error_code = error_code self.error_type = error_type self.error_message = error_message self.message =error_message _cli_path = None def set_cli(new_cli_path): global _cli_path _cli_path = new_cli_path def get_cli(): global _cli_path if ((_cli_path == None) or (_cli_path == "")): _cli_path = _find_cli() return _cli_path def find_state_for_string(mystate): return State[str(mystate).upper().strip()] def exception_wrapper(argument): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): try: result = function(*args, **kwargs) return result except SpeedifyError as err: logger.error(argument + ": " + err.message) raise err return wrapper return decorator @exception_wrapper("Failed to connect") def connect(server=""): args = ['connect'] if(server!= None and server != ""): pieces = server.split("-") for piece in pieces: args.append(piece) logger.debug('connecting to server = ' + server) resultjson = _run_speedify_cmd(args) return resultjson def connect_closest(): return connect("closest") def connect_public(): return connect("public") def connect_private(): return connect("private") def connect_p2p(): return connect("p2p") def connect_country(country="us"): return connect(country) def connect_last(): return connect("last") @exception_wrapper("Disconnect failed") def disconnect(): _run_speedify_cmd(["disconnect"]) return True @exception_wrapper("Failed to set connect method") def connectmethod(method, country="us", city=None, num=None): args = ['connectmethod'] if method == "dedicated": method = "private" if(method == "country"): args.append(country) if(city != None): args.append(city) if (num != None): args.append(num) elif method: args.append(method) resultjson = _run_speedify_cmd(args) return resultjson def connectmethod_as_string(connectMethodObject, hypens=True): sep = " " if hypens: sep = "-" ret = connectMethodObject["connectMethod"] if ret == "country": ret = str(connectMethodObject["country"]) if connectMethodObject["city"]: ret = ret + sep + str(connectMethodObject["city"]) if connectMethodObject["num"]: ret = ret + sep + str(connectMethodObject["num"]) return ret @exception_wrapper("Failed to login") def login(user, password): args = ['login', user, password] resultjson = _run_speedify_cmd(args) return find_state_for_string(resultjson["state"]) @exception_wrapper("Failed to logout") def logout(): jret = _run_speedify_cmd(['logout']) return find_state_for_string(jret["state"]) @exception_wrapper("Failed to get server list") def show_servers(): return _run_speedify_cmd(['show', 'servers']) @exception_wrapper("Failed to get privacy") def show_privacy(): return _run_speedify_cmd(['show', 'privacy']) @exception_wrapper("Failed to get settings") def show_settings(): return _run_speedify_cmd(['show', 'settings']) @exception_wrapper("Failed to get adapters") def show_adapters(): return _run_speedify_cmd(['show', 'adapters']) @exception_wrapper("Failed to do 
captiveportal_check") def captiveportal_check(): return _run_speedify_cmd(['captiveportal', 'check']) @exception_wrapper("Failed to do captiveportal_login") def captiveportal_login(proxy=True, adapterID = None): args = ['captiveportal', 'login'] startproxy = True if proxy == "on": args.append("on") elif proxy == "off": startproxy = False args.append("off") elif proxy: args.append("on") else: startproxy = False args.append("off") if adapterID and startproxy: args.append(adapterID) return _run_speedify_cmd(args) @exception_wrapper("Failed to get current server") def show_currentserver(): return _run_speedify_cmd(['show', 'currentserver']) @exception_wrapper("Failed to get current user") def show_user(): return _run_speedify_cmd(['show', 'user']) @exception_wrapper("Failed to show connect method") def show_connectmethod(): return _run_speedify_cmd(['show', 'connectmethod']) @exception_wrapper("getting state") def show_state(): jret = _run_speedify_cmd(['state']) return find_state_for_string(jret["state"]) @exception_wrapper("Failed to get version") def show_version(): return _run_speedify_cmd(['version']) @exception_wrapper("Failed to get stats") def safebrowsing_stats(): args = ["safebrowsing", "stats"] return _run_speedify_cmd(args) @exception_wrapper("Failed to set adapter priority") def adapter_priority(adapterID, priority=Priority.ALWAYS): args = ['adapter',"priority"] args.append(str(adapterID)) args.append((str(priority.value))) resultjson = _run_speedify_cmd(args) return resultjson @exception_wrapper("Failed to set adapter encryption") def adapter_encryption(adapterID, encrypt): args = ['adapter',"encryption"] args.append(str(adapterID)) if encrypt == "on": args.append("on") elif encrypt == "off": args.append("off") elif encrypt: args.append("on") else: args.append("off") resultjson = _run_speedify_cmd(args) return resultjson @exception_wrapper("Failed to set adapter ratelimit")
Apache License 2.0
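A hedged usage sketch built only from functions shown in this row's context (show_adapters and adapter_ratelimit); it assumes the Speedify CLI is installed and reachable, and that each adapter record in the CLI's JSON output carries an "adapterID" key, which is an assumption rather than something stated in this row:

import speedify

# Both calls shell out to the Speedify CLI, so it must be installed.
adapters = speedify.show_adapters()

# Assumption: each adapter dict exposes an "adapterID" field.
first_id = adapters[0]["adapterID"]

# Cap the first adapter at roughly 10 Mbps (the limit is given in bps).
result = speedify.adapter_ratelimit(first_id, 10000000)
print(result)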
ecordell/pymacaroons
pymacaroons/serializers/json_serializer.py
JsonSerializer.serialize
python
def serialize(self, m):
    from pymacaroons import macaroon
    if m.version == macaroon.MACAROON_V1:
        return self._serialize_v1(m)
    return self._serialize_v2(m)
Serialize the macaroon in JSON format indicated by the version field. @param macaroon the macaroon to serialize. @return JSON macaroon.
https://github.com/ecordell/pymacaroons/blob/abc36a67c3a5546caac1fc5e6a077ce23ee997e2/pymacaroons/serializers/json_serializer.py#L9-L18
import binascii
import json

from pymacaroons import utils


class JsonSerializer(object):
MIT License
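A short usage sketch, assuming the public pymacaroons API (the Macaroon constructor plus serialize(serializer=...)) behaves as documented upstream; the location, identifier, key, and caveat values are placeholders:

from pymacaroons import Macaroon
from pymacaroons.serializers.json_serializer import JsonSerializer

# Placeholder values; any location/identifier/key works for the demo.
m = Macaroon(location='http://example.com/', identifier='key-id', key='secret-key')
m.add_first_party_caveat('account = 1234')

# serialize() dispatches to _serialize_v1 or _serialize_v2 based on m.version.
print(m.serialize(serializer=JsonSerializer()))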
helios-protocol/py-helios-node
helios/protocol/common/normalizers.py
BaseNormalizer.normalize_result
python
def normalize_result(message: TResponsePayload) -> TResult:
    raise NotImplementedError()
Convert underlying peer message to final result
https://github.com/helios-protocol/py-helios-node/blob/691b378938f0a36bf8774dc1ee4e4370b6cf7c63/helios/protocol/common/normalizers.py#L25-L29
from abc import abstractmethod, ABC
from typing import (
    Generic,
    TypeVar,
)

from hp2p.protocol import PayloadType

from .types import (
    TResponsePayload,
    TResult,
)


class BaseNormalizer(ABC, Generic[TResponsePayload, TResult]):
    is_normalization_slow = False

    @staticmethod
    @abstractmethod
MIT License
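A self-contained sketch of the same pattern (a generic abstract normalizer plus a concrete subclass); it re-declares the type variables locally rather than importing helios internals, so the names mirror but are not the project's own, and BlockCountNormalizer is purely illustrative:

from abc import ABC, abstractmethod
from typing import Generic, TypeVar

TResponsePayload = TypeVar('TResponsePayload')
TResult = TypeVar('TResult')


class BaseNormalizer(ABC, Generic[TResponsePayload, TResult]):
    is_normalization_slow = False

    @staticmethod
    @abstractmethod
    def normalize_result(message: TResponsePayload) -> TResult:
        """Convert underlying peer message to final result."""
        raise NotImplementedError()


class BlockCountNormalizer(BaseNormalizer[list, int]):
    """Illustrative normalizer: reduce a raw payload list to its length."""

    @staticmethod
    def normalize_result(message: list) -> int:
        return len(message)


print(BlockCountNormalizer.normalize_result([1, 2, 3]))  # -> 3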
gluufederation/cloud-native-edition
pygluu/kubernetes/terminal/aws.py
PromptAws.prompt_aws_lb
python
def prompt_aws_lb(self):
    lb_map = {
        1: "clb",
        2: "nlb",
        3: "alb",
    }
    if self.settings.get("AWS_LB_TYPE") not in lb_map.values():
        print("|-----------------------------------------------------------------------------|")
        print("| AWS Loadbalancer type |")
        print("|-----------------------------------------------------------------------------|")
        print("| [1] Classic Load Balancer (CLB) [default] |")
        print("| [2] Network Load Balancer (NLB - Alpha) -- Static IP |")
        print("| [3] Application Load Balancer (ALB - Alpha) DEV_ONLY |")
        print("| [ALB] Waiting for URL rewrite support by ALB controller |")
        print("| https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1571 |")
        print("|-----------------------------------------------------------------------------|")
        choice = click.prompt("Loadbalancer type", default=1)
        self.settings.set("AWS_LB_TYPE", lb_map.get(choice, "clb"))
        if self.settings.get("AWS_LB_TYPE") == "alb":
            logger.info("A prompt later during installation will appear to input the ALB DNS address")
    if not self.settings.get("USE_ARN"):
        self.settings.set("USE_ARN", confirm_yesno(
            "Are you terminating SSL traffic at LB and using certificate from AWS"))
    if not self.settings.get("VPC_CIDR") and self.settings.get("USE_ARN") == "Y":
        self.settings.set("AWS_VPC_CIDR", click.prompt(
            "Enter VPC CIDR in use for the Kubernetes cluster i.e 192.168.1.1/16",
            default="0.0.0.0/0"
        ))
    if not self.settings.get("ARN_AWS_IAM") and self.settings.get("USE_ARN") == "Y":
        self.settings.set("ARN_AWS_IAM", click.prompt(
            "Enter aws-load-balancer-ssl-cert arn quoted ('arn:aws:acm:us-west-2:XXXXXXXX:"
            "certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX')"
        ))
Prompts for AWS Load balancer information
https://github.com/gluufederation/cloud-native-edition/blob/79518cab216159d6c9398f879abd30db3680a90d/pygluu/kubernetes/terminal/aws.py#L25-L64
import click

from pygluu.kubernetes.helpers import get_logger
from pygluu.kubernetes.terminal.helpers import confirm_yesno

logger = get_logger("gluu-prompt-aws ")


class PromptAws:

    def __init__(self, settings):
        self.settings = settings
Apache License 2.0
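For reference, these are the settings keys the prompt above can populate; the values below are illustrative placeholders taken from the prompt text, not defaults shipped with the project:

# Illustrative only: settings keys touched by prompt_aws_lb.
settings_after_prompt = {
    "AWS_LB_TYPE": "nlb",              # one of "clb", "nlb", "alb"
    "USE_ARN": "Y",                    # SSL terminated at the LB with an AWS certificate
    "AWS_VPC_CIDR": "192.168.1.1/16",  # asked only when USE_ARN == "Y"
    "ARN_AWS_IAM": "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX",
}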
naparuba/opsbro
data/core-configuration/packs/core-functions/module/system.py
system_is_python_3
python
def system_is_python_3():
    return PY3
**system_is_python_3()** -> return True if the agent is running on python3, False otherwise <code> Example: system_is_python_3() Returns: False </code>
https://github.com/naparuba/opsbro/blob/98618a002cd47250d21e7b877a24448fc95fec80/data/core-configuration/packs/core-functions/module/system.py#L47-L58
import os
import codecs

from opsbro.evaluater import export_evaluater_function
from opsbro.misc.lolcat import lolcat
from opsbro.util import PY3
from opsbro.jsonmgr import jsoner

if PY3:
    basestring = str

FUNCTION_GROUP = 'system'


@export_evaluater_function(function_group=FUNCTION_GROUP)
def system_get_os():
    import platform
    return platform.system().lower()


@export_evaluater_function(function_group=FUNCTION_GROUP)
def system_is_python_2():
    return not PY3


@export_evaluater_function(function_group=FUNCTION_GROUP)
MIT License
googlecloudplatform/appstart
appstart/sandbox/configuration.py
ApplicationConfiguration.__init__
python
def __init__(self, config_file):
    self._verify_structure(config_file)
    if config_file.endswith('.yaml'):
        self._init_from_yaml_config(config_file)
        self.is_java = False
    elif os.path.basename(config_file) == 'appengine-web.xml':
        self._init_from_xml_config(config_file)
        self.is_java = True
    else:
        raise utils.AppstartAbort('{0} is not a valid '
                                  'configuration file. Use either a .yaml '
                                  'file or .xml file.'.format(config_file))
Initializer for ApplicationConfiguration.

Args:
    config_file: (basestring) The absolute path to the configuration file.

Raises:
    utils.AppstartAbort: If the config file neither ends with .yaml nor is
        an appengine-web.xml file.
https://github.com/googlecloudplatform/appstart/blob/f08d4867cd115c458b151b1414d9833fadc63bf1/appstart/sandbox/configuration.py#L26-L47
import os
import xml.dom.minidom
from xml.parsers import expat

import yaml

from .. import utils


class ApplicationConfiguration(object):
Apache License 2.0
christoskap/multi_timescale_replay
sac.py
SAC.load
python
def load(cls, load_path, env=None, eval_env=None, custom_objects=None, **kwargs):
    data, params = cls._load_from_file_cloudpickle(load_path)
    if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:
        raise ValueError("The specified policy kwargs do not equal the stored policy kwargs. "
                         "Stored kwargs: {}, specified kwargs: {}".format(data['policy_kwargs'],
                                                                          kwargs['policy_kwargs']))
    model = cls(policy=data["policy"], env=None, _init_setup_model=False)
    model.__dict__.update(data)
    model.__dict__.update(kwargs)
    model.set_env(env)
    model.set_eval_env(eval_env)
    model.setup_model()
    model.load_parameters(params)
    return model
Load the model from file

:param load_path: (str or file-like) the saved parameter location
:param env: (Gym Environment) the new environment to run the loaded model on
    (can be None if you only need prediction from a trained model)
:param custom_objects: (dict) Dictionary of objects to replace upon loading.
    If a variable is present in this dictionary as a key, it will not be
    deserialized and the corresponding item will be used instead. Similar to
    custom_objects in `keras.models.load_model`. Useful when you have an
    object in file that can not be deserialized.
:param kwargs: extra arguments to change the model when loading
https://github.com/christoskap/multi_timescale_replay/blob/0787a347f8e0500aa1dd12718e6716ea634b0110/sac.py#L685-L716
import sys import time import multiprocessing from collections import deque import warnings import os import cloudpickle from functools import partial import numpy as np import tensorflow as tf import replay_buffers from stable_baselines.a2c.utils import total_episode_reward_logger from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter from stable_baselines.common.vec_env import VecEnv from stable_baselines.deepq.replay_buffer import ReplayBuffer from stable_baselines.ppo2.ppo2 import safe_mean, get_schedule_fn from policies import SACPolicy, squashed_gaussian_likelihood from stable_baselines import logger def get_vars(scope): return tf_util.get_trainable_vars(scope) class SAC(OffPolicyRLModel): def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000, learning_starts=100, train_freq=1, batch_size=64, tau=0.005, ent_coef='auto', target_update_interval=1, gradient_steps=1, target_entropy='auto', action_noise=None, random_exploration=0.0, verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, env_adaptation_fn=None, env_adaptation_interval=1000, eval_env=None, eval_interval=1000, eval_env_params=None, num_eps_per_eval=1, replay_buffer_type='fifo', replay_buffer_params={}, irm_replay=False, irm_pol_coef=1.0): super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs) self.buffer_size = buffer_size self.learning_rate = learning_rate self.learning_starts = learning_starts self.train_freq = train_freq self.batch_size = batch_size self.tau = tau self.ent_coef = ent_coef self.target_update_interval = target_update_interval self.gradient_steps = gradient_steps self.gamma = gamma self.action_noise = action_noise self.random_exploration = random_exploration self.value_fn = None self.graph = None self.replay_buffer = None self.episode_reward = None self.sess = None self.tensorboard_log = tensorboard_log self.verbose = verbose self.params = None self.summary = None self.policy_tf = None self.target_entropy = target_entropy self.full_tensorboard_log = full_tensorboard_log self.obs_target = None self.target_policy = None self.actions_ph = None self.rewards_ph = None self.terminals_ph = None self.observations_ph = None self.action_target = None self.next_observations_ph = None self.value_target = None self.step_ops = None self.target_update_op = None self.infos_names = None self.entropy = None self.target_params = None self.learning_rate_ph = None self.processed_obs_ph = None self.processed_next_obs_ph = None self.log_ent_coef = None self.env_adaptation_fn = env_adaptation_fn self.env_adaptation_interval = env_adaptation_interval self.eval_env = eval_env self.eval_interval = eval_interval self.eval_env_params = eval_env_params self.num_eps_per_eval = num_eps_per_eval self.replay_buffer_type = replay_buffer_type self.replay_buffer_params = replay_buffer_params self.irm_replay = irm_replay self.irm_pol_coef = irm_pol_coef if _init_setup_model: self.setup_model() def _get_pretrain_placeholders(self): policy = self.policy_tf deterministic_action = self.deterministic_action * np.abs(self.action_space.low) return policy.obs_ph, self.actions_ph, deterministic_action def setup_model(self): with SetVerbosity(self.verbose): self.graph = tf.Graph() with self.graph.as_default(): n_cpu = multiprocessing.cpu_count() if sys.platform == 'darwin': n_cpu //= 2 self.sess = tf_util.make_session(num_cpu=n_cpu, 
graph=self.graph) replay_buffer_fn = replay_buffers.get_replay_buffer(self.replay_buffer_type) self.replay_buffer = replay_buffer_fn(**self.replay_buffer_params) if self.replay_buffer_type == 'multi_timescale' or self.replay_buffer_type == 'refer_multi_timescale': self.num_mtr_buffers = self.replay_buffer_params['num_buffers'] else: self.num_mtr_buffers = 1 with tf.variable_scope("input", reuse=False): self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) self.observations_ph = self.policy_tf.obs_ph self.processed_obs_ph = self.policy_tf.processed_obs self.next_observations_ph = self.target_policy.obs_ph self.processed_next_obs_ph = self.target_policy.processed_obs self.action_target = self.target_policy.action_ph self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals') self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards') self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape, name='actions') self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph") self.irm_env_split_idxes = tf.placeholder(tf.int64, shape=(self.num_mtr_buffers + 1,), name="irm_env_split_idxes") self.dummy_pol = tf.constant(1.0, name="dummy_pol") with tf.variable_scope("model", reuse=False): self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph) self.entropy = tf.reduce_mean(self.policy_tf.entropy) qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph, create_qf=True, create_vf=True) qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph, policy_out, create_qf=True, create_vf=False, reuse=True) if self.target_entropy == 'auto': self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32) else: self.target_entropy = float(self.target_entropy) if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'): init_value = 1.0 if '_' in self.ent_coef: init_value = float(self.ent_coef.split('_')[1]) assert init_value > 0., "The initial value of ent_coef must be greater than 0" self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32, initializer=np.log(init_value).astype(np.float32)) self.ent_coef = tf.exp(self.log_ent_coef) else: self.ent_coef = float(self.ent_coef) with tf.variable_scope("target", reuse=False): _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph, create_qf=False, create_vf=True) self.value_target = value_target with tf.variable_scope("loss", reuse=False): min_qf_pi = tf.minimum(qf1_pi, qf2_pi) q_backup = tf.stop_gradient( self.rewards_ph + (1 - self.terminals_ph) * self.gamma * self.value_target ) qf1_unreduced_loss = 0.5 * (q_backup - qf1) ** 2 qf2_unreduced_loss = 0.5 * (q_backup - qf2) ** 2 qf1_loss = tf.reduce_mean(qf1_unreduced_loss) qf2_loss = tf.reduce_mean(qf2_unreduced_loss) ent_coef_loss, entropy_optimizer = None, None if not isinstance(self.ent_coef, float): ent_coef_loss = -tf.reduce_mean( self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy)) entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - tf.squeeze(qf1_pi)) policy_loss = policy_kl_loss v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi) value_unreduced_loss = 0.5 * (value_fn - v_backup) ** 2 value_loss = 
tf.reduce_mean(value_unreduced_loss) values_losses = qf1_loss + qf2_loss + value_loss def squared_grad_norm_fn(dummy): return lambda y: tf.reduce_mean(tf.square(tf.gradients(y, dummy))) unreduced_policy_losses = self.ent_coef * self.policy_tf.get_irm_logp_pi(dummy=self.dummy_pol) - tf.squeeze(qf1_pi) policy_loss_by_env = tf.split(self.ent_coef * self.policy_tf.get_irm_logp_pi(dummy=self.dummy_pol) - tf.squeeze(qf1_pi), self.irm_env_split_idxes)[1:] policy_loss_grad_mags_by_env = [squared_grad_norm_fn(self.dummy_pol)(split) for split in policy_loss_by_env] policy_irm_loss = self.irm_pol_coef * tf.reduce_mean(policy_loss_grad_mags_by_env) if self.irm_replay and self.irm_pol_coef is not 0: policy_loss += policy_irm_loss policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) policy_train_op = policy_optimizer.minimize(policy_loss, var_list=get_vars('model/pi')) value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) values_params = get_vars('model/values_fn') source_params = get_vars("model/values_fn/vf") target_params = get_vars("target/values_fn/vf") self.target_update_op = [ tf.assign(target, (1 - self.tau) * target + self.tau * source) for target, source in zip(target_params, source_params) ] target_init_op = [ tf.assign(target, source) for target, source in zip(target_params, source_params) ] policy_train_ops = [policy_train_op] with tf.control_dependencies(policy_train_ops): train_values_op = value_optimizer.minimize(values_losses, var_list=values_params) self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy'] self.step_ops = [policy_loss, qf1_loss, qf2_loss, value_loss, qf1, qf2, value_fn, logp_pi, self.entropy, policy_train_op, train_values_op] if ent_coef_loss is not None: with tf.control_dependencies([train_values_op]): ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef) self.infos_names += ['ent_coef_loss', 'ent_coef'] self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef] if self.irm_replay: if self.irm_pol_coef is not 0: self.infos_names += ['irm_policy_loss'] self.step_ops += [policy_irm_loss] tf.summary.scalar('policy_loss', policy_loss) tf.summary.scalar('qf1_loss', qf1_loss) tf.summary.scalar('qf2_loss', qf2_loss) tf.summary.scalar('value_loss', value_loss) tf.summary.scalar('entropy', self.entropy) if self.irm_replay: if self.irm_pol_coef is not 0: tf.summary.scalar('irm policy loss', policy_irm_loss) if ent_coef_loss is not None: tf.summary.scalar('ent_coef_loss', ent_coef_loss) tf.summary.scalar('ent_coef', self.ent_coef) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph)) self.params = get_vars("model") self.target_params = get_vars("target/values_fn/vf") with self.sess.as_default(): self.sess.run(tf.global_variables_initializer()) self.sess.run(target_init_op) self.summary = tf.summary.merge_all() def _train_step(self, step, writer, learning_rate): batch = self.replay_buffer.sample(self.batch_size) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch feed_dict = { self.observations_ph: batch_obs, self.actions_ph: batch_actions, self.next_observations_ph: batch_next_obs, self.rewards_ph: batch_rewards.reshape(self.batch_size, -1), self.terminals_ph: batch_dones.reshape(self.batch_size, -1), self.learning_rate_ph: learning_rate } if self.irm_replay: irm_splits = self.replay_buffer.get_buffer_batch_sizes(self.batch_size) feed_dict[self.irm_env_split_idxes] = irm_splits if writer is not None: out = 
self.sess.run([self.summary] + self.step_ops, feed_dict) summary = out.pop(0) writer.add_summary(summary, step) else: out = self.sess.run(self.step_ops, feed_dict) policy_loss, qf1_loss, qf2_loss, value_loss, *values = out entropy = values[4] return_values = (policy_loss, qf1_loss, qf2_loss, value_loss, entropy) if self.log_ent_coef is not None: ent_coef_loss, ent_coef = values[8:10] return_values += (ent_coef_loss, ent_coef) if self.irm_replay: if 'irm_policy_loss' in self.infos_names: return_values += (values[self.infos_names.index('irm_policy_loss') + 3],) return return_values def learn(self, total_timesteps, callback=None, seed=None, log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None): new_tb_log = self._init_num_timesteps(reset_num_timesteps) if replay_wrapper is not None: self.replay_buffer = replay_wrapper(self.replay_buffer) with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) as writer: self._setup_learn(seed) self.learning_rate = get_schedule_fn(self.learning_rate) current_lr = self.learning_rate(1) start_time = time.time() episode_rewards = [0.0] episode_successes = [] if self.action_noise is not None: self.action_noise.reset() if self.env_adaptation_fn is not None: self.env.set_env_params(self.env_adaptation_fn(0, num_timesteps=total_timesteps)) obs = self.env.reset() if self.eval_env is not None: if self.eval_env_params is not None: self.eval_env.set_env_params(self.eval_env_params[0]) eval_obs = self.eval_env.reset() self.episode_reward = np.zeros((1,)) ep_info_buf = deque(maxlen=100) n_updates = 0 infos_values = [] for step in range(total_timesteps): if callback is not None: if callback(locals(), globals()) is False: break if (self.env_adaptation_fn is not None) and (step % self.env_adaptation_interval == 0): env_params = self.env_adaptation_fn(step, num_timesteps=total_timesteps) self.env.set_env_params(env_params) print("env_params: ", env_params) if (self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration): rescaled_action = action = self.env.action_space.sample() else: action = self.policy_tf.step(obs[None], deterministic=False).flatten() if self.action_noise is not None: action = np.clip(action + self.action_noise(), -1, 1) rescaled_action = action * np.abs(self.action_space.low) assert action.shape == self.env.action_space.shape new_obs, reward, done, info = self.env.step(rescaled_action) self.replay_buffer.add(obs, action, reward, new_obs, float(done)) obs = new_obs maybe_ep_info = info.get('episode') if maybe_ep_info is not None: ep_info_buf.extend([maybe_ep_info]) if writer is not None: ep_reward = np.array([reward]).reshape((1, -1)) ep_done = np.array([done]).reshape((1, -1)) self.episode_reward = total_episode_reward_logger(self.episode_reward, ep_reward, ep_done, writer, self.num_timesteps) if step % self.train_freq == 0: mb_infos_vals = [] for grad_step in range(self.gradient_steps): if not self.replay_buffer.can_sample(self.batch_size) or self.num_timesteps < self.learning_starts: break n_updates += 1 frac = 1.0 - step / total_timesteps current_lr = self.learning_rate(frac) mb_infos_vals.append(self._train_step(step, writer, current_lr)) if (step + grad_step) % self.target_update_interval == 0: self.sess.run(self.target_update_op) if len(mb_infos_vals) > 0: infos_values = np.mean(mb_infos_vals, axis=0) episode_rewards[-1] += reward if done: if self.action_noise is not None: self.action_noise.reset() if not isinstance(self.env, VecEnv): obs = 
self.env.reset() episode_rewards.append(0.0) maybe_is_success = info.get('is_success') if maybe_is_success is not None: episode_successes.append(float(maybe_is_success)) if len(episode_rewards[-101:-1]) == 0: mean_reward = -np.inf else: mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1) num_episodes = len(episode_rewards) if (self.eval_env is not None) and (num_episodes % self.eval_interval) and done: eval_ep_reward_means = [] eval_ep_reward_stds = [] eval_mean_ep_lengths = [] for eval_num in range(len(self.eval_env_params)): curr_eval_env_params = self.eval_env_params[eval_num] self.eval_env.set_env_params(curr_eval_env_params) eval_ep = 0 eval_ep_rewards = [] eval_ep_lengths = [] while eval_ep < self.num_eps_per_eval: t_eval = 0 eval_ep_reward = 0 eval_done = False while not eval_done: eval_action,_ = self.predict(eval_obs) eval_obs, eval_r, eval_done, eval_info = self.eval_env.step(eval_action) eval_ep_reward += eval_r t_eval += 1 eval_ep_lengths.append(t_eval) eval_ep_rewards.append(eval_ep_reward) eval_ep += 1 eval_ep_reward_means.append(np.mean(eval_ep_rewards)) eval_ep_reward_stds.append(np.std(eval_ep_rewards)) eval_mean_ep_lengths.append(np.mean(eval_ep_lengths)) self.num_timesteps += 1 if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0: fps = int(step / (time.time() - start_time)) logger.logkv("episodes", num_episodes) logger.logkv("mean 100 episode reward", mean_reward) if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0: logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf])) logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf])) logger.logkv("n_updates", n_updates) logger.logkv("current_lr", current_lr) logger.logkv("fps", fps) logger.logkv('time_elapsed', int(time.time() - start_time)) logger.logkv("replay_buffer_size", len(self.replay_buffer)) if len(episode_successes) > 0: logger.logkv("success rate", np.mean(episode_successes[-100:])) if len(infos_values) > 0: for (name, val) in zip(self.infos_names, infos_values): logger.logkv(name, val) logger.logkv("total timesteps", self.num_timesteps) if self.env_adaptation_fn is not None: for (k, v) in env_params.items(): logger.logkv("env_params/"+k, v) if self.eval_env is not None: for eval_num in range(len(eval_ep_reward_means)): logger.logkv("eval_"+str(eval_num)+" ep_rewmean", eval_ep_reward_means[eval_num]) logger.logkv("eval_"+str(eval_num)+" ep_rewstd", eval_ep_reward_stds[eval_num]) logger.logkv("eval_"+str(eval_num)+" ep_lenmean", eval_mean_ep_lengths[eval_num]) logger.dumpkvs() infos_values = [] return self def action_probability(self, observation, state=None, mask=None, actions=None, logp=False): if actions is not None: raise ValueError("Error: SAC does not have action probabilities.") warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it " "is squashed by a tanh before being scaled and ouputed.") return None def predict(self, observation, state=None, mask=None, deterministic=True): observation = np.array(observation) vectorized_env = self._is_vectorized_observation(observation, self.observation_space) observation = observation.reshape((-1,) + self.observation_space.shape) actions = self.policy_tf.step(observation, deterministic=deterministic) actions = actions.reshape((-1,) + self.action_space.shape) actions = actions * np.abs(self.action_space.low) if not vectorized_env: actions = actions[0] return actions, None def get_parameter_list(self): return (self.params + 
self.target_params) def save(self, save_path, cloudpickle=False): data = { "learning_rate": self.learning_rate, "buffer_size": self.buffer_size, "learning_starts": self.learning_starts, "train_freq": self.train_freq, "batch_size": self.batch_size, "tau": self.tau, "ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto', "target_entropy": self.target_entropy, "gamma": self.gamma, "verbose": self.verbose, "observation_space": self.observation_space, "action_space": self.action_space, "policy": self.policy, "n_envs": self.n_envs, "action_noise": self.action_noise, "random_exploration": self.random_exploration, "_vectorize_action": self._vectorize_action, "policy_kwargs": self.policy_kwargs, "env_adaptation_fn": self.env_adaptation_fn, "env_adaptation_interval": self.env_adaptation_interval, "eval_interval": self.eval_interval, "eval_env_params": self.eval_env_params, "num_eps_per_eval": self.num_eps_per_eval, "replay_buffer_type": self.replay_buffer_type, "replay_buffer_params": self.replay_buffer_params, "irm_replay": self.irm_replay, "irm_pol_coef": self.irm_pol_coef, } params_to_save = self.get_parameters() self._save_to_file(save_path, data=data, params=params_to_save) @classmethod
MIT License
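A hedged usage sketch for the load classmethod above; it assumes a checkpoint previously written by this fork's save() (cloudpickle format), an old-style gym API matching the TF1-era stable-baselines this code builds on, and that the class is importable as "from sac import SAC" because the row's path is sac.py at the repository root. The checkpoint file name is a placeholder:

import gym
from sac import SAC  # this fork's SAC implementation (sac.py in the repo)

env = gym.make("Pendulum-v0")
eval_env = gym.make("Pendulum-v0")

# Placeholder path: a checkpoint written earlier by model.save(...).
model = SAC.load("sac_checkpoint.pkl", env=env, eval_env=eval_env)

obs = env.reset()
action, _ = model.predict(obs, deterministic=True)
print(action)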
google-research/language
language/compir/dataset_parsers/cfq_parser.py
CfqParser._get_trimmed_relations
python
def _get_trimmed_relations(self):
    relations = set()
    for example in self.train_examples:
        tokens = example.program.split()
        for token in tokens:
            if "." in token and token != ".":
                relations.add(token)
    trimmed_relations = {}
    relations_original = set(relations)
    for relation in relations_original:
        if "ns:" in relation and not relation.startswith("#"):
            relation_trimmed = relation.split("ns:")[-1]
            if relation_trimmed in relations:
                raise RuntimeError(
                    "The trimmed relation {} is not unique!".format(relation_trimmed))
            relations.add(relation_trimmed)
            trimmed_relations[relation] = relation_trimmed
    return trimmed_relations
Gets a mapping between relations and their short naming.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/compir/dataset_parsers/cfq_parser.py#L57-L77
import collections
import re

from language.compir.dataset_parsers import dataset_parser


class CfqParser(dataset_parser.DatasetParserInterface):

    def __init__(self, train_examples_raw, test_examples_raw):
        super().__init__(train_examples_raw, test_examples_raw)
        self.trimmed_relations = self._get_trimmed_relations()
        self.trimmed_relations_inv = {
            rel_short: rel_long
            for rel_long, rel_short in self.trimmed_relations.items()
        }

    def preprocess_program(self, program):
        program_processed = str(program)
        program_processed = program_processed.replace("{", "lb")
        program_processed = program_processed.replace("}", "rb")
        program_processed = program_processed.replace("^", "#")
        return program_processed

    def _get_program_parts(self, program):
        if not program.endswith(" rb"):
            raise ValueError("Wrong program format.")
        program_no_closing = program[:-3]
        parts = program_no_closing.split(" lb ")
        if len(parts) != 2:
            raise ValueError("Wrong program format.")
        prefix = parts[0]
        conjuncts_str = parts[1]
        conjuncts = conjuncts_str.split(" . ")
        return prefix, conjuncts
Apache License 2.0
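A small self-contained sketch of the trimming rule the method implements, written as a standalone function over a list of program strings; the SPARQL-style tokens are illustrative, and this is not the CfqParser class itself:

def trim_relations(programs):
    """Map fully qualified relations like 'ns:film.actor.film' to 'film.actor.film'."""
    relations = set()
    for program in programs:
        for token in program.split():
            if "." in token and token != ".":
                relations.add(token)
    trimmed = {}
    for relation in set(relations):
        if "ns:" in relation and not relation.startswith("#"):
            short = relation.split("ns:")[-1]
            if short in relations:
                raise RuntimeError("The trimmed relation {} is not unique!".format(short))
            relations.add(short)
            trimmed[relation] = short
    return trimmed


# Illustrative CFQ-like conjuncts.
print(trim_relations(["?x0 ns:film.actor.film ?x1 . ?x1 ns:film.performance.character ?x2"]))
# e.g. {'ns:film.actor.film': 'film.actor.film', 'ns:film.performance.character': 'film.performance.character'}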
modoboa/modoboa-amavis
modoboa_amavis/handlers.py
on_domain_alias_deleted
python
def on_domain_alias_deleted(sender, instance, **kwargs):
    delete_user("@{0}".format(instance.name))
Delete user for domain alias.
https://github.com/modoboa/modoboa-amavis/blob/18e5a210ac2eb007ce28d70675f4188d93e1b822/modoboa_amavis/handlers.py#L68-L70
from __future__ import unicode_literals from django.db.models import signals from django.dispatch import receiver from django.template import Context, Template from django.urls import reverse from django.utils.translation import ugettext as _ from modoboa.admin import models as admin_models, signals as admin_signals from modoboa.core import signals as core_signals from modoboa.lib import signals as lib_signals from modoboa.parameters import tools as param_tools from . import forms from .lib import ( create_user_and_policy, create_user_and_use_policy, delete_user, delete_user_and_policy, update_user_and_policy ) from .models import Policy, Users from .sql_connector import SQLconnector @receiver(core_signals.extra_user_menu_entries) def menu(sender, location, user, **kwargs): if location == "top_menu": return [ {"name": "quarantine", "label": _("Quarantine"), "url": reverse("modoboa_amavis:index")} ] return [] @receiver(signals.post_save, sender=admin_models.Domain) def manage_domain_policy(sender, instance, **kwargs): if kwargs.get("created"): create_user_and_policy("@{0}".format(instance.name)) else: update_user_and_policy( "@{0}".format(instance.oldname), "@{0}".format(instance.name) ) @receiver(signals.pre_delete, sender=admin_models.Domain) def on_domain_deleted(sender, instance, **kwargs): delete_user_and_policy("@{0}".format(instance.name)) @receiver(signals.post_save, sender=admin_models.DomainAlias) def on_domain_alias_created(sender, instance, **kwargs): if not kwargs.get("created"): return create_user_and_use_policy( "@{0}".format(instance.name), "@{0}".format(instance.target.name) ) @receiver(signals.pre_delete, sender=admin_models.DomainAlias)
MIT License
vincent-lg/tsunami
src/secondaires/navigation/equipage/matelot.py
Matelot.invalider_ordres
python
def invalider_ordres(self, cle):
    for ordre in list(self.ordres):
        if ordre.cle == cle:
            self.ordres.remove(ordre)
            ordre.invalide = True
Invalidates the orders.
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/navigation/equipage/matelot.py#L226-L231
from abstraits.obase import BaseObj from primaires.objet.objet import MethodeObjet from secondaires.navigation.equipage.signaux import * from secondaires.navigation.equipage.postes import postes from .ordre import * class Matelot(BaseObj): logger = type(importeur).man_logs.get_logger("ordres") def __init__(self, equipage, personnage): BaseObj.__init__(self) self.equipage = equipage self.personnage = personnage self.nom_poste = "matelot" self.confiance = 0 self.affectation = None self.ordres = [] def __getnewargs__(self): return (None, None) def __getattr__(self, nom_attr): try: attribut = getattr(type(self.personnage), nom_attr) assert callable(attribut) return MethodeObjet(attribut, self) except (AttributeError, AssertionError): return getattr(self.personnage, nom_attr) def __repr__(self): return "<mâtelot {} ({})>".format(repr(self.nom), str(self.personnage)) @property def poste(self): return postes[self.nom_poste] @property def navire(self): if self.equipage: return self.equipage.navire else: return None def get_ordre(self, cle_ordre): ordres = [o for o in self.ordres if o.cle == cle_ordre] if ordres: return ordres[0] return None def nettoyer_ordres(self): volontes = self.equipage.volontes uniques = [] args = [] for ordre in self.ordres: if ordre.volonte and ordre.volonte not in volontes: continue arg = (ordre.cle, ) + ordre.arguments_suplementaires if arg not in args: args.append(arg) uniques.append(ordre) self.ordres[:] = uniques def executer_ordres(self, priorite=1): if self.ordres: ordre = self.ordres[0] generateur = ordre.creer_generateur() self.executer_generateur(generateur) def executer_generateur(self, generateur, profondeur=0): indent = " " * profondeur msg = "Exécution du générateur : {}".format(generateur) self.logger.debug(indent + msg) ordre = generateur.ordre volonte = ordre.volonte matelot = ordre.matelot personnage = matelot.personnage if ordre.invalide: self.logger.debug(indent + "{} est invalidé".format(ordre)) return if ordre.peut_deleguer and (personnage is None or personnage.est_mort()): self.logger.debug(indent + "{} est mort".format(personnage)) matelot.relayer_ordres() matelot.ordres[:] = [] return etats = [etat.cle for etat in personnage.etats] if ordre.etats_autorises != ("*", ) and any(cle not in ordre.etats_autorises for cle in etats) and ordre.peut_deleguer: self.logger.debug(indent + "{} est occupé ({} {})".format( personnage, personnage.etats, ordre.etats_autorises)) matelot.relayer_ordres() matelot.ordres[:] = [] return try: signal = next(generateur) except StopIteration: matelot.ordres[:] = [] return self.logger.debug(indent + "Signal {} reçu".format(signal)) if signal is None: self.executer_generateur(generateur, profondeur) elif isinstance(signal, (int, float)): tps = signal nom = "ordres_{}".format(id(generateur)) self.logger.debug(indent + "Pause pendant {} secondes".format( tps)) importeur.diffact.ajouter_action(nom, tps, self.executer_generateur, generateur, profondeur) else: signal.traiter(generateur, profondeur) def ordonner(self, ordre): self.ordres.append(ordre) def relayer_ordres(self): volontes = [] for ordre in self.ordres: if ordre.volonte and ordre.volonte not in volontes: volontes.append(ordre.volonte) for volonte in volontes: self.equipage.demander(volonte.cle, *volonte.arguments, exception=self)
BSD 3-Clause New or Revised License
unexpectedpanda/retool
modules/titleutils.py
remove_regions
python
def remove_regions(title, region_data):
    return title.replace(
        re.search(' \(((.*?,){0,} {0,})(' + '|'.join(region_data.all) + ').*?\)', title)[0], '')
Removes regions from the input title, given the title and region_data object
https://github.com/unexpectedpanda/retool/blob/df4c3d9a95a85db8aeee35f8657d248ce38c58d8/modules/titleutils.py#L135-L138
import re import sys from modules.utils import Font def check_date(string, title): months = [ 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december' ] if re.search('|'.join(months), title) != None: for i, month in enumerate(months): if (i < 8): title = re.sub(f'{month}, ', f'0{i + 1}-01-', title) else: title = re.sub(f'{month}, ', f'{i + 1}-01-', title) if re.search('\(\d{2}-\d{2}-\d{4}\)', title) != None: us_date = True else: us_date = False if re.search('\(\d{2}-\d{2}-\d{2}\)', title) != None: short_date = True else: short_date = False title = title.replace(re.search(string, title)[0], re.search(string, title)[0].replace('-', '')) if short_date == True: string = '\(\d{6}\)' year = 1900 + int(re.search(string, title).group()[1:-5]) month = re.search(string, title).group()[3:-3] day = re.search(string, title).group()[5:-1] elif us_date == True: year = re.search(string, title).group()[5:-1] month = re.search(string, title).group()[1:-7] day = re.search(string, title).group()[3:-5] else: year = re.search(string, title).group()[1:-5] month = re.search(string, title).group()[5:-3] day = re.search(string, title).group()[7:-1] if ( int(year) >= 1970 and int(month) >= 1 and int(month) <= 12 and int(day) >= 1 and int(day) <= 31): return int(year + month + day) else: return False def get_title_count(titles, is_folder): final_title_count = 0 if len(titles.all) == 0: if is_folder == False: sys.exit() else: return 0 else: for group, disc_titles in titles.all.items(): for title in disc_titles: final_title_count += 1 return final_title_count def get_languages(title, REGEX_LANGUAGES): languages = re.search(REGEX_LANGUAGES, title) if languages != None: return languages[0][2:-1] else: return '' def get_raw_title(title): if title.find('(') != -1: return title[:(title.find('(') - 1)].rstrip().lower() else: return title.rstrip().lower() def get_short_name(region_data, REGEX, tag_free_name=False, full_name=False, user_input=None): if tag_free_name == False: tag_free_name = get_tag_free_name(full_name, user_input, REGEX) short_name = remove_regions(remove_languages(tag_free_name, REGEX.languages), region_data) return short_name def get_tag_free_name(title, user_input, REGEX): tag_free_name = title for key, value in user_input.tag_strings.disc_rename.items(): if key in tag_free_name: tag_free_name = tag_free_name.replace(key, value) tag_free_name = remove_tags(tag_free_name, user_input, REGEX) return tag_free_name def remove_languages(title, REGEX_LANGUAGES): no_languages = re.search(REGEX_LANGUAGES, title) if no_languages != None: return title.replace(no_languages[0], '') else: return title
BSD 3-Clause New or Revised License
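A self-contained sketch of the same region-stripping behavior; it inlines the function from this row and fakes the region_data object with a SimpleNamespace whose .all list is illustrative, since the real object is built from retool's region data files:

import re
from types import SimpleNamespace


def remove_regions(title, region_data):
    """Strip the bracketed region list, e.g. ' (USA, Europe)', from a title."""
    return title.replace(
        re.search(' \(((.*?,){0,} {0,})(' + '|'.join(region_data.all) + ').*?\)', title)[0], '')


# Illustrative region list; retool derives the real one from its region data.
region_data = SimpleNamespace(all=['USA', 'Europe', 'Japan'])

print(remove_regions('Some Game (USA, Europe) (En,Fr,De)', region_data))
# -> 'Some Game (En,Fr,De)'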
llvm-mirror/zorg
zorg/jenkins/build.py
derive_lldb
python
def derive_lldb():
    derive(tree='lldb', repos=['lldb', 'llvm', 'clang'])
Build a derived src tree for LLDB
https://github.com/llvm-mirror/zorg/blob/b78b0c96bff39702d901a22a4198dfa7f02e9907/zorg/jenkins/build.py#L819-L821
import sys import logging import os import subprocess import datetime import time import argparse import shutil import math import re import xml.etree.ElementTree as ET from contextlib import contextmanager from urllib2 import urlopen, URLError, HTTPError SERVER = "labmaster2.lab.llvm.org" NINJA = "/usr/local/bin/ninja" here = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.abspath(here + "/../../dep/")) import dep def readme_name(repo): if repo == "libcxx": return "LICENSE.TXT" return "README.txt" def next_section(name): footer() header(name) def header(name): print "@@@", name, "@@@" def footer(): print "Completed at: " + time.strftime("%FT%T") print "@@@@@@" def quote_sh_string(string): return "\\'".join("'" + p + "'" for p in string.split("'")) class Configuration(object): def __init__(self, args): super(Configuration, self).__init__() self._args = args self.workspace = os.environ.get('WORKSPACE', os.getcwd()) self._src_dir = os.environ.get('SRC_DIR', 'llvm') self._lldb_src_dir = os.environ.get('LLDB_SRC_DIR', 'lldb') self._build_dir = os.environ.get('BUILD_DIR', 'clang-build') self._lldb_build_dir = os.environ.get('LLDB_BUILD_DIR', 'lldb-build') self._install_dir = os.environ.get('INSTALL_DIR', 'clang-install') self.j_level = os.environ.get('J_LEVEL', None) self.max_parallel_tests = os.environ.get('MAX_PARALLEL_TESTS', None) self.max_parallel_links = os.environ.get('MAX_PARALLEL_LINKS', None) self.host_compiler_url = os.environ.get('HOST_URL', 'http://labmaster2.local/artifacts/') self.artifact_url = os.environ.get('ARTIFACT', 'NONE') self.job_name = os.environ.get('JOB_NAME', 'NONE') self.build_id = os.environ.get('BUILD_ID', 'NONE') self.build_number = os.environ.get('BUILD_NUMBER', 'NONE') self.svn_rev = os.environ.get('LLVM_REV', 'NONE') self.nobootstrap = True self.device = None self._svn_url_cache = None self.node_name = os.environ.get('NODE_NAME', None) self.lldb_test_archs = os.environ.get('LLDB_TEST_ARCHS', 'x86_64').split() self.__dict__.update(vars(args)) def builddir(self): return os.path.join(self.workspace, self._build_dir) def srcdir(self): return os.path.join(self.workspace, self._src_dir) def lldbbuilddir(self): return os.path.join(self.workspace, self._lldb_build_dir) def lldbsrcdir(self): return os.path.join(self.workspace, self._lldb_src_dir) def installdir(self): return os.path.join(self.workspace, self._install_dir) def CC(self): cc_basedir = os.path.join(self.workspace, 'host-compiler/') if os.path.exists(cc_basedir): clang_exec_path = os.path.join(cc_basedir, 'bin/clang') assert os.path.exists(clang_exec_path), "host-compiler present," " but has no clang executable." return clang_exec_path else: return False def liblto(self): cc_basedir = os.path.join(self.workspace, 'host-compiler/') if os.path.exists(cc_basedir): clang_liblto_path = os.path.join(cc_basedir, 'lib/') assert os.path.exists(clang_liblto_path), "host-compiler present," " but has no liblto." 
return clang_liblto_path else: return False def branch(self): try: return os.environ['BRANCH'] except: assert self._svn_url is not None BRANCH_MARKER = "/branches/" if BRANCH_MARKER in self._svn_url: wo_branch = self._svn_url.split(BRANCH_MARKER, 1)[1] branch = wo_branch.rsplit("@", 1)[0] return branch else: return "master" @property def _svn_url(self): if self._svn_url_cache: return self._svn_url_cache svn_url = os.environ.get('SVN_URL', os.environ.get('SVN_URL_1', None)) if svn_url is None: svn_url = self.grab_svn_url() self._svn_url_cache = svn_url return svn_url def grab_svn_url(self): if os.environ.get('TESTING', False): return '/foo/workspace/llvm.src' cmd = ['svn', 'info', '--xml', os.path.join(self.workspace, 'llvm.src')] out = run_collect_output(cmd) x = ET.fromstring(out) url = x.find('entry').find('url').text return url def link_memory_usage(self): usages = {'master': 3.5} if self.branch() == 'master': return usages['master'] else: raise NotImplementedError( "Unknown link memory usage." + self.branch()) conf = None def update_svn_checkout(working_dir): next_section("SVN upgrade") out = "" try: run_collect_output(["/usr/bin/xcrun", "svn", "upgrade"], working_dir=working_dir) except subprocess.CalledProcessError as e: msg = """Process return code: {}\n The working path was: {}\n The error was: {}.\n""" msg = msg.format(e.returncode, working_dir, out) print msg def cmake_builder(target): check_repo_state(conf.workspace) if not os.getenv("TESTING"): dep.parse_dependencies([here + "/clang_build_dependencies.dep"]) env = [] dyld_path = "" if conf.lto and conf.liblto(): dyld_path = conf.liblto() env.extend(["env", "DYLD_LIBRARY_PATH=" + dyld_path]) cmake_cmd = env + ["/usr/local/bin/cmake", "-G", "Ninja", '-DCMAKE_MAKE_PROGRAM=' + NINJA, "-DCMAKE_INSTALL_PREFIX=" + conf.installdir(), conf.srcdir()] compiler_flags = conf.compiler_flags max_parallel_links = conf.max_parallel_links if conf.lto: if conf.thinlto: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS=1"] else: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS=" + str(max_link_jobs())] cmake_cmd += ['-DLLVM_BUILD_EXAMPLES=Off'] if not max_parallel_links: max_parallel_links = 1 if dyld_path: cmake_cmd += ['-DDYLD_LIBRARY_PATH=' + dyld_path] else: cmake_cmd += ['-DLLVM_ENABLE_LTO=Off'] cmake_cmd += ['-DLLVM_BUILD_EXAMPLES=On'] cmake_cmd += ["-DCMAKE_MACOSX_RPATH=On"] libtool_path = query_sys_tool("macosx", "libtool") if libtool_path: cmake_cmd += ['-DCMAKE_LIBTOOL=' + libtool_path] if compiler_flags: cmake_cmd += ["-DCMAKE_C_FLAGS={}".format(' '.join(compiler_flags)), "-DCMAKE_CXX_FLAGS={}".format(' '.join(compiler_flags))] if max_parallel_links is not None: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS={}".format(max_parallel_links)] if conf.CC(): cmake_cmd += ['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"] if conf.cmake_build_type: cmake_cmd += ["-DCMAKE_BUILD_TYPE=" + conf.cmake_build_type] elif conf.debug: cmake_cmd += ["-DCMAKE_BUILD_TYPE=Debug"] else: cmake_cmd += ["-DCMAKE_BUILD_TYPE=Release"] cmake_cmd += ["-DLLVM_BUILD_EXTERNAL_COMPILER_RT=On"] for flag in conf.cmake_flags: cmake_cmd += [flag] if conf.assertions: cmake_cmd += ["-DLLVM_ENABLE_ASSERTIONS=On"] else: cmake_cmd += ["-DLLVM_ENABLE_ASSERTIONS=Off"] if conf.globalisel: cmake_cmd += ["-DLLVM_BUILD_GLOBAL_ISEL=ON"] if conf.svn_rev != 'NONE': cmake_cmd += ["-DSVN_REVISION={}".format(conf.svn_rev)] lit_flags = ['--xunit-xml-output=testresults.xunit.xml', '-v', '--timeout=600'] if conf.max_parallel_tests: lit_flags += ['-j', conf.max_parallel_tests] cmake_cmd 
+= ['-DLLVM_LIT_ARGS={}'.format(' '.join(lit_flags))] ninja_cmd = env + ["/usr/local/bin/ninja", '-v'] if conf.j_level is not None: ninja_cmd += ["-j", conf.j_level] if target == 'all' or target == 'build': header("Cmake") run_cmd(conf.builddir(), cmake_cmd) footer() header("Ninja build") passed_target = conf.cmake_build_targets build_target = passed_target if passed_target else ['all'] run_cmd(conf.builddir(), ninja_cmd + build_target) footer() if conf.noinstall: header("Skip install") else: header("Ninja install") run_cmd(conf.builddir(), ninja_cmd + ['install']) build_upload_artifact() footer() ninja_cmd.extend(['-k', '0']) if target == 'all' or target == 'test' or target == 'testlong': header("Ninja test") targets = [ 'check-all'] if target == 'testlong' or target == 'all' else conf.cmake_test_targets if not targets: targets = ['check', 'check-clang'] run_cmd(conf.builddir(), ninja_cmd + targets) footer() def clang_builder(target): check_repo_state(conf.workspace) run_ws(['sh', '-c', 'rm -rfv *gz']) if target == "all" or target == "build": run_ws(['rm', '-rf', 'clang.roots']) debug_src_dir = 'debuginfo-tests.src' sdk_name = 'macosx' sdkroot = query_sdk_path(sdk_name) libtool_path = query_sys_tool(sdk_name, "libtool") next_section("Setup debug-info tests") run_ws(['rm', '-rf', 'llvm/tools/clang/test/debuginfo-tests']) run_cmd(os.path.join(conf.workspace, 'llvm/tools/clang/test'), ['ln', '-sf', os.path.join(conf.workspace, debug_src_dir), 'debuginfo-tests']) project = 'clang' clang_br = os.path.join(conf.workspace, conf._build_dir) next_section("Build Directory") run_ws(["mkdir", "-p", clang_br]) toolchain = '/Applications/Xcode.app/Contents/Developer' '/Toolchains/XcodeDefault.xctoolchain' env = [] dyld_path = "" if conf.lto and conf.liblto(): dyld_path = conf.liblto() env.extend(["env", "DYLD_LIBRARY_PATH=" + dyld_path]) next_section("Build Clang") if conf.nobootstrap: if conf.debug or conf.device: assert False, "Invalid parameter for clang-builder." 
run_cmd(clang_br, ['mkdir', './Build', './Root']) install_prefix = conf.installdir() cmake_cachefile_thinlto = '' if conf.thinlto: cmake_cachefile_thinlto = '-ThinLTO' cmake_cachefile = '{}/llvm/tools/clang/cmake/caches/Apple-stage2{}.cmake'.format( conf.workspace, cmake_cachefile_thinlto) cmake_command = env + ["/usr/local/bin/cmake", '-G', 'Ninja', '-C', cmake_cachefile, '-DLLVM_ENABLE_ASSERTIONS:BOOL={}'.format( "TRUE" if conf.assertions else "FALSE"), '-DCMAKE_BUILD_TYPE=RelWithDebInfo', '-DCMAKE_MAKE_PROGRAM=' + NINJA, '-DLLVM_VERSION_PATCH=99', '-DLLVM_VERSION_SUFFIX=""', '-DLLVM_BUILD_EXTERNAL_COMPILER_RT=On', '-DCLANG_COMPILER_RT_CMAKE_ARGS={}/llvm/projects/compiler-rt/cmake/caches/Apple.cmake'.format( conf.workspace), '-DCOMPILER_RT_BUILD_SANITIZERS=On', '-DCMAKE_INSTALL_PREFIX={}'.format( install_prefix), '-DLLVM_REPOSITORY={}'.format(conf._svn_url), '-DCLANG_REPOSITORY_STRING={}'.format( conf.branch()), '-DCLANG_APPEND_VC_REV=On', '-DSVN_REVISION={}'.format(conf.svn_rev), '-DLLVM_BUILD_TESTS=On', '-DLLVM_INCLUDE_TESTS=On', '-DCLANG_INCLUDE_TESTS=On', '-DLLVM_INCLUDE_UTILS=On', '-DLIBCXX_INSTALL_HEADERS=On', '-DLIBCXX_OVERRIDE_DARWIN_INSTALL=On', '-DLIBCXX_INSTALL_LIBRARY=Off', '-DCMAKE_MACOSX_RPATH=On', ] if dyld_path: cmake_command += ['-DDYLD_LIBRARY_PATH=' + dyld_path] if libtool_path: cmake_command += ['-DCMAKE_LIBTOOL=' + libtool_path] if conf.CC(): cmake_command.extend(['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"]) lit_flags = ['--xunit-xml-output=testresults.xunit.xml', '-v', '--timeout=600'] if conf.max_parallel_tests: lit_flags += ['-j', conf.max_parallel_tests] cmake_command.extend( ['-DLLVM_LIT_ARGS={}'.format(' '.join(lit_flags))]) if conf.thinlto: cmake_command.extend(["-DLLVM_PARALLEL_LINK_JOBS=1"]) elif conf.lto: cmake_command.extend( ["-DLLVM_PARALLEL_LINK_JOBS=" + str(max_link_jobs())]) else: cmake_command.extend(['-DLLVM_ENABLE_LTO=Off']) cmake_command.extend([ '-DCMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -gline-tables-only -DNDEBUG', '-DCMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -gline-tables-only -DNDEBUG']) for flag in conf.cmake_flags: cmake_command += [flag] cmake_command.append("{}/llvm".format(conf.workspace)) run_cmd(os.path.join(clang_br, 'Build'), cmake_command) next_section("Ninja") run_cmd(os.path.join(clang_br, 'Build'), [NINJA, '-v', 'install']) build_upload_artifact() else: print 'Stage two compile TBD in near future' if not conf.device and (target == "test" or target == "all"): next_section("Tests") obj_dir = os.path.join(conf._build_dir, 'Objects/obj-llvm/tools/clang/stage2-bins/') if not os.path.exists(obj_dir): obj_dir = os.path.join(conf._build_dir, 'Build/') obj_dir = os.path.join(conf.workspace, obj_dir) cmd = [NINJA, '-v', '-k', '0', 'check-all'] if conf.assertions: cmd[-1] += ' --param use_gmalloc=1 ' '--param gmalloc_path=$(xcodebuild -find-library' ' libgmalloc.dylib)' run_cmd(obj_dir, cmd, env={'MALLOC_LOG_FILE': '/dev/null'}) def parse_settings_from_output(working_dir, cmd): old_dir = os.getcwd() try: os.chdir(working_dir) assignment_regex = re.compile(r"^\s+([^\s=]+)\s*=\s*(.+)$") settings = {} for line in subprocess.check_output(cmd).splitlines(True): match = assignment_regex.match(line) if match: settings[match.group(1)] = match.group(2) return settings finally: os.chdir(old_dir) def lldb_builder(): header("Clean LLDB build directory") if os.path.exists(conf.lldbbuilddir()): shutil.rmtree(conf.lldbbuilddir()) footer() build_configuration = "Release" xcodebuild_cmd = [ "xcodebuild", "-arch", "x86_64", 
"-configuration", build_configuration, "-scheme", "desktop", "-derivedDataPath", conf.lldbbuilddir() ] header("Build Xcode desktop scheme") run_cmd("lldb", xcodebuild_cmd) footer() header("Gather Xcode build settings") xcodebuild_cmd.append("-showBuildSettings") settings = parse_settings_from_output("lldb", xcodebuild_cmd) footer() build_dir = settings.get("BUILD_DIR", None) built_products_dir = settings.get("BUILT_PRODUCTS_DIR", None) if build_dir is None or built_products_dir is None: raise Exception("failed to retrieve build-related directories " "from Xcode") llvm_build_dir = settings.get("LLVM_BUILD_DIR", None) llvm_build_dir_arch = settings.get("LLVM_BUILD_DIR_ARCH", None) if llvm_build_dir is None or llvm_build_dir_arch is None: raise Exception("failed to retrieve LLVM build-related settings " "from Xcode") llvm_build_bin_dir = os.path.join(llvm_build_dir, llvm_build_dir_arch, "bin") built_clang_path = os.path.join(llvm_build_bin_dir, "clang") built_filecheck_path = os.path.join(llvm_build_bin_dir, "FileCheck") effective_clang = os.environ.get("LLDB_PYTHON_TESTSUITE_CC", built_clang_path) xcodebuild_cmd = [ "xcodebuild", "-arch", "x86_64", "-configuration", build_configuration, "-scheme", "lldb-gtest", "-derivedDataPath", conf.lldbbuilddir(), ] header("Build Xcode lldb-gtest scheme") run_cmd("lldb", xcodebuild_cmd) footer() for arch in conf.lldb_test_archs: results_file = os.path.join(build_dir, "test-results-{}.xml".format(arch)) python_testsuite_cmd = [ "/usr/bin/python", "test/dotest.py", "--executable", os.path.join(built_products_dir, "lldb"), "-C", effective_clang, "--arch", arch, "--results-formatter", "lldbsuite.test_event.formatter.xunit.XunitFormatter", "--results-file", results_file, "--rerun-all-issues", "--env", "TERM=vt100", "-O--xpass=ignore", "--dsymutil="+os.path.join(os.path.dirname(effective_clang), 'dsymutil'), "--filecheck="+built_filecheck_path ] header("Run LLDB Python-based test suite ({} targets)".format(arch)) print repr(python_testsuite_cmd) run_cmd_errors_okay("lldb", python_testsuite_cmd) footer() def lldb_cmake_builder(): test_dir = os.path.join(conf.workspace, 'test') log_dir = os.path.join(test_dir, 'logs') results_file = os.path.join(test_dir, 'results.xml') dest_dir = os.path.join(conf.workspace, 'results', 'lldb') run_ws(["mkdir", "-p", conf.lldbbuilddir()]) cmake_build_type = conf.cmake_build_type if conf.cmake_build_type else 'RelWithDebInfo' header("Configure") dotest_args=['--arch', 'x86_64', '--build-dir', conf.lldbbuilddir()+'/lldb-test-build.noindex', '-s='+log_dir, '-t', '--env', 'TERM=vt100'] dotest_args.extend(conf.dotest_flags) cmake_cmd = ["/usr/local/bin/cmake", '-G', 'Ninja', conf.srcdir(), '-DLLVM_ENABLE_ASSERTIONS:BOOL={}'.format( "TRUE" if conf.assertions else "FALSE"), '-DCMAKE_BUILD_TYPE='+cmake_build_type, '-DCMAKE_MAKE_PROGRAM=' + NINJA, '-DLLVM_VERSION_PATCH=99', '-DLLVM_ENABLE_MODULES=On', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-DCMAKE_INSTALL_PREFIX="%s"'%dest_dir, '-DLLDB_TEST_USER_ARGS='+';'.join(dotest_args), '-DLLVM_LIT_ARGS=--xunit-xml-output=%s -v'%results_file] cmake_cmd.extend(conf.cmake_flags) if conf.CC(): cmake_cmd.extend(['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"]) run_cmd(conf.lldbbuilddir(), cmake_cmd) footer() header("Build") run_cmd(conf.lldbbuilddir(), [NINJA, '-v']) footer() header("Run Tests") run_cmd(conf.lldbbuilddir(), [NINJA, '-v', 'check-debuginfo']) run_cmd(conf.lldbbuilddir(), ['/usr/bin/env', 'TERM=vt100', NINJA, '-v', 'check-lldb']) footer() def 
static_analyzer_benchmarks_builder(): header("Static Analyzer Benchmarks") benchmark_script = conf.workspace + "/utils-analyzer/SATestBuild.py" benchmarks_dir = conf.workspace + "/test-suite-ClangAnalyzer/" compiler_bin_dir = conf.workspace + "/host-compiler/bin/" scanbuild_bin_dir = conf.workspace + "/tools-scan-build/bin/" old_path = os.environ.get("PATH", "") env = dict(os.environ, PATH=compiler_bin_dir + os.pathsep + scanbuild_bin_dir + os.pathsep + old_path) benchmark_cmd = [benchmark_script, "--strictness", "0" ] run_cmd(benchmarks_dir, benchmark_cmd, env=env) footer() def check_repo_state(path): if os.environ.get('TESTING', False): return logging.info("Detecting repos in {}".format(path)) for r in ['llvm', 'clang', 'clang-tools-extra', 'debuginfo-tests', 'compiler-rt', 'libcxx', 'debuginfo-tests']: detected_path = derived_path('llvm', tree_path(tree='llvm', repo=r)) readme = os.path.join(path, detected_path, readme_name(repo=r)) if os.path.exists(readme): logging.info(" - {} found at {}".format(r, detected_path)) else: logging.info(" - {} not found".format(r)) def checkout_path(workspace, repo): return workspace + "/" + repo + ".src" def tree_path(tree, repo): if tree == "llvm": if repo == "llvm": return "" if repo == "clang": return "tools/clang" if repo == "clang-tools-extra": return "tools/clang/tools/extra" if repo == "debuginfo-tests": return "tools/clang/test/debuginfo-tests" if repo == "compiler-rt": return "projects/compiler-rt" if repo == "libcxx": return "projects/libcxx" if repo == "lldb": return "tools/lldb" elif tree == "lldb": if repo == "lldb": return "" if repo == "llvm": return "llvm" if repo == "clang": return "llvm/tools/clang" if repo == "compiler-rt": return "llvm/projects/compiler-rt" if repo == "libcxx": return "llvm/projects/libcxx" else: logging.error("Unknown tree '{}'".format(tree)) sys.exit(1) logging.error("Unknown repo '{}' in tree '{}".format(repo, tree)) sys.exit(1) def tree_srcdir(conf, tree): if tree == "llvm": return conf.srcdir() if tree == "lldb": return conf.lldbsrcdir() logging.error("Unknown tree '{}'".format(tree)) sys.exit(1) def derived_path(srcdir, tree_path): if tree_path: return srcdir + "/" + tree_path return srcdir def should_exclude(base_path, repo_path): if base_path == repo_path: return False if not base_path: return True if repo_path.startswith(base_path + "/"): return True return False def http_download(url, dest): try: print "GETting", url, "to", dest, "...", f = urlopen(url) with open(dest, "wb") as local_file: local_file.write(f.read()) except HTTPError, e: print print "HTTP Error:", e.code, url sys.exit(1) except URLError, e: print print "URL Error:", e.reason, url sys.exit(1) print "done." 
def rsync(conf, tree, repo, repos): cmd = ["rsync", "-auvh", "--delete", "--exclude=.svn/"] path = tree_path(tree=tree, repo=repo) for x in repos: x_path = tree_path(tree=tree, repo=x) if should_exclude(path, x_path): cmd.append("--exclude=/" + x_path) workspace = conf.workspace srcdir = tree_srcdir(conf=conf, tree=tree) cmd.append(checkout_path(workspace=workspace, repo=repo) + "/") cmd.append(derived_path(srcdir=srcdir, tree_path=path)) run_cmd(working_dir=srcdir, cmd=cmd) def derive(tree, repos): if 'debuginfo-tests' in repos: dest_path = conf.workspace + "/" + 'llvm/tools/clang/test/debuginfo-tests' if os.path.exists(dest_path): print 'Remove debuginfo-tests from derived source if it exists' run_ws(['rm', '-rf', dest_path]) for p in repos: full_path = checkout_path(workspace=conf.workspace, repo=p) update_svn_checkout(working_dir=full_path) if not os.path.exists(full_path): logging.error("Cannot find Repo: in " + full_path) sys.exit(1) srcdir = tree_srcdir(conf=conf, tree=tree) for p in repos: full_path = derived_path(srcdir=srcdir, tree_path=tree_path(tree=tree, repo=p)) if not os.path.exists(full_path): os.makedirs(full_path) header("Derive Source") for repo in repos: rsync(conf=conf, tree=tree, repo=repo, repos=repos) footer() def derive_llvm(repos=['llvm', 'clang', 'libcxx', 'clang-tools-extra', 'compiler-rt']): derive(tree='llvm', repos=repos)
Apache License 2.0
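A quick illustration of the repo-exclusion rule that rsync() above relies on; should_exclude() is copied verbatim from the quoted script so the snippet runs on its own, and the example paths are just the tree_path() values shown above.

# Standalone copy of should_exclude() from the script above, plus a few checks
# showing which nested repos rsync() would exclude.
def should_exclude(base_path, repo_path):
    if base_path == repo_path:
        return False
    if not base_path:
        return True
    if repo_path.startswith(base_path + "/"):
        return True
    return False

assert should_exclude("tools/clang", "tools/clang") is False              # the repo itself is kept
assert should_exclude("tools/clang", "tools/clang/tools/extra") is True   # nested checkout is excluded
assert should_exclude("", "projects/compiler-rt") is True                 # llvm root excludes every sub-repo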
vermeille/torchelie
torchelie/loss/neuralstyleloss.py
NeuralStyleLoss.forward
python
def forward(
        self,
        input_img: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, float]]:
    out = self.get_style_content_(input_img, detach=False)

    c_ratio = 1. - self.ratio.squeeze()
    s_ratio = self.ratio.squeeze()

    style_loss = sum(
        F.l1_loss(self.style_maps[a], bgram(out[a]))
        for a in self.style_layers) / len(self.style_maps)

    content_loss = sum(
        F.mse_loss(self.content[a], out[a])
        for a in self.content_layers) / len(self.content_layers)

    loss = c_ratio * content_loss + s_ratio * style_loss
    return loss, {
        'style': style_loss.item(),
        'content': content_loss.item()
    }
Actually compute the loss
https://github.com/vermeille/torchelie/blob/28b0c16e3efc8ab00625532e0c980727a33b6776/torchelie/loss/neuralstyleloss.py#L101-L125
import torch import torch.nn.functional as F import torch.nn as nn import random from typing import Dict, Optional, List, cast, Tuple from torchelie.utils import bgram import torchelie as tch import torchelie.utils as tu from torchelie.nn import ImageNetInputNorm, WithSavedActivations from torchelie.models import PerceptualNet class NeuralStyleLoss(nn.Module): net: PerceptualNet def __init__(self) -> None: super(NeuralStyleLoss, self).__init__() self.style_layers = [ 'conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1', ] self.content_layers = ['conv3_2'] self.content = {} self.style_maps = {} self.net = PerceptualNet(self.style_layers + self.content_layers, remove_unused_layers=False) self.norm = ImageNetInputNorm() tu.freeze(self.net) def get_style_content_(self, img: torch.Tensor, detach: bool) -> Dict[str, Dict[str, torch.Tensor]]: activations: Dict[str, torch.Tensor] _, activations = self.net(self.norm(img), detach=detach) activations = { k: F.instance_norm(a.float()) for k, a in activations.items() } return activations def set_style(self, style_img: torch.Tensor, style_ratio: float, style_layers: Optional[List[str]] = None) -> None: if style_layers is not None: self.style_layers = style_layers self.net.set_keep_layers(names=self.style_layers + self.content_layers) self.ratio = torch.tensor(style_ratio) with torch.no_grad(): out = self.get_style_content_(style_img, detach=True) self.style_maps = {k: bgram(out[k]) for k in self.style_layers} def set_content(self, content_img: torch.Tensor, content_layers: Optional[List[str]] = None) -> None: if content_layers is not None: self.content_layers = content_layers self.net.set_keep_layers(names=self.style_layers + self.content_layers) with torch.no_grad(): out = self.get_style_content_(content_img, detach=True) self.content = {a: out[a] for a in self.content_layers}
MIT License
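A hedged usage sketch built only from the NeuralStyleLoss API quoted above (set_style / set_content / forward); the import path is assumed from the file location, and the random tensors stand in for real 1x3xHxW images.

# Sketch: optimize a canvas image against a style/content target.
import torch
from torchelie.loss import NeuralStyleLoss   # import path assumed from torchelie/loss/neuralstyleloss.py

style_img = torch.rand(1, 3, 256, 256)       # placeholder style image
content_img = torch.rand(1, 3, 256, 256)     # placeholder content image

loss_fn = NeuralStyleLoss()
loss_fn.set_style(style_img, 0.5)            # style_ratio=0.5
loss_fn.set_content(content_img)

canvas = content_img.clone().requires_grad_(True)
opt = torch.optim.Adam([canvas], lr=0.05)
for _ in range(10):
    opt.zero_grad()
    loss, parts = loss_fn(canvas)            # forward() returns (loss, {'style': ..., 'content': ...})
    loss.backward()
    opt.step()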
thehappydinoa/ashssdk
lambda/awscli/customizations/ec2/decryptpassword.py
LaunchKeyArgument.add_to_params
python
def add_to_params(self, parameters, value):
    if value:
        path = os.path.expandvars(value)
        path = os.path.expanduser(path)
        if os.path.isfile(path):
            self._key_path = path
            endpoint_prefix = self._operation_model.service_model.endpoint_prefix
            event = 'after-call.%s.%s' % (endpoint_prefix,
                                          self._operation_model.name)
            self._session.register(event, self._decrypt_password_data)
        else:
            msg = ('priv-launch-key should be a path to the '
                   'local SSH private key file used to launch '
                   'the instance.')
            raise ValueError(msg)
This gets called with the value of our ``--priv-launch-key`` if it is specified. It needs to determine if the path provided is valid and, if it is, it stores it in the instance variable ``_key_path`` for use by the decrypt routine.
https://github.com/thehappydinoa/ashssdk/blob/d251a08ba6c35d81cf41b3267db666b08e875515/lambda/awscli/customizations/ec2/decryptpassword.py#L73-L94
import logging import os import base64 import rsa from awscli.compat import six from botocore import model from awscli.arguments import BaseCLIArgument logger = logging.getLogger(__name__) HELP = """<p>The file that contains the private key used to launch the instance (e.g. windows-keypair.pem). If this is supplied, the password data sent from EC2 will be decrypted before display.</p>""" def ec2_add_priv_launch_key(argument_table, operation_model, session, **kwargs): argument_table['priv-launch-key'] = LaunchKeyArgument( session, operation_model, 'priv-launch-key') class LaunchKeyArgument(BaseCLIArgument): def __init__(self, session, operation_model, name): self._session = session self.argument_model = model.Shape('LaunchKeyArgument', {'type': 'string'}) self._operation_model = operation_model self._name = name self._key_path = None self._required = False @property def cli_type_name(self): return 'string' @property def required(self): return self._required @required.setter def required(self, value): self._required = value @property def documentation(self): return HELP def add_to_parser(self, parser): parser.add_argument(self.cli_name, dest=self.py_name, help='SSH Private Key file')
MIT License
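The decrypt routine registered above is not shown in the excerpt; as background, here is a freestanding sketch of how EC2 password data is typically decrypted with the same rsa library this module imports. The key path and PasswordData value are placeholders, and this is not awscli's actual implementation.

import base64
import rsa

# PasswordData as returned by the EC2 GetPasswordData call (placeholder value).
password_data_from_api = "...base64-encoded blob..."

with open("windows-keypair.pem", "rb") as f:   # hypothetical launch key path
    private_key = rsa.PrivateKey.load_pkcs1(f.read())

ciphertext = base64.b64decode(password_data_from_api)
print(rsa.decrypt(ciphertext, private_key).decode("utf-8"))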
lingz/fast_fuzzy_search
fast_fuzzy_search/fast_fuzzy_search.py
FastFuzzySearch.score_for_id
python
def score_for_id(self, id, query):
    term = self.library.get(id)
    score = levenshtein(term.lower(), query.lower())
    return Result(id, term, score)
Given a termId, returns the scored Result object
https://github.com/lingz/fast_fuzzy_search/blob/cdbdae0e6e3912b68137f94f4e47d5254fa6a7d4/fast_fuzzy_search/fast_fuzzy_search.py#L80-L86
import pyphone from fast_fuzzy_search.levenshtein import levenshtein from collections import namedtuple import operator from functools import reduce Result = namedtuple('Result', ['id', 'term', 'score']) class FastFuzzySearch: def __init__(self, options={}): self.index = {} self.library = {} self.options = options self.options.setdefault('language', 'english') def add_term(self, term, id): self.library[id] = term words = term.split(' ') for word in words: variants = self.generate_phonetic_variants(word) for variant in variants: if variant: ids = self.index.setdefault(variant, set()) ids.add(id) def search(self, query, n_results=10): query_words = query.split(' ') res_sets = map(self.ids_for_query_word, query_words) common_ids = set.intersection(*res_sets) candidates = map(lambda id: self.score_for_id(id, query), common_ids) ranked_candidates = sorted(candidates, key=lambda res: res.score) return ranked_candidates[:n_results] def generate_phonetic_variants(self, word): res = [] phonexes = pyphone.phonex(word, language=self.options['language']) for phonex in phonexes: one_delete = self.deletes(phonex) two_deletes = reduce(operator.add, map(self.deletes, one_delete)) res += [phonex] res += one_delete res += two_deletes return res @staticmethod def deletes(term): res = [] for i in range(len(term)): res.append(term[:i] + term[i + 1:]) return res
MIT License
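A usage sketch based on the FastFuzzySearch API quoted above (add_term / search); the import path is assumed from the file layout and the example terms are made up.

from fast_fuzzy_search.fast_fuzzy_search import FastFuzzySearch

ffs = FastFuzzySearch()
ffs.add_term("New York University Abu Dhabi", id=1)
ffs.add_term("New York City", id=2)

for result in ffs.search("new yrok", n_results=5):
    # lower score = closer Levenshtein match, per score_for_id() above
    print(result.id, result.term, result.score)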
budowski/rest_gae
rest_gae/rest_gae.py
get_rest_class
python
def get_rest_class(ndb_model, base_url, **kwd): class RESTHandlerClass(BaseRESTHandler, blobstore_handlers.BlobstoreUploadHandler, blobstore_handlers.BlobstoreDownloadHandler): model = import_class(ndb_model) if not hasattr(model, 'RESTMeta'): class NewRESTMeta: pass model.RESTMeta = NewRESTMeta model.RESTMeta.base_url = base_url permissions = { 'OPTIONS': PERMISSION_ANYONE } permissions.update(kwd.get('permissions', {})) allow_http_method_override = kwd.get('allow_http_method_override', True) allowed_origin = kwd.get('allowed_origin', None) after_get_callback = [kwd.get('after_get_callback', None)] before_post_callback = [kwd.get('before_post_callback', None)] after_post_callback = [kwd.get('after_post_callback', None)] before_put_callback = [kwd.get('before_put_callback', None)] after_put_callback = [kwd.get('after_put_callback', None)] before_delete_callback = [kwd.get('before_delete_callback', None)] after_delete_callback = [kwd.get('after_delete_callback', None)] if PERMISSION_OWNER_USER in permissions.values(): if not hasattr(model, 'RESTMeta') or not hasattr(model.RESTMeta, 'user_owner_property'): raise ValueError('Must define a RESTMeta.user_owner_property for the model class %s if user-owner permission is used' % (model)) if not hasattr(model, model.RESTMeta.user_owner_property): raise ValueError('The user_owner_property "%s" (defined in RESTMeta.user_owner_property) does not exist in the given model %s' % (model.RESTMeta.user_owner_property, model)) def __init__(self, request, response): self.initialize(request, response) blobstore_handlers.BlobstoreUploadHandler.__init__(self, request, response) blobstore_handlers.BlobstoreDownloadHandler.__init__(self, request, response) self.after_get_callback = self.after_get_callback[0] self.before_post_callback = self.before_post_callback[0] self.after_post_callback = self.after_post_callback[0] self.before_put_callback = self.before_put_callback[0] self.after_put_callback = self.after_put_callback[0] self.before_delete_callback = self.before_delete_callback[0] self.after_delete_callback = self.after_delete_callback[0] def rest_method_wrapper(func): def inner_f(self, model_id, property_name=None): method_name = func.func_name.upper() if method_name not in self.permissions: return self.method_not_allowed() permission = self.permissions[method_name] if (permission in [PERMISSION_LOGGED_IN_USER, PERMISSION_OWNER_USER, PERMISSION_ADMIN]) and (not self.user): return self.unauthorized() elif permission == PERMISSION_ADMIN and not self.is_user_admin: return self.permission_denied() try: if model_id: model = self._model_id_to_model(model_id.lstrip('/')) if (permission == PERMISSION_OWNER_USER) and (self.get_model_owner(model) != self.user.key): return self.permission_denied() if property_name and model: property_name = translate_property_names({ property_name: True }, model, 'input').keys()[0] result = func(self, model, property_name) else: result = func(self, None, None) if isinstance(result, webapp2.Response): return result elif not isinstance(result, NoResponseResult): return self.success(result) except RESTException, exc: return self.error(exc) return inner_f @rest_method_wrapper def options(self, model, property_name=None): return '' @rest_method_wrapper def get(self, model, property_name=None): if not model: query = self._filter_query() if self.permissions['GET'] == PERMISSION_OWNER_USER: query = query.filter(getattr(self.model, self.user_owner_property) == self.user.key) query = self._order_query(query) (results, cursor) = 
self._fetch_query(query) if self.after_get_callback: results = self.after_get_callback(results) return { 'results': results, 'next_results_url': self._build_next_query_url(cursor) } else: if property_name: if not hasattr(model, property_name): raise RESTException('Invalid property name "%s"' % property_name) blob_key = getattr(model, property_name) if not blob_key: raise RESTException('"%s" is not set' % property_name) if not isinstance(blob_key, blobstore.BlobKey): raise RESTException('"%s" is not a BlobKeyProperty' % property_name) self.send_blob(blob_key) return NoResponseResult() if self.after_get_callback: model = self.after_get_callback(model) return model @rest_method_wrapper def post(self, model, property_name=None): if model and not property_name: raise RESTException('Cannot POST to a specific model ID') if model and property_name: if not hasattr(model, property_name): raise RESTException('Invalid property name "%s"' % property_name) if not isinstance(model._properties[property_name], ndb.BlobKeyProperty): raise RESTException('"%s" is not a BlobKeyProperty' % property_name) upload_files = self.get_uploads() if not upload_files: upload_url = blobstore.create_upload_url(self.request.url) return self.redirect(upload_url, code=307) blob_info = upload_files[0] if getattr(model, property_name): blobstore.delete(getattr(model, property_name)) setattr(model, property_name, blob_info.key()) model.put() return { 'status': True } try: json_data = json.loads(self.request.body) except ValueError as exc: raise RESTException('Invalid JSON POST data') if not isinstance(json_data, list): json_data = [json_data] models = [] for model_to_create in json_data: try: model = self._build_model_from_data(model_to_create, self.model) models.append(model) except Exception as exc: raise RESTException('Invalid JSON POST data - %s' % exc) if self.before_post_callback: models = self.before_post_callback(models, json_data) created_keys = ndb.put_multi(models) if self.after_post_callback: models = self.after_post_callback(created_keys, models) return models @rest_method_wrapper def put(self, model, property_name=None): models = [] try: json_data = json.loads(self.request.body) except ValueError as exc: raise RESTException('Invalid JSON PUT data') if model: model = self._build_model_from_data(json_data, self.model, model) json_data = [json_data] models.append(model) else: if not isinstance(json_data, list): raise RESTException('Invalid JSON PUT data') for model_to_update in json_data: model_id = model_to_update.pop('id', None) if model_id is None: raise RESTException('Missing "id" argument for model') model = self._model_id_to_model(model_id) model = self._build_model_from_data(model_to_update, self.model, model) models.append(model) if self.before_put_callback: models = self.before_put_callback(models, json_data) updated_keys = ndb.put_multi(models) if self.after_put_callback: models = self.after_put_callback(updated_keys, models) return models def _delete_model_blobs(self, model): for (name, prop) in model._properties.iteritems(): if isinstance(prop, ndb.BlobKeyProperty): if getattr(model, name): blobstore.delete(getattr(model, name)) @rest_method_wrapper def delete(self, model, property_name=None): models = [] if model: models.append(model) else: if self.permissions['DELETE'] == PERMISSION_OWNER_USER: query = self.model.query().filter(getattr(self.model, self.user_owner_property) == self.user.key) else: query = self.model.query() cursor = None more_available = True while more_available: results, cursor, 
more_available = query.fetch_page(BaseRESTHandler.DEFAULT_MAX_QUERY_RESULTS, start_cursor=cursor) if results: models.extend(results) if self.before_delete_callback: models = self.before_delete_callback(models) for m in models: self._delete_model_blobs(m) deleted_keys = ndb.delete_multi(m.key for m in models) if self.after_delete_callback: self.after_delete_callback(deleted_keys, models) return models @webapp2.cached_property def is_user_admin(self): if not hasattr(self.user, 'RESTMeta') or not hasattr(self.user.RESTMeta, 'admin_property'): raise ValueError('The user model class %s must include a RESTMeta class with `admin_property` defined' % (self.user.__class__)) admin_property = self.user.RESTMeta.admin_property if not hasattr(self.user, admin_property): raise ValueError('The user model class %s does not have the property %s as defined in its RESTMeta.admin_property' % (self.user.__class__, admin_property)) return getattr(self.user, admin_property) @webapp2.cached_property def user_owner_property(self): return self.model.RESTMeta.user_owner_property def get_model_owner(self, model): return getattr(model, self.user_owner_property) return RESTHandlerClass
Returns a RESTHandlerClass with the ndb_model and permissions set according to input
https://github.com/budowski/rest_gae/blob/0852e2b51e65ee6a239cf9eaebb5e27a1eef7bd4/rest_gae/rest_gae.py#L567-L935
import importlib import json import re from urlparse import urlparse from datetime import datetime, time, date from urllib import urlencode import webapp2 from google.appengine.ext import ndb from google.appengine.ext.ndb import Cursor from google.appengine.ext.db import BadValueError, BadRequestError from webapp2_extras import auth from webapp2_extras import sessions from webapp2_extras.routes import NamePrefixRoute from google.appengine.ext import blobstore from google.appengine.ext.webapp import blobstore_handlers from google.appengine.api import app_identity from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError try: import dateutil.parser except ImportError as e: dateutil = None PERMISSION_ANYONE = 'anyone' PERMISSION_LOGGED_IN_USER = 'logged_in_user' PERMISSION_OWNER_USER = 'owner_user' PERMISSION_ADMIN = 'admin' class NDBEncoder(json.JSONEncoder): def _decode_key(self, key): model_class = ndb.Model._kind_map.get(key.kind()) if getattr(model_class, 'RESTMeta', None) and getattr(model_class.RESTMeta, 'use_input_id', False): return key.string_id() else: return key.urlsafe() def default(self, obj): if isinstance(obj, ndb.Model): obj_dict = obj.to_dict() for (name, prop) in obj._properties.iteritems(): if isinstance(prop, ndb.BlobKeyProperty): server_host = app_identity.get_default_version_hostname() blob_property_url = 'http://%s%s/%s/%s' % (server_host, obj.RESTMeta.base_url, self._decode_key(obj.key), name) obj_dict[name] = { 'upload_url': blob_property_url, 'download_url': blob_property_url if getattr(obj, name) else None } included_properties = get_included_properties(obj, 'output') obj_dict = dict((k,v) for k,v in obj_dict.iteritems() if k in included_properties) obj_dict = translate_property_names(obj_dict, obj, 'output') obj_dict['id'] = self._decode_key(obj.key) return obj_dict elif isinstance(obj, datetime) or isinstance(obj, date) or isinstance(obj, time): return obj.isoformat() elif isinstance(obj, ndb.Key): return self._decode_key(obj) elif isinstance(obj, ndb.GeoPt): return str(obj) else: return json.JSONEncoder.default(self, obj) class RESTException(Exception): pass class NoResponseResult(object): pass def get_translation_table(model, input_type): meta_class = getattr(model, 'RESTMeta', None) if not meta_class: return {} translation_table = getattr(model.RESTMeta, 'translate_property_names', {}) translation_table.update(getattr(model.RESTMeta, 'translate_%s_property_names' % input_type, {})) return translation_table def translate_property_names(data, model, input_type): translation_table = get_translation_table(model, input_type) if not translation_table: return data for old_name, new_name in translation_table.iteritems(): if input_type == 'output' and old_name not in data: continue if input_type == 'input' and new_name not in data: continue if input_type == 'output': original_value = data[old_name] del data[old_name] data[new_name] = original_value elif input_type == 'input': original_value = data[new_name] del data[new_name] data[old_name] = original_value return data def get_included_properties(model, input_type): meta_class = getattr(model, 'RESTMeta', None) included_properties = set() if meta_class: included_properties = set(getattr(meta_class, 'included_%s_properties' % input_type, [])) included_properties.update(set(getattr(meta_class, 'included_properties', []))) if not included_properties: included_properties = set(model._properties.keys()) if meta_class: excluded_properties = set(getattr(meta_class, 'excluded_%s_properties' % input_type, [])) 
excluded_properties.update(set(getattr(meta_class, 'excluded_properties', []))) else: excluded_properties = set() if input_type == 'input': excluded_properties.update(set(BaseRESTHandler.DEFAULT_EXCLUDED_INPUT_PROPERTIES)) if meta_class and getattr(meta_class, 'use_input_id', False): included_properties.update(['id']) if input_type == 'output': excluded_properties.update(set(BaseRESTHandler.DEFAULT_EXCLUDED_OUTPUT_PROPERTIES)) properties = included_properties - excluded_properties return properties def import_class(input_cls): if not isinstance(input_cls, str): return input_cls try: (module_name, class_name) = input_cls.rsplit('.', 1) module = __import__(module_name, fromlist=[class_name]) return getattr(module, class_name) except Exception, exc: raise ValueError("Couldn't import the model class '%s'" % input_cls) class BaseRESTHandler(webapp2.RequestHandler): DEFAULT_MAX_QUERY_RESULTS = 1000 DEFAULT_EXCLUDED_INPUT_PROPERTIES = [ 'class_' ] DEFAULT_EXCLUDED_OUTPUT_PROPERTIES = [ ] def dispatch(self): self.session_store = sessions.get_store(request=self.request) try: if getattr(self, 'allow_http_method_override', False) and ('X-HTTP-Method-Override' in self.request.headers): overridden_method_name = self.request.headers['X-HTTP-Method-Override'].upper().strip() if overridden_method_name not in ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']: return self.method_not_allowed() self.request.method = overridden_method_name if getattr(self, 'allowed_origin', None): allowed_origin = self.allowed_origin if 'Origin' in self.request.headers: origin = self.request.headers['Origin'] if (origin != allowed_origin) and (allowed_origin != '*'): return self.permission_denied('Origin not allowed') response = webapp2.RequestHandler.dispatch(self) except: raise else: self.session_store.save_sessions(response) return response @webapp2.cached_property def session(self): backend = self.app.config.get("session_backend", "datastore") return self.session_store.get_session(backend=backend) @webapp2.cached_property def auth(self): return auth.get_auth() @webapp2.cached_property def user_info(self): return self.auth.get_user_by_session() @webapp2.cached_property def user_model(self): return self.auth.store.user_model @webapp2.cached_property def user(self): u = self.user_info return self.user_model.get_by_id(u['user_id']) if u else None def get_response(self, status, content): response = webapp2.Response(json.dumps(content, cls=NDBEncoder)) response.status = status response.headers['Content-Type'] = 'application/json' response.headers['Access-Control-Allow-Methods'] = ', '.join(self.permissions.keys()) if getattr(self, 'allowed_origin', None): response.headers['Access-Control-Allow-Origin'] = self.allowed_origin return response def success(self, content): return self.get_response(200, content) def error(self, exception): return self.get_response(400, {'error': str(exception)}) def method_not_allowed(self): return self.get_response(405, {}) def permission_denied(self, reason=None): return self.get_response(403, { 'reason': reason}) def unauthorized(self): return self.get_response(401, {}) def redirect(self, url, **kwd): return webapp2.redirect(url, **kwd) def _model_id_to_model(self, model_id): if not model_id: return None try: if getattr(self.model, 'RESTMeta', None) and getattr(self.model.RESTMeta, 'use_input_id', False): model = ndb.Key(self.model, model_id).get() else: model = ndb.Key(urlsafe=model_id).get() if not model: raise Exception() except Exception, exc: raise RESTException('Invalid model id - %s' % model_id) 
return model def _build_next_query_url(self, cursor): if not cursor: return None params = self.request.GET params['cursor'] = cursor.urlsafe() return self.request.path_url + '?' + urlencode(params) def _filter_query(self): if not self.request.GET.get('q'): return self.model.query() try: translation_table = get_translation_table(self.model, 'input') query = self.request.GET.get('q') for original_name, new_name in translation_table.iteritems(): query = re.sub(r'\b%s\s*(<=|>=|=|<|>|!=|(\s+IN\s+))' % new_name, r'%s \1' % original_name, query, flags=re.IGNORECASE) return self.model.gql('WHERE ' + query) except Exception, exc: raise RESTException('Invalid query param - "%s"' % self.request.GET.get('q')) def _fetch_query(self, query): if not self.request.GET.get('limit'): limit = BaseRESTHandler.DEFAULT_MAX_QUERY_RESULTS else: try: limit = int(self.request.GET.get('limit')) if limit <= 0: raise ValueError('Limit cannot be zero or less') except ValueError, exc: raise RESTException('Invalid "limit" parameter - %s' % self.request.GET.get('limit')) if not self.request.GET.get('cursor'): cursor = None else: try: cursor = Cursor(urlsafe=self.request.GET.get('cursor')) except BadValueError, exc: raise RESTException('Invalid "cursor" argument - %s' % self.request.GET.get('cursor')) try: (results, cursor, more_available) = query.fetch_page(limit, start_cursor=cursor) except BadRequestError, exc: raise RESTException('Invalid "cursor" argument - %s' % self.request.GET.get('cursor')) if not more_available: cursor = None return (results, cursor) def _order_query(self, query): if not self.request.GET.get('order'): orders = [] else: try: orders = [o.strip() for o in self.request.GET.get('order').split(',')] orders = ['+'+o if not o.startswith('-') and not o.startswith('+') else o for o in orders] translated_orders = dict([order.lstrip('-+'), order[0]] for order in orders) translated_orders = translate_property_names(translated_orders, self.model, 'input') orders = [-getattr(self.model, order) if direction == '-' else getattr(self.model, order) for order,direction in translated_orders.iteritems()] except AttributeError, exc: raise RESTException('Invalid "order" parameter - %s' % self.request.GET.get('order')) orders.append(self.model.key) return query.order(*orders) def _build_model_from_data(self, data, cls, model=None): data = translate_property_names(data, cls, 'input') input_properties = { } for (name, prop) in cls._properties.iteritems(): if name not in data: continue if prop._repeated: input_properties[name] = [self._value_to_property(value, prop) for value in data[name]] else: input_properties[name] = self._value_to_property(data[name], prop) if not model and getattr(cls, 'RESTMeta', None) and getattr(cls.RESTMeta, 'use_input_id', False): if 'id' not in data: raise RESTException('id field is required') input_properties['id'] = data['id'] included_properties = get_included_properties(cls, 'input') input_properties = dict((k,v) for k,v in input_properties.iteritems() if k in included_properties) if hasattr(cls, 'RESTMeta') and hasattr(cls.RESTMeta, 'user_owner_property'): if not model and self.user: input_properties[cls.RESTMeta.user_owner_property] = self.user.key if not model: model = cls(**input_properties) else: model.populate(**input_properties) return model def _value_to_property(self, value, prop): if isinstance(prop, ndb.KeyProperty): if value is None: return None try: return ndb.Key(urlsafe=value) except ProtocolBufferDecodeError as e: if prop._kind is not None: model_class = 
ndb.Model._kind_map.get(prop._kind) if getattr(model_class, 'RESTMeta', None) and getattr(model_class.RESTMeta, 'use_input_id', False): return ndb.Key(model_class, value) raise RESTException('invalid key: {}'.format(value) ) elif isinstance(prop, ndb.TimeProperty): if dateutil is None: try: return datetime.strptime(value, "%H:%M:%S").time() except ValueError as e: raise RESTException("Invalid time. Must be in ISO 8601 format.") else: return dateutil.parser.parse(value).time() elif isinstance(prop, ndb.DateProperty): if dateutil is None: try: return datetime.strptime(value, "%Y-%m-%d").date() except ValueError as e: raise RESTException("Invalid date. Must be in ISO 8601 format.") else: return dateutil.parser.parse(value).date() elif isinstance(prop, ndb.DateTimeProperty): if dateutil is None: try: return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S") except ValueError as e: raise RESTException("Invalid datetime. Must be in ISO 8601 format.") else: return dateutil.parser.parse(value) elif isinstance(prop, ndb.GeoPtProperty): return ndb.GeoPt(value) elif isinstance(prop, ndb.StructuredProperty): return self._build_model_from_data(value, prop._modelclass) else: return value
Apache License 2.0
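A hedged sketch of calling get_rest_class() directly, using only the signature and permission constants visible in the excerpt; MyModel and the base URL are invented, and the library's public route-building wrapper (not shown here) may be the intended entry point instead.

from google.appengine.ext import ndb
from rest_gae.rest_gae import (get_rest_class, PERMISSION_ANYONE,
                               PERMISSION_LOGGED_IN_USER, PERMISSION_ADMIN)

class MyModel(ndb.Model):            # invented example model
    name = ndb.StringProperty()

HandlerCls = get_rest_class(
    MyModel, base_url='/api/mymodel',
    permissions={'GET': PERMISSION_ANYONE,
                 'POST': PERMISSION_LOGGED_IN_USER,
                 'PUT': PERMISSION_LOGGED_IN_USER,
                 'DELETE': PERMISSION_ADMIN})
# HandlerCls is a webapp2 RequestHandler subclass that can be mounted on a route.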
pypa/pipenv
pipenv/vendor/click/testing.py
Result.stdout
python
def stdout(self) -> str:
    return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
        "\r\n", "\n"
    )
The standard output as unicode string.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/vendor/click/testing.py#L139-L143
import contextlib import io import os import shlex import shutil import sys import tempfile import typing as t from types import TracebackType from . import formatting from . import termui from . import utils from ._compat import _find_binary_reader if t.TYPE_CHECKING: from .core import BaseCommand class EchoingStdin: def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None: self._input = input self._output = output self._paused = False def __getattr__(self, x: str) -> t.Any: return getattr(self._input, x) def _echo(self, rv: bytes) -> bytes: if not self._paused: self._output.write(rv) return rv def read(self, n: int = -1) -> bytes: return self._echo(self._input.read(n)) def read1(self, n: int = -1) -> bytes: return self._echo(self._input.read1(n)) def readline(self, n: int = -1) -> bytes: return self._echo(self._input.readline(n)) def readlines(self) -> t.List[bytes]: return [self._echo(x) for x in self._input.readlines()] def __iter__(self) -> t.Iterator[bytes]: return iter(self._echo(x) for x in self._input) def __repr__(self) -> str: return repr(self._input) @contextlib.contextmanager def _pause_echo(stream: t.Optional[EchoingStdin]) -> t.Iterator[None]: if stream is None: yield else: stream._paused = True yield stream._paused = False class _NamedTextIOWrapper(io.TextIOWrapper): def __init__( self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any ) -> None: super().__init__(buffer, **kwargs) self._name = name self._mode = mode @property def name(self) -> str: return self._name @property def mode(self) -> str: return self._mode def make_input_stream( input: t.Optional[t.Union[str, bytes, t.IO]], charset: str ) -> t.BinaryIO: if hasattr(input, "read"): rv = _find_binary_reader(t.cast(t.IO, input)) if rv is not None: return rv raise TypeError("Could not find binary reader for input stream.") if input is None: input = b"" elif isinstance(input, str): input = input.encode(charset) return io.BytesIO(t.cast(bytes, input)) class Result: def __init__( self, runner: "CliRunner", stdout_bytes: bytes, stderr_bytes: t.Optional[bytes], return_value: t.Any, exit_code: int, exception: t.Optional[BaseException], exc_info: t.Optional[ t.Tuple[t.Type[BaseException], BaseException, TracebackType] ] = None, ): self.runner = runner self.stdout_bytes = stdout_bytes self.stderr_bytes = stderr_bytes self.return_value = return_value self.exit_code = exit_code self.exception = exception self.exc_info = exc_info @property def output(self) -> str: return self.stdout @property
MIT License
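A short sketch of click's test runner showing where Result.stdout comes from; it uses the upstream click import path rather than pipenv's vendored copy, and the hello command is made up.

import click
from click.testing import CliRunner

@click.command()
@click.argument("name")
def hello(name):
    click.echo(f"Hello {name}!")

runner = CliRunner()
result = runner.invoke(hello, ["world"])
assert result.exit_code == 0
assert result.stdout == "Hello world!\n"   # decoded from stdout_bytes, CRLF-normalized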
masoniteframework/orm
tests/mysql/grammar/test_mysql_select_grammar.py
TestMySQLGrammar.shared_lock
python
def shared_lock(self):
    return "SELECT * FROM `users` WHERE `users`.`votes` >= '100' LOCK IN SHARE MODE"
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
https://github.com/masoniteframework/orm/blob/29b331068ddad9f17584c3dd48917043eba915ab/tests/mysql/grammar/test_mysql_select_grammar.py#L373-L378
import inspect import unittest from src.masoniteorm.query.grammars import MySQLGrammar from src.masoniteorm.testing import BaseTestCaseSelectGrammar class TestMySQLGrammar(BaseTestCaseSelectGrammar, unittest.TestCase): grammar = MySQLGrammar def can_compile_select(self): return "SELECT * FROM `users`" def can_compile_with_columns(self): return "SELECT `users`.`username`, `users`.`password` FROM `users`" def can_compile_order_by_and_first(self): return """SELECT * FROM `users` ORDER BY `id` ASC LIMIT 1""" def can_compile_with_where(self): return "SELECT `users`.`username`, `users`.`password` FROM `users` WHERE `users`.`id` = '1'" def can_compile_with_several_where(self): return "SELECT `users`.`username`, `users`.`password` FROM `users` WHERE `users`.`id` = '1' AND `users`.`username` = 'joe'" def can_compile_with_several_where_and_limit(self): return "SELECT `users`.`username`, `users`.`password` FROM `users` WHERE `users`.`id` = '1' AND `users`.`username` = 'joe' LIMIT 10" def can_compile_with_sum(self): return "SELECT SUM(`users`.`age`) AS age FROM `users`" def can_compile_with_max(self): return "SELECT MAX(`users`.`age`) AS age FROM `users`" def can_compile_with_max_and_columns(self): return "SELECT `users`.`username`, MAX(`users`.`age`) AS age FROM `users`" def can_compile_with_max_and_columns_different_order(self): return "SELECT `users`.`username`, MAX(`users`.`age`) AS age FROM `users`" def can_compile_with_order_by(self): return "SELECT `users`.`username` FROM `users` ORDER BY `age` DESC" def can_compile_with_multiple_order_by(self): return "SELECT `users`.`username` FROM `users` ORDER BY `age` DESC, `name` ASC" def can_compile_with_group_by(self): return "SELECT `users`.`username` FROM `users` GROUP BY `users`.`age`" def can_compile_where_in(self): return "SELECT `users`.`username` FROM `users` WHERE `users`.`age` IN ('1','2','3')" def can_compile_where_in_empty(self): return """SELECT * FROM `users` WHERE 0 = 1""" def can_compile_where_not_in(self): return "SELECT `users`.`username` FROM `users` WHERE `users`.`age` NOT IN ('1','2','3')" def can_compile_where_null(self): return "SELECT `users`.`username` FROM `users` WHERE `users`.`age` IS NULL" def can_compile_where_not_null(self): return "SELECT `users`.`username` FROM `users` WHERE `users`.`age` IS NOT NULL" def can_compile_where_raw(self): return "SELECT * FROM `users` WHERE `users`.`age` = '18'" def can_compile_select_raw(self): return "SELECT COUNT(*) FROM `users`" def can_compile_limit_and_offset(self): return "SELECT * FROM `users` LIMIT 10 OFFSET 10" def can_compile_select_raw_with_select(self): return "SELECT `users`.`id`, COUNT(*) FROM `users`" def can_compile_count(self): return "SELECT COUNT(*) AS m_count_reserved FROM `users`" def can_compile_count_column(self): return "SELECT COUNT(`users`.`money`) AS money FROM `users`" def can_compile_where_column(self): return "SELECT * FROM `users` WHERE `users`.`name` = `users`.`email`" def can_compile_or_where(self): return ( "SELECT * FROM `users` WHERE `users`.`name` = '2' OR `users`.`name` = '3'" ) def can_grouped_where(self): return "SELECT * FROM `users` WHERE (`users`.`age` = '2' AND `users`.`name` = 'Joe')" def can_compile_sub_select(self): return "SELECT * FROM `users` WHERE `users`.`name` IN (SELECT `users`.`age` FROM `users`)" def can_compile_sub_select_value(self): return "SELECT * FROM `users` WHERE `users`.`name` = (SELECT SUM(`users`.`age`) AS age FROM `users`)" def can_compile_complex_sub_select(self): return "SELECT * FROM `users` WHERE `users`.`name` IN (SELECT 
`users`.`age` FROM `users` WHERE `users`.`email` IN (SELECT `users`.`email` FROM `users`))" def can_compile_exists(self): return "SELECT `users`.`age` FROM `users` WHERE EXISTS (SELECT `users`.`username` FROM `users` WHERE `users`.`age` = '12')" def can_compile_having(self): return "SELECT SUM(`users`.`age`) AS age FROM `users` GROUP BY `users`.`age` HAVING `users`.`age`" def can_compile_having_with_expression(self): return "SELECT SUM(`users`.`age`) AS age FROM `users` GROUP BY `users`.`age` HAVING `users`.`age` = '10'" def can_compile_having_with_greater_than_expression(self): return "SELECT SUM(`users`.`age`) AS age FROM `users` GROUP BY `users`.`age` HAVING `users`.`age` > '10'" def can_compile_join(self): return "SELECT * FROM `users` INNER JOIN `contacts` ON `users`.`id` = `contacts`.`user_id`" def can_compile_left_join(self): return "SELECT * FROM `users` LEFT JOIN `contacts` ON `users`.`id` = `contacts`.`user_id`" def can_compile_multiple_join(self): return "SELECT * FROM `users` INNER JOIN `contacts` ON `users`.`id` = `contacts`.`user_id` INNER JOIN `posts` ON `comments`.`post_id` = `posts`.`id`" def can_compile_between(self): return "SELECT * FROM `users` WHERE `users`.`age` BETWEEN '18' AND '21'" def can_compile_not_between(self): return "SELECT * FROM `users` WHERE `users`.`age` NOT BETWEEN '18' AND '21'" def test_can_compile_where_raw(self): to_sql = self.builder.where_raw("`age` = '18'").to_sql() self.assertEqual(to_sql, "SELECT * FROM `users` WHERE `age` = '18'") def test_can_compile_select_raw(self): to_sql = self.builder.select_raw("COUNT(*)").to_sql() self.assertEqual(to_sql, "SELECT COUNT(*) FROM `users`") def test_can_compile_select_raw_with_select(self): to_sql = self.builder.select("id").select_raw("COUNT(*)").to_sql() self.assertEqual(to_sql, "SELECT `users`.`id`, COUNT(*) FROM `users`") def can_compile_first_or_fail(self): return """SELECT * FROM `users` WHERE `users`.`is_admin` = '1' LIMIT 1""" def where_not_like(self): return "SELECT * FROM `users` WHERE `users`.`age` NOT LIKE '%name%'" def where_like(self): return "SELECT * FROM `users` WHERE `users`.`age` LIKE '%name%'" def can_compile_join_clause(self): return "SELECT * FROM `users` INNER JOIN `report_groups` AS `rg` ON `bgt`.`fund` = `rg`.`fund` AND `bgt`.`dept` = `rg`.`dept` AND `bgt`.`acct` = `rg`.`acct` AND `bgt`.`sub` = `rg`.`sub`" def can_compile_join_clause_with_value(self): return "SELECT * FROM `users` INNER JOIN `report_groups` AS `rg` ON `bgt`.`active` = '1' OR `bgt`.`acct` = '1234'" def can_compile_join_clause_with_null(self): return "SELECT * FROM `users` INNER JOIN `report_groups` AS `rg` ON `acct` IS NULL OR `dept` IS NOT NULL" def can_compile_join_clause_with_lambda(self): return "SELECT * FROM `users` INNER JOIN `report_groups` AS `rg` ON `bgt`.`fund` = `rg`.`fund` AND `bgt` IS NULL" def can_compile_left_join_clause_with_lambda(self): return "SELECT * FROM `users` LEFT JOIN `report_groups` AS `rg` ON `bgt`.`fund` = `rg`.`fund` OR `bgt` IS NULL" def can_compile_right_join_clause_with_lambda(self): return "SELECT * FROM `users` RIGHT JOIN `report_groups` AS `rg` ON `bgt`.`fund` = `rg`.`fund` OR `bgt` IS NULL"
MIT License
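The shared_lock docstring above appears to be copied from the "not like" test; a hedged guess at the builder call that would actually produce the expected SQL follows. The .shared_lock() method name and arguments are assumptions, not confirmed by the quoted excerpt.

# Assumed builder call for the expected shared-lock SQL (API not confirmed by the excerpt).
builder = self.get_builder()
sql = builder.where("votes", ">=", "100").shared_lock().to_sql()
# expected: SELECT * FROM `users` WHERE `users`.`votes` >= '100' LOCK IN SHARE MODE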
google/gazoo-device
gazoo_device/switchboard/transports/serial_transport.py
SerialTransport.send_xoff
python
def send_xoff(self):
    self._serial.set_input_flow_control(enable=False)
Sends flow control XOFF byte.

Args:
    None

Raises:
    SerialException: when pyserial fails to send the XOFF byte.

Returns:
    None
https://github.com/google/gazoo-device/blob/f333b386f5993c8d4c9e12c89ebb620a0c4f5506/gazoo_device/switchboard/transports/serial_transport.py#L262-L272
import fcntl import os import time import typing from gazoo_device import gdm_logger from gazoo_device.switchboard import transport_properties as props from gazoo_device.switchboard.transports import transport_base import serial import six logger = gdm_logger.get_logger() DEFAULT_BAUDRATE = 115200 REOPEN_TIMEOUT = 20 class SerialTransport(transport_base.TransportBase): def __init__(self, comms_address, baudrate=DEFAULT_BAUDRATE, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, use_high_baudrate_flow_control=False, auto_reopen=False, open_on_start=True): super(SerialTransport, self).__init__(auto_reopen, open_on_start) self._max_read_errors = 3 self._properties.update({ props.BAUDRATE: baudrate, props.BYTESIZE: bytesize, props.PARITY: parity, props.STOPBITS: stopbits, props.XONXOFF: False, props.RTSCTS: False, props.DSRDTR: False, props.EXCLUSIVE: True, props.READ_REOPEN: True, props.USE_HIGH_BAUDRATE_FLOW_CONTROL: use_high_baudrate_flow_control }) self.comms_address = comms_address self._read_errors = 0 self._serial = typing.cast(serial.serialposix.Serial, serial.Serial()) self._serial.port = comms_address def is_open(self): return hasattr(self, "_serial") and self._serial.isOpen() def _open(self): self._serial.baudrate = self._properties[props.BAUDRATE] self._serial.bytesize = self._properties[props.BYTESIZE] self._serial.parity = self._properties[props.PARITY] self._serial.stopbits = self._properties[props.STOPBITS] self._serial.xonxoff = self._properties[props.XONXOFF] self._serial.rtscts = self._properties[props.RTSCTS] self._serial.dsrdtr = self._properties[props.DSRDTR] self._serial.exclusive = self._properties[props.EXCLUSIVE] self._serial.timeout = None self._serial.write_timeout = None self._serial.open() fd = self._serial.fileno() flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def _close(self): fcntl.flock(self._serial.fileno(), fcntl.LOCK_UN) self._serial.close() def _read(self, size=1, timeout=None): self._serial.timeout = timeout try: in_waiting = self._serial.in_waiting if in_waiting > 3072: logger.warning("Serial port input buffer size exceeds 3072 bytes. " "Data will be lost if buffer size exceeds 4095 bytes. 
\n" "Input buffer size: {}".format(in_waiting)) result = self._serial.read(size=size) if self._read_errors: self._read_errors -= 1 except serial.SerialException as err: self._read_errors += 1 no_data = "read but returned no data" in repr(err) read_reopen = self._properties[props.READ_REOPEN] if self._read_errors > self._max_read_errors or not no_data or not read_reopen: raise logger.warn("Recovering from read returned no data, try {}/{}".format( self._read_errors, self._max_read_errors)) self.close() end_time = time.time() + REOPEN_TIMEOUT while not os.path.exists(self._serial.port) and time.time() < end_time: time.sleep(0.001) self.open() result = self._read(size=size, timeout=timeout) return result def _write(self, data, timeout=None): if isinstance(data, six.text_type): data = data.encode("utf-8", errors="replace") use_flow_control = ( self._properties[props.BAUDRATE] > DEFAULT_BAUDRATE and self._properties[props.USE_HIGH_BAUDRATE_FLOW_CONTROL]) if use_flow_control: self.send_xoff() self._serial.write_timeout = timeout bytes_written = self._serial.write(data) if use_flow_control: self.send_xon() return bytes_written def set_property(self, key, value): super(SerialTransport, self).set_property(key, value) if self._serial.isOpen(): if key == props.BAUDRATE: self._serial.baudrate = value if key == props.BYTESIZE: self._serial.bytesize = value if key == props.PARITY: self._serial.parity = value if key == props.STOPBITS: self._serial.stopbits = value if key == props.XONXOFF: self._serial.xonxoff = value if key == props.RTSCTS: self._serial.rtscts = value if key == props.DSRDTR: self._serial.dsrdtr = value if key == props.EXCLUSIVE: self._serial.exclusive = value def flush_buffers(self): self._serial.flush() self._serial.reset_input_buffer() self._serial.reset_output_buffer() def send_xon(self): self._serial.set_input_flow_control(enable=True)
Apache License 2.0
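A usage sketch built from the SerialTransport excerpt above; the device path and baudrate are placeholders, and the public open/write/close wrappers are assumed to come from the transport_base.TransportBase parent class, which is not shown.

from gazoo_device.switchboard.transports.serial_transport import SerialTransport

transport = SerialTransport("/dev/ttyUSB0", baudrate=921600,
                            use_high_baudrate_flow_control=True)
transport.open()                    # assumed public wrapper around _open()
try:
    transport.send_xoff()           # XOFF: ask the device to pause its output
    transport.write(b"long command\n")
    transport.send_xon()            # XON: let the device resume
finally:
    transport.close()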
kubevirt/client-python
kubevirt/models/v1_kube_virt_configuration.py
V1KubeVirtConfiguration.handler_configuration
python
def handler_configuration(self, handler_configuration):
    self._handler_configuration = handler_configuration
Sets the handler_configuration of this V1KubeVirtConfiguration.

:param handler_configuration: The handler_configuration of this V1KubeVirtConfiguration.
:type: V1ReloadableComponentConfiguration
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_kube_virt_configuration.py#L319-L327
from pprint import pformat from six import iteritems import re class V1KubeVirtConfiguration(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'api_configuration': 'V1ReloadableComponentConfiguration', 'controller_configuration': 'V1ReloadableComponentConfiguration', 'cpu_model': 'str', 'cpu_request': 'K8sIoApimachineryPkgApiResourceQuantity', 'default_runtime_class': 'str', 'developer_configuration': 'V1DeveloperConfiguration', 'emulated_machines': 'list[str]', 'handler_configuration': 'V1ReloadableComponentConfiguration', 'image_pull_policy': 'str', 'machine_type': 'str', 'mediated_devices_configuration': 'V1MediatedDevicesConfiguration', 'mem_balloon_stats_period': 'int', 'migrations': 'V1MigrationConfiguration', 'min_cpu_model': 'str', 'network': 'V1NetworkConfiguration', 'obsolete_cpu_models': 'dict(str, bool)', 'ovmf_path': 'str', 'permitted_host_devices': 'V1PermittedHostDevices', 'selinux_launcher_type': 'str', 'smbios': 'V1SMBiosConfiguration', 'supported_guest_agent_versions': 'list[str]', 'virtual_machine_instances_per_node': 'int', 'webhook_configuration': 'V1ReloadableComponentConfiguration' } attribute_map = { 'api_configuration': 'apiConfiguration', 'controller_configuration': 'controllerConfiguration', 'cpu_model': 'cpuModel', 'cpu_request': 'cpuRequest', 'default_runtime_class': 'defaultRuntimeClass', 'developer_configuration': 'developerConfiguration', 'emulated_machines': 'emulatedMachines', 'handler_configuration': 'handlerConfiguration', 'image_pull_policy': 'imagePullPolicy', 'machine_type': 'machineType', 'mediated_devices_configuration': 'mediatedDevicesConfiguration', 'mem_balloon_stats_period': 'memBalloonStatsPeriod', 'migrations': 'migrations', 'min_cpu_model': 'minCPUModel', 'network': 'network', 'obsolete_cpu_models': 'obsoleteCPUModels', 'ovmf_path': 'ovmfPath', 'permitted_host_devices': 'permittedHostDevices', 'selinux_launcher_type': 'selinuxLauncherType', 'smbios': 'smbios', 'supported_guest_agent_versions': 'supportedGuestAgentVersions', 'virtual_machine_instances_per_node': 'virtualMachineInstancesPerNode', 'webhook_configuration': 'webhookConfiguration' } def __init__(self, api_configuration=None, controller_configuration=None, cpu_model=None, cpu_request=None, default_runtime_class=None, developer_configuration=None, emulated_machines=None, handler_configuration=None, image_pull_policy=None, machine_type=None, mediated_devices_configuration=None, mem_balloon_stats_period=None, migrations=None, min_cpu_model=None, network=None, obsolete_cpu_models=None, ovmf_path=None, permitted_host_devices=None, selinux_launcher_type=None, smbios=None, supported_guest_agent_versions=None, virtual_machine_instances_per_node=None, webhook_configuration=None): self._api_configuration = None self._controller_configuration = None self._cpu_model = None self._cpu_request = None self._default_runtime_class = None self._developer_configuration = None self._emulated_machines = None self._handler_configuration = None self._image_pull_policy = None self._machine_type = None self._mediated_devices_configuration = None self._mem_balloon_stats_period = None self._migrations = None self._min_cpu_model = None self._network = None self._obsolete_cpu_models = None self._ovmf_path = None self._permitted_host_devices = None self._selinux_launcher_type = None self._smbios = None self._supported_guest_agent_versions = None 
self._virtual_machine_instances_per_node = None self._webhook_configuration = None if api_configuration is not None: self.api_configuration = api_configuration if controller_configuration is not None: self.controller_configuration = controller_configuration if cpu_model is not None: self.cpu_model = cpu_model if cpu_request is not None: self.cpu_request = cpu_request if default_runtime_class is not None: self.default_runtime_class = default_runtime_class if developer_configuration is not None: self.developer_configuration = developer_configuration if emulated_machines is not None: self.emulated_machines = emulated_machines if handler_configuration is not None: self.handler_configuration = handler_configuration if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if machine_type is not None: self.machine_type = machine_type if mediated_devices_configuration is not None: self.mediated_devices_configuration = mediated_devices_configuration if mem_balloon_stats_period is not None: self.mem_balloon_stats_period = mem_balloon_stats_period if migrations is not None: self.migrations = migrations if min_cpu_model is not None: self.min_cpu_model = min_cpu_model if network is not None: self.network = network if obsolete_cpu_models is not None: self.obsolete_cpu_models = obsolete_cpu_models if ovmf_path is not None: self.ovmf_path = ovmf_path if permitted_host_devices is not None: self.permitted_host_devices = permitted_host_devices if selinux_launcher_type is not None: self.selinux_launcher_type = selinux_launcher_type if smbios is not None: self.smbios = smbios if supported_guest_agent_versions is not None: self.supported_guest_agent_versions = supported_guest_agent_versions if virtual_machine_instances_per_node is not None: self.virtual_machine_instances_per_node = virtual_machine_instances_per_node if webhook_configuration is not None: self.webhook_configuration = webhook_configuration @property def api_configuration(self): return self._api_configuration @api_configuration.setter def api_configuration(self, api_configuration): self._api_configuration = api_configuration @property def controller_configuration(self): return self._controller_configuration @controller_configuration.setter def controller_configuration(self, controller_configuration): self._controller_configuration = controller_configuration @property def cpu_model(self): return self._cpu_model @cpu_model.setter def cpu_model(self, cpu_model): self._cpu_model = cpu_model @property def cpu_request(self): return self._cpu_request @cpu_request.setter def cpu_request(self, cpu_request): self._cpu_request = cpu_request @property def default_runtime_class(self): return self._default_runtime_class @default_runtime_class.setter def default_runtime_class(self, default_runtime_class): self._default_runtime_class = default_runtime_class @property def developer_configuration(self): return self._developer_configuration @developer_configuration.setter def developer_configuration(self, developer_configuration): self._developer_configuration = developer_configuration @property def emulated_machines(self): return self._emulated_machines @emulated_machines.setter def emulated_machines(self, emulated_machines): self._emulated_machines = emulated_machines @property def handler_configuration(self): return self._handler_configuration @handler_configuration.setter
Apache License 2.0
rick-mccoy/reformer-pytorch
utils/utils.py
look_back
python
def look_back(input_tensor: torch.Tensor) -> torch.Tensor:
    shift = torch.cat([input_tensor[:, -1:], input_tensor[:, :-1]], dim=1)
    concat = torch.cat([shift, input_tensor], dim=2)
    return concat
Looks back one bucket
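A minimal usage sketch of the function above; the tensor shape is an illustrative assumption, not taken from the repository:

import torch

# Hypothetical bucketed input: (batch=2, n_buckets=4, bucket_length=3).
buckets = torch.arange(2 * 4 * 3, dtype=torch.float32).reshape(2, 4, 3)
looked_back = look_back(buckets)
# Each bucket is concatenated with the bucket before it along the last
# dimension, so the result has shape (2, 4, 6).
print(looked_back.shape)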
https://github.com/rick-mccoy/reformer-pytorch/blob/ba03080a44c37ad2b97173097c6c383e84f45393/utils/utils.py#L28-L36
import os
import time

import torch
import numpy as np
from tqdm import tqdm

from datasets.music import roll_to_midi


def init_fn(worker_id):
    return np.random.seed(torch.initial_seed() % (2 ** 32) + worker_id)


def merge_hp(hp, args):
    for key, value in hp.model.items():
        setattr(args, key, value)
    for key, value in hp.data.items():
        setattr(args, key, value)
    for key, value in hp.train.items():
        setattr(args, key, value)
    return args


def deterministic_dropout(x: torch.Tensor, seed=0, dropout=0):
    generator = torch.Generator(device=x.get_device())
    generator.manual_seed(seed)
    dropout_mask = torch.bernoulli(x, p=1 - dropout, generator=generator)
    return dropout_mask * x / (1 - dropout)
MIT License
jalaali/jalaali-python
jalaali/jalaali.py
Jalaali.j2d
python
def j2d(jy, jm, jd):
    r = Jalaali.jal_cal(jy)
    return Jalaali.g2d(r['gy'], 3, r['march']) + (jm - 1) * 31 - div(jm, 7) * (jm - 7) + jd - 1
Converts a date of the Jalaali calendar to the julian day number.

:param jy: Jalaali Year (1 to 3100)
:type jy: int
:param jm: Jalaali Month (1 to 12)
:type jm: int
:param jd: Jalaali Day (1 to 29/31)
:type jd: int
:return: Julian Day number
:rtype: int
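A short usage sketch based on the static methods shown in the context below; the example date is arbitrary:

# Julian day number for the Jalaali date 1395-03-04.
jdn = Jalaali.j2d(1395, 3, 4)
print(jdn)
# Day 4 is within range for month 3, so the date validates.
print(Jalaali.is_valid_jalaali_date(1395, 3, 4))  # True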
https://github.com/jalaali/jalaali-python/blob/45c55a063afe5900eff2595d6091c2527e11db99/jalaali/jalaali.py#L169-L182
from __future__ import division def div(a, b): return int(a / b) def mod(a, b): return a - div(a, b) * b class Jalaali: @staticmethod def to_jalaali(gy, gm, gd): return Jalaali.d2j(Jalaali.g2d(gy, gm, gd)) @staticmethod def to_gregorian(jy, jm, jd): return Jalaali.d2g(Jalaali.j2d(jy, jm, jd)) @staticmethod def is_valid_jalaali_date(jy, jm, jd): year_is_valid = (-61 <= jy <= 3177) month_is_valid = (1 <= jm <= 12) day_is_valid = (1 <= jd <= Jalaali.jalaali_month_length(jy, jm)) return year_is_valid and month_is_valid and day_is_valid @staticmethod def is_leap_jalaali_year(jy): return Jalaali.jal_cal(jy)['leap'] == 0 @staticmethod def jalaali_month_length(jy, jm): if jm <= 6: return 31 if jm <= 11: return 30 if Jalaali.is_leap_jalaali_year(jy): return 30 return 29 @staticmethod def jal_cal(jy): breaks = [-61, 9, 38, 199, 426, 686, 756, 818, 1111, 1181, 1210, 1635, 2060, 2097, 2192, 2262, 2324, 2394, 2456, 3178] b1 = len(breaks) gy = jy + 621 leap_j = -14 jp = breaks[0] jump = None if jy < jp or jy >= breaks[b1 - 1]: raise Exception('Invalid Jalaali year ' + str(jy)) for i in range(1, b1): jm = breaks[i] jump = jm - jp if jy < jm: break leap_j = leap_j + div(jump, 33) * 8 + div(mod(jump, 33), 4) jp = jm n = jy - jp leap_j = leap_j + div(n, 33) * 8 + div(mod(n, 33) + 3, 4) if mod(jump, 33) == 4 and jump - n == 4: leap_j += 1 leap_g = div(gy, 4) - div((div(gy, 100) + 1) * 3, 4) - 150 march = 20 + leap_j - leap_g if jump - n < 6: n = n - jump + div(jump + 4, 33) * 33 leap = mod(mod(n + 1, 33) - 1, 4) if leap == -1: leap = 4 return {'leap': leap, 'gy': gy, 'march': march} @staticmethod
MIT License
peterdsharpe/aerosandbox
aerosandbox/visualization/carpet_plot_utils.py
remove_nans
python
def remove_nans(array):
    return array[~np.isnan(array)]
Removes NaN values in a 1D array.

Args:
    array: a 1D array of data.

Returns:
    The array with all NaN values stripped.
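A quick illustration of the helper with a hypothetical array:

import numpy as np

data = np.array([1.0, np.nan, 2.5, np.nan, 3.0])
print(remove_nans(data))  # array([1. , 2.5, 3. ])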
https://github.com/peterdsharpe/aerosandbox/blob/8fbf9449cba2f02e14424690ba2e34b438f21c69/aerosandbox/visualization/carpet_plot_utils.py#L49-L58
import aerosandbox.numpy as np
import signal
from contextlib import contextmanager
import sys


@contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutError()
    try:
        signal.signal(signal.SIGALRM, signal_handler)
    except AttributeError:
        raise OSError("signal.SIGALRM could not be found. This is probably because you're not using Linux.")
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
MIT License
facebookresearch/mtenv
local_dm_control_suite/manipulator.py
Bring.__init__
python
def __init__(self, use_peg, insert, fully_observable, random=None):
    self._use_peg = use_peg
    self._target = "target_peg" if use_peg else "target_ball"
    self._object = "peg" if self._use_peg else "ball"
    self._object_joints = ["_".join([self._object, dim]) for dim in "xzy"]
    self._receptacle = "slot" if self._use_peg else "cup"
    self._insert = insert
    self._fully_observable = fully_observable
    super(Bring, self).__init__(random=random)
Initialize an instance of the `Bring` task.

Args:
    use_peg: A `bool`, whether to replace the ball prop with the peg prop.
    insert: A `bool`, whether to insert the prop in a receptacle.
    fully_observable: A `bool`, whether the observation should contain the
        position and velocity of the object being manipulated and the target
        location.
    random: Optional, either a `numpy.random.RandomState` instance, an
        integer seed for creating a new `RandomState`, or None to select a
        seed automatically (default).
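In the surrounding module the task is normally built through the suite factory functions rather than constructed directly; a hedged sketch of both routes (argument values are illustrative):

# Via the registered factory, which wraps the task in a control.Environment.
env = bring_ball(fully_observable=True)
timestep = env.reset()

# Direct construction mirrors what the factory does internally.
task = Bring(use_peg=False, insert=False, fully_observable=True, random=0)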
https://github.com/facebookresearch/mtenv/blob/4a6d9d6fdfb321f1b51f890ef36b5161359e972d/local_dm_control_suite/manipulator.py#L197-L217
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from dm_control import mujoco from dm_control.rl import control from . import base from . import common from dm_control.utils import containers from dm_control.utils import rewards from dm_control.utils import xml_tools from lxml import etree import numpy as np _CLOSE = 0.01 _CONTROL_TIMESTEP = 0.01 _TIME_LIMIT = 10 _P_IN_HAND = 0.1 _P_IN_TARGET = 0.1 _ARM_JOINTS = [ "arm_root", "arm_shoulder", "arm_elbow", "arm_wrist", "finger", "fingertip", "thumb", "thumbtip", ] _ALL_PROPS = frozenset(["ball", "target_ball", "cup", "peg", "target_peg", "slot"]) SUITE = containers.TaggedTasks() def make_model(use_peg, insert): xml_string = common.read_model("manipulator.xml") parser = etree.XMLParser(remove_blank_text=True) mjcf = etree.XML(xml_string, parser) if use_peg: required_props = ["peg", "target_peg"] if insert: required_props += ["slot"] else: required_props = ["ball", "target_ball"] if insert: required_props += ["cup"] for unused_prop in _ALL_PROPS.difference(required_props): prop = xml_tools.find_element(mjcf, "body", unused_prop) prop.getparent().remove(prop) return etree.tostring(mjcf, pretty_print=True), common.ASSETS @SUITE.add("benchmarking", "hard") def bring_ball( fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None ): use_peg = False insert = False physics = Physics.from_xml_string(*make_model(use_peg, insert)) task = Bring( use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random ) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit, **environment_kwargs ) @SUITE.add("hard") def bring_peg( fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None ): use_peg = True insert = False physics = Physics.from_xml_string(*make_model(use_peg, insert)) task = Bring( use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random ) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit, **environment_kwargs ) @SUITE.add("hard") def insert_ball( fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None ): use_peg = False insert = True physics = Physics.from_xml_string(*make_model(use_peg, insert)) task = Bring( use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random ) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit, **environment_kwargs ) @SUITE.add("hard") def insert_peg( fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None ): use_peg = True insert = True physics = Physics.from_xml_string(*make_model(use_peg, insert)) task = Bring( use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random ) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit, **environment_kwargs ) class Physics(mujoco.Physics): def bounded_joint_pos(self, joint_names): joint_pos = self.named.data.qpos[joint_names] return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T def joint_vel(self, joint_names): return self.named.data.qvel[joint_names] def body_2d_pose(self, body_names, orientation=True): if not isinstance(body_names, 
str): body_names = np.array(body_names).reshape(-1, 1) pos = self.named.data.xpos[body_names, ["x", "z"]] if orientation: ori = self.named.data.xquat[body_names, ["qw", "qy"]] return np.hstack([pos, ori]) else: return pos def touch(self): return np.log1p(self.data.sensordata) def site_distance(self, site1, site2): site1_to_site2 = np.diff(self.named.data.site_xpos[[site2, site1]], axis=0) return np.linalg.norm(site1_to_site2) class Bring(base.Task):
MIT License
elastic/enterprise-search-python
elastic_enterprise_search/client/_workplace_search.py
WorkplaceSearch.delete_documents
python
def delete_documents(
    self,
    content_source_id,
    document_ids,
    params=None,
    headers=None,
    http_auth=DEFAULT,
    request_timeout=DEFAULT,
    ignore_status=(),
):
    if content_source_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument")
    params = QueryParams(params)
    return self.perform_request(
        "POST",
        to_path(
            "api",
            "ws",
            "v1",
            "sources",
            content_source_id,
            "documents",
            "bulk_destroy",
        ),
        body=document_ids,
        params=params,
        headers=headers,
        http_auth=http_auth,
        request_timeout=request_timeout,
        ignore_status=ignore_status,
    )
Deletes a list of documents from a custom content source

`<https://www.elastic.co/guide/en/workplace-search/master/workplace-search-custom-sources-api.html#delete-by-id>`_

:arg content_source_id: Unique ID for a Custom API source, provided upon
    creation of a Custom API Source
:arg document_ids: HTTP request body
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username and password
    to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
:raises elastic_enterprise_search.BadRequestError:
:raises elastic_enterprise_search.UnauthorizedError:
:raises elastic_enterprise_search.NotFoundError:
:raises elastic_enterprise_search.PayloadTooLargeError:
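A hedged sketch of calling the endpoint; the host, access token, source ID and document IDs are placeholders, and the call assumes a reachable Workplace Search deployment:

client = WorkplaceSearch(
    "http://localhost:3002",
    http_auth="<content-source-access-token>",
)
resp = client.delete_documents(
    content_source_id="<content-source-id>",
    document_ids=["doc-1", "doc-2"],
)
print(resp)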
https://github.com/elastic/enterprise-search-python/blob/1788413218badc01e2da23ac290698de40117f8c/elastic_enterprise_search/client/_workplace_search.py#L634-L685
from elastic_transport import QueryParams from .._utils import ( DEFAULT, SKIP_IN_PATH, to_array, to_deep_object, to_path, ) from ._base import BaseClient class WorkplaceSearch(BaseClient): def create_analytics_event( self, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): params = QueryParams(params) return self.perform_request( "POST", "/api/ws/v1/analytics/event", body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def get_auto_query_refinement_details( self, content_source_id, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "GET", to_path( "api", "ws", "v1", "sources", content_source_id, "automatic_query_refinement", ), params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def create_batch_synonym_sets( self, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): params = QueryParams(params) return self.perform_request( "POST", "/api/ws/v1/synonyms", body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def command_sync_jobs( self, content_source_id, body, job_type=None, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) if job_type is not None: for v in to_array(job_type, param="job_type"): params.add("job_type[]", v) return self.perform_request( "POST", to_path( "api", "ws", "v1", "sources", content_source_id, "sync", "jobs", ), body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def create_content_source( self, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): params = QueryParams(params) return self.perform_request( "POST", "/api/ws/v1/sources", body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def delete_content_source( self, content_source_id, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "DELETE", to_path( "api", "ws", "v1", "sources", content_source_id, ), params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def get_content_source( self, content_source_id, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "GET", to_path( "api", "ws", "v1", "sources", content_source_id, ), params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def put_content_source_icons( self, content_source_id, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if 
content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "PUT", to_path( "api", "ws", "v1", "sources", content_source_id, "icon", ), body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def put_content_source( self, content_source_id, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "PUT", to_path( "api", "ws", "v1", "sources", content_source_id, ), body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def list_content_sources( self, current_page=None, page_size=None, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): params = QueryParams(params) if current_page is not None: params.add("page[current]", current_page) if page_size is not None: params.add("page[size]", page_size) return self.perform_request( "GET", "/api/ws/v1/sources", params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def get_current_user( self, get_token=None, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): params = QueryParams(params) if get_token is not None: params.add("get_token", get_token) return self.perform_request( "GET", "/api/ws/v1/whoami", params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def get_document( self, content_source_id, document_id, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): for param in ( content_source_id, document_id, ): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "GET", to_path( "api", "ws", "v1", "sources", content_source_id, "documents", document_id, ), params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, ) def delete_documents_by_query( self, content_source_id, body, params=None, headers=None, http_auth=DEFAULT, request_timeout=DEFAULT, ignore_status=(), ): if content_source_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument") params = QueryParams(params) return self.perform_request( "DELETE", to_path( "api", "ws", "v1", "sources", content_source_id, "documents", ), body=body, params=params, headers=headers, http_auth=http_auth, request_timeout=request_timeout, ignore_status=ignore_status, )
Apache License 2.0
fluiddyn/transonic
transonic/util.py
get_info_from_ipython
python
def get_info_from_ipython():
    src = get_ipython_input()
    hex_input = make_hex(src)
    dummy_filename = "__ipython__" + hex_input
    return src, dummy_filename
Get the input code and a "filename" when called from IPython
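Only meaningful from within an IPython session; a minimal sketch:

# Pairs the last IPython input with a synthetic module filename of the
# form "__ipython__<hex digest of the input>".
src, dummy_filename = get_info_from_ipython()
print(dummy_filename)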
https://github.com/fluiddyn/transonic/blob/f0896493301b3bdc30ae97a5c04747aa955a2350/transonic/util.py#L297-L302
import os import sys import inspect import re from pathlib import Path import importlib.util import shutil from textwrap import dedent from typing import Callable import gast as ast from transonic.config import backend_default try: import black except ImportError: import autopep8 def format_str(src_contents): return autopep8.fix_code(src_contents) else: try: _mode = black.FileMode(line_length=82) except TypeError: def format_str(src_contents: str): try: return black.format_str(src_contents, line_length=82) except black.InvalidInput: print("black.InvalidInput\n" + src_contents) raise else: def format_str(src_contents: str): try: return black.format_str(src_contents, mode=_mode) except black.InvalidInput: print("black.InvalidInput\n" + src_contents) raise try: from IPython.core.getipython import get_ipython except ImportError: pass from transonic import __version__ from transonic.analyses import extast from transonic.compiler import ( ext_suffix, make_hex, modification_date, has_to_build, ) from transonic.config import path_root, strtobool __all__ = ["modification_date", "has_to_build", "path_root"] def can_import_accelerator(backend: str = backend_default): if backend == "pythran": try: import pythran except ImportError: return False elif backend == "cython": try: import cython except ImportError: return False elif backend == "numba": try: import numba except ImportError: return False elif backend == "python": return True else: raise NotImplementedError return True def print_versions(accelerators=None): print(f"Transonic {__version__}") if accelerators is None or "pythran" in accelerators: try: import pythran except ImportError: print("Pythran not importable") else: print(f"Pythran {pythran.__version__}") if accelerators is None or "numba" in accelerators: try: import numba except ImportError: print("Numba not importable") else: print(f"Numba {numba.__version__}") if accelerators is None or "cython" in accelerators: try: import Cython except ImportError: print("Cython not importable") else: print(f"Cython {Cython.__version__}") def find_module_name_from_path(path_py: Path): path_py = Path(path_py) cwd = Path.cwd() path = path_py.absolute().parent module_name = path_py.stem special_dir = "__jit_class__" if special_dir in path.parts: tmp = [special_dir] name_pack = ".".join(path.parts[path.parts.index(special_dir) + 1 :]) if name_pack: tmp.append(name_pack) tmp.append(module_name) return ".".join(tmp) while path.parents: if path == cwd or str(path) in sys.path: return module_name module_name = path.name + "." 
+ module_name path = path.parent return path_py.stem def get_module_name(frame): module = inspect.getmodule(frame[0]) filename = None if module is not None: module_name = module.__name__ if module_name in ("__main__", "<run_path>"): filename = frame.filename else: filename = frame.filename if filename is not None: module_name = find_module_name_from_path(Path(filename)) if module_name is None: src, module_name = get_info_from_ipython() return module_name def get_name_calling_module(index_frame: int = 1): try: frame = inspect.stack()[index_frame] except IndexError: print("index_frame", index_frame) print([frame[1] for frame in inspect.stack()]) raise return get_module_name(frame) def get_source_without_decorator(func: Callable): src = inspect.getsource(func) src = dedent(src) return strip_typehints(re.sub(r"@.*?\sdef\s", "def ", src)) class TypeHintRemover(ast.NodeTransformer): def visit_FunctionDef(self, fdef): fdef.returns = None if fdef.args.args: for arg in fdef.args.args: arg.annotation = None body = [] for node in fdef.body: if isinstance(node, ast.AnnAssign): if node.value is None: continue node = ast.Assign( targets=[node.target], value=node.value, type_comment=None ) body.append(node) fdef.body = body return fdef def strip_typehints(source): source = format_str(source) parsed_source = ast.parse(source) transformed = TypeHintRemover().visit(parsed_source) striped_code = extast.unparse(transformed) return striped_code def make_code_from_fdef_node(fdef): transformed = TypeHintRemover().visit(fdef) code = extast.unparse(transformed) return format_str(code) def get_ipython_input(last=True): ip = get_ipython() hist_raw = ip.history_manager.input_hist_raw if last: return hist_raw[-1] else: return "\n".join(hist_raw)
BSD 3-Clause New or Revised License
wikimedia/pywikibot
pywikibot/__init__.py
_WbDataPage._validate
python
def _validate(page: 'Page', data_site: 'BaseSite', ending: str,
              label: str) -> None:
    if not isinstance(page, Page):
        raise ValueError(
            'Page {} must be a pywikibot.Page object not a {}.'
            .format(page, type(page)))
    if not page.exists():
        raise ValueError('Page {} must exist.'.format(page))
    if not data_site:
        raise ValueError(
            'The provided site does not support {}.'.format(label))
    if page.site != data_site:
        raise ValueError(
            'Page must be on the {} repository site.'.format(label))
    if not page.title().startswith('Data:') or not page.title().endswith(ending):
        raise ValueError(
            "Page must be in 'Data:' namespace and end in '{}' "
            'for {}.'.format(ending, label))
Validate the provided page against general and type specific rules.

:param page: Page containing the data.
:param data_site: The site serving as a repository for the given data type.
:param ending: Required filetype-like ending in page titles. E.g. '.map'
:param label: Label describing the data type in error messages.
https://github.com/wikimedia/pywikibot/blob/5097f5b9a7ef9d39f35f17edd11faf3086a01d1d/pywikibot/__init__.py#L899-L937
import atexit import datetime import math import re import sys import threading import time from contextlib import suppress from decimal import Decimal from queue import Queue from typing import Any, Optional, Union from urllib.parse import urlparse from warnings import warn from pywikibot import config as _config from pywikibot import exceptions from pywikibot.__metadata__ import ( __copyright__, __description__, __download_url__, __license__, __maintainer__, __maintainer_email__, __name__, __url__, __version__, ) from pywikibot._wbtypes import WbRepresentation as _WbRepresentation from pywikibot.backports import ( cache, removesuffix, Callable, Dict, List, Tuple, ) from pywikibot.bot import ( Bot, CurrentPageBot, WikidataBot, calledModuleName, handle_args, input, input_choice, input_yn, show_help, ui, ) from pywikibot.diff import PatchManager from pywikibot.family import AutoFamily, Family from pywikibot.i18n import translate from pywikibot.logging import ( critical, debug, error, exception, log, output, stdout, warning, ) from pywikibot.site import APISite, BaseSite, DataSite from pywikibot.tools import classproperty from pywikibot.tools import normalize_username from pywikibot.tools.formatter import color_format TO_DECIMAL_TYPE = Union[int, float, str, 'Decimal', None] STR_OR_TIMESTAMP = Any OPT_STR_OR_SITE = Any OPT_STR_OR_ITEM_PAGE = Any OPT_STR_OR_FAMILY = Any TIMESTAMP_CLASS = Any COORDINATE_CLASS = Any WB_TIME_CLASS = Any WB_QUANTITY_CLASS = Any WB_MONOLINGUAL_TEXT_CLASS = Any WB_DATA_PAGE_CLASS = Any WB_GEO_SHAPE_CLASS = Any WB_TABULAR_DATA_CLASS = Any WB_UNKNOWN_CLASS = Any __all__ = ( '__copyright__', '__description__', '__download_url__', '__license__', '__maintainer__', '__maintainer_email__', '__name__', '__url__', '__version__', 'Bot', 'calledModuleName', 'Category', 'Claim', 'Coordinate', 'critical', 'CurrentPageBot', 'debug', 'error', 'exception', 'FilePage', 'handle_args', 'html2unicode', 'input', 'input_choice', 'input_yn', 'ItemPage', 'Link', 'log', 'MediaInfo', 'output', 'Page', 'PropertyPage', 'showDiff', 'show_help', 'Site', 'SiteLink', 'stdout', 'Timestamp', 'translate', 'ui', 'unicode2html', 'UploadWarning', 'url2unicode', 'User', 'warning', 'WbGeoShape', 'WbMonolingualText', 'WbQuantity', 'WbTabularData', 'WbTime', 'WbUnknown', 'WikidataBot', ) if not hasattr(sys.modules[__name__], 'argvu'): argvu = [] class Timestamp(datetime.datetime): mediawikiTSFormat = '%Y%m%d%H%M%S' _ISO8601Format_new = '{0:+05d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z' def clone(self) -> datetime.datetime: return self.replace(microsecond=self.microsecond) @classproperty def ISO8601Format(cls: TIMESTAMP_CLASS) -> str: return cls._ISO8601Format() @classmethod def _ISO8601Format(cls: TIMESTAMP_CLASS, sep: str = 'T') -> str: assert len(sep) == 1 return '%Y-%m-%d{}%H:%M:%SZ'.format(sep) @classmethod def fromISOformat(cls: TIMESTAMP_CLASS, ts: STR_OR_TIMESTAMP, sep: str = 'T') -> 'Timestamp': if isinstance(ts, cls): return ts.clone() return cls.strptime(ts, cls._ISO8601Format(sep)) @classmethod def fromtimestampformat(cls: TIMESTAMP_CLASS, ts: STR_OR_TIMESTAMP ) -> 'Timestamp': if isinstance(ts, cls): return ts.clone() if len(ts) == 8: ts += '000' return cls.strptime(ts, cls.mediawikiTSFormat) def isoformat(self, sep: str = 'T') -> str: return self.strftime(self._ISO8601Format(sep)) def totimestampformat(self) -> str: return self.strftime(self.mediawikiTSFormat) def __str__(self) -> str: return self.isoformat() def __add__(self, other: datetime.timedelta) -> 'Timestamp': newdt = 
super().__add__(other) if isinstance(newdt, datetime.datetime): return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour, newdt.minute, newdt.second, newdt.microsecond, newdt.tzinfo) return newdt def __sub__(self, other: datetime.timedelta ) -> 'Timestamp': newdt = super().__sub__(other) if isinstance(newdt, datetime.datetime): return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour, newdt.minute, newdt.second, newdt.microsecond, newdt.tzinfo) return newdt class Coordinate(_WbRepresentation): _items = ('lat', 'lon', 'entity') def __init__(self, lat: float, lon: float, alt: Optional[float] = None, precision: Optional[float] = None, globe: Optional[str] = None, typ: str = '', name: str = '', dim: Optional[int] = None, site: Optional[DataSite] = None, globe_item: OPT_STR_OR_ITEM_PAGE = None, primary: bool = False) -> None: self.lat = lat self.lon = lon self.alt = alt self._precision = precision self._entity = globe_item self.type = typ self.name = name self._dim = dim self.site = site or Site().data_repository() self.primary = primary if globe: globe = globe.lower() elif not globe_item: globe = self.site.default_globe() self.globe = globe @property def entity(self) -> str: if not self._entity: if self.globe not in self.site.globes(): raise exceptions.CoordinateGlobeUnknownError( '{} is not supported in Wikibase yet.' .format(self.globe)) return self.site.globes()[self.globe] if isinstance(self._entity, ItemPage): return self._entity.concept_uri() return self._entity def toWikibase(self) -> Dict[str, Any]: return {'latitude': self.lat, 'longitude': self.lon, 'altitude': self.alt, 'globe': self.entity, 'precision': self.precision, } @classmethod def fromWikibase(cls: COORDINATE_CLASS, data: Dict[str, Any], site: Optional[DataSite] = None) -> 'Coordinate': if site is None: site = Site().data_repository() globe = None if data['globe']: globes = {} for name, entity in site.globes().items(): globes[entity] = name globe = globes.get(data['globe']) return cls(data['latitude'], data['longitude'], data['altitude'], data['precision'], globe, site=site, globe_item=data['globe']) @property def precision(self) -> Optional[float]: if self._dim is None and self._precision is None: return None if self._precision is None and self._dim is not None: radius = 6378137 self._precision = math.degrees( self._dim / (radius * math.cos(math.radians(self.lat)))) return self._precision @precision.setter def precision(self, value: float) -> None: self._precision = value def precisionToDim(self) -> Optional[int]: if self._dim is None and self._precision is None: raise ValueError('No values set for dim or precision') if self._dim is None and self._precision is not None: radius = 6378137 self._dim = int( round( math.radians(self._precision) * radius * math.cos( math.radians(self.lat)) ) ) return self._dim def get_globe_item(self, repo: Optional[DataSite] = None, lazy_load: bool = False) -> 'ItemPage': if isinstance(self._entity, ItemPage): return self._entity repo = repo or self.site return ItemPage.from_entity_uri(repo, self.entity, lazy_load) class WbTime(_WbRepresentation): PRECISION = {'1000000000': 0, '100000000': 1, '10000000': 2, '1000000': 3, '100000': 4, '10000': 5, 'millenia': 6, 'century': 7, 'decade': 8, 'year': 9, 'month': 10, 'day': 11, 'hour': 12, 'minute': 13, 'second': 14 } FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z' _items = ('year', 'month', 'day', 'hour', 'minute', 'second', 'precision', 'before', 'after', 'timezone', 'calendarmodel') def __init__(self, year: 
Optional[int] = None, month: Optional[int] = None, day: Optional[int] = None, hour: Optional[int] = None, minute: Optional[int] = None, second: Optional[int] = None, precision: Union[int, str, None] = None, before: int = 0, after: int = 0, timezone: int = 0, calendarmodel: Optional[str] = None, site: Optional[DataSite] = None) -> None: if year is None: raise ValueError('no year given') self.precision = self.PRECISION['second'] if second is None: self.precision = self.PRECISION['minute'] second = 0 if minute is None: self.precision = self.PRECISION['hour'] minute = 0 if hour is None: self.precision = self.PRECISION['day'] hour = 0 if day is None: self.precision = self.PRECISION['month'] day = 1 if month is None: self.precision = self.PRECISION['year'] month = 1 self.year = year self.month = month self.day = day self.hour = hour self.minute = minute self.second = second self.after = after self.before = before self.timezone = timezone if calendarmodel is None: if site is None: site = Site().data_repository() if site is None: raise ValueError('Site {} has no data repository' .format(Site())) calendarmodel = site.calendarmodel() self.calendarmodel = calendarmodel if precision is not None: if (isinstance(precision, int) and precision in self.PRECISION.values()): self.precision = precision elif precision in self.PRECISION: assert isinstance(precision, str) self.precision = self.PRECISION[precision] else: raise ValueError('Invalid precision: "{}"'.format(precision)) @classmethod def fromTimestr(cls: WB_TIME_CLASS, datetimestr: str, precision: Union[int, str] = 14, before: int = 0, after: int = 0, timezone: int = 0, calendarmodel: Optional[str] = None, site: Optional[DataSite] = None) -> 'WbTime': match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z', datetimestr) if not match: raise ValueError("Invalid format: '{}'".format(datetimestr)) t = match.groups() return cls(int(t[0]), int(t[1]), int(t[2]), int(t[3]), int(t[4]), int(t[5]), precision, before, after, timezone, calendarmodel, site) @classmethod def fromTimestamp(cls: WB_TIME_CLASS, timestamp: 'Timestamp', precision: Union[int, str] = 14, before: int = 0, after: int = 0, timezone: int = 0, calendarmodel: Optional[str] = None, site: Optional[DataSite] = None) -> 'WbTime': return cls.fromTimestr(timestamp.isoformat(), precision=precision, before=before, after=after, timezone=timezone, calendarmodel=calendarmodel, site=site) def toTimestr(self, force_iso: bool = False) -> str: if force_iso: return Timestamp._ISO8601Format_new.format( self.year, max(1, self.month), max(1, self.day), self.hour, self.minute, self.second) return self.FORMATSTR.format(self.year, self.month, self.day, self.hour, self.minute, self.second) def toTimestamp(self) -> Timestamp: if self.year <= 0: raise ValueError('You cannot turn BC dates into a Timestamp') return Timestamp.fromISOformat( self.toTimestr(force_iso=True).lstrip('+')) def toWikibase(self) -> Dict[str, Any]: json = {'time': self.toTimestr(), 'precision': self.precision, 'after': self.after, 'before': self.before, 'timezone': self.timezone, 'calendarmodel': self.calendarmodel } return json @classmethod def fromWikibase(cls: WB_TIME_CLASS, data: Dict[str, Any], site: Optional[DataSite] = None) -> 'WbTime': return cls.fromTimestr(data['time'], data['precision'], data['before'], data['after'], data['timezone'], data['calendarmodel'], site) class WbQuantity(_WbRepresentation): _items = ('amount', 'upperBound', 'lowerBound', 'unit') @staticmethod def _require_errors(site: Optional[DataSite]) -> bool: if not 
site: warning( "WbQuantity now expects a 'site' parameter. This is needed to " 'ensure correct handling of error bounds.') return False return site.mw_version < '1.29.0-wmf.2' @staticmethod def _todecimal(value: TO_DECIMAL_TYPE) -> Optional[Decimal]: if isinstance(value, Decimal): return value if value is None: return None return Decimal(str(value)) @staticmethod def _fromdecimal(value: Optional[Decimal]) -> Optional[str]: return format(value, '+g') if value is not None else None def __init__(self, amount: TO_DECIMAL_TYPE, unit: OPT_STR_OR_ITEM_PAGE = None, error: Union[TO_DECIMAL_TYPE, Tuple[TO_DECIMAL_TYPE, TO_DECIMAL_TYPE]] = None, site: Optional[DataSite] = None) -> None: if amount is None: raise ValueError('no amount given') self.amount = self._todecimal(amount) self._unit = unit self.site = site or Site().data_repository() if isinstance(unit, str) and unit.partition('://')[0] not in ('http', 'https'): raise ValueError("'unit' must be an ItemPage or entity uri.") if error is None and not self._require_errors(site): self.upperBound = self.lowerBound = None else: if error is None: upperError = lowerError = Decimal(0) elif isinstance(error, tuple): upperError = self._todecimal(error[0]) lowerError = self._todecimal(error[1]) else: upperError = lowerError = self._todecimal(error) assert upperError is not None and lowerError is not None assert self.amount is not None self.upperBound = self.amount + upperError self.lowerBound = self.amount - lowerError @property def unit(self) -> str: if isinstance(self._unit, ItemPage): return self._unit.concept_uri() return self._unit or '1' def get_unit_item(self, repo: Optional[DataSite] = None, lazy_load: bool = False) -> 'ItemPage': if not isinstance(self._unit, str): return self._unit repo = repo or self.site self._unit = ItemPage.from_entity_uri(repo, self._unit, lazy_load) return self._unit def toWikibase(self) -> Dict[str, Any]: json = {'amount': self._fromdecimal(self.amount), 'upperBound': self._fromdecimal(self.upperBound), 'lowerBound': self._fromdecimal(self.lowerBound), 'unit': self.unit } return json @classmethod def fromWikibase(cls: WB_QUANTITY_CLASS, data: Dict[str, Any], site: Optional[DataSite] = None) -> 'WbQuantity': amount = cls._todecimal(data['amount']) upperBound = cls._todecimal(data.get('upperBound')) lowerBound = cls._todecimal(data.get('lowerBound')) bounds_provided = (upperBound is not None and lowerBound is not None) error = None if bounds_provided or cls._require_errors(site): error = (upperBound - amount, amount - lowerBound) if data['unit'] == '1': unit = None else: unit = data['unit'] return cls(amount, unit, error, site) class WbMonolingualText(_WbRepresentation): _items = ('text', 'language') def __init__(self, text: str, language: str): if not text or not language: raise ValueError('text and language cannot be empty') self.text = text self.language = language def toWikibase(self) -> Dict[str, Any]: json = {'text': self.text, 'language': self.language } return json @classmethod def fromWikibase(cls: WB_MONOLINGUAL_TEXT_CLASS, data: Dict[str, Any], site: Optional[DataSite] = None) -> 'WbMonolingualText': return cls(data['text'], data['language']) class _WbDataPage(_WbRepresentation): _items = ('page', ) @classmethod def _get_data_site(cls: WB_DATA_PAGE_CLASS, repo_site: DataSite ) -> APISite: raise NotImplementedError @classmethod def _get_type_specifics(cls: WB_DATA_PAGE_CLASS, site: DataSite ) -> Dict[str, Any]: raise NotImplementedError @staticmethod
MIT License
zhuhao-nju/hmd
src/renderer.py
SMPLRenderer.__call__
python
def __call__(self, verts=None, faces=None, vert_colors=None, cam=None,
             img=None, do_alpha=False, far=None, near=None,
             color_id=0, img_size=None):
    if verts is None:
        if self.verts is not None:
            verts = self.verts
        else:
            eprint("No verts found, use argument verts "
                   "or set_verts function")
            return False
    if faces is None:
        faces = self.faces
    if vert_colors is None and self.vert_colors is not None:
        vert_colors = self.vert_colors
    if img is not None:
        h, w = img.shape[:2]
    elif img_size is not None:
        h = img_size[0]
        w = img_size[1]
    else:
        h = self.h
        w = self.w
    if cam is None:
        if self.cam is not None:
            cam = self.cam
        else:
            cam = [self.flength, w / 2., h / 2.]
    use_cam = ProjectPoints(
        f=cam[0] * np.ones(2),
        rt=np.zeros(3),
        t=np.zeros(3),
        k=np.zeros(5),
        c=cam[1:3])
    if near is None:
        near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
    if far is None:
        far = np.maximum(np.max(verts[:, 2]) + 25, 25)
    imtmp = render_model(
        verts, faces, w, h, use_cam,
        vert_colors=vert_colors,
        do_alpha=do_alpha,
        img=img,
        far=far,
        near=near,
        color_id=color_id)
    return (imtmp * 255).astype('uint8')
cam is 3D [f, px, py]
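A hedged rendering sketch; the vertex array is random placeholder geometry, and it assumes the predefined SMPL face file is available at the renderer's default path together with a working opendr install:

import numpy as np

renderer = SMPLRenderer(img_size=224, flength=500.)
# 6890 SMPL vertices, shifted in front of the camera along +z.
verts = np.random.rand(6890, 3) + np.array([0., 0., 2.])
image = renderer(verts=verts, cam=[500., 112., 112.])
print(image.shape)  # expected (224, 224, 3), dtype uint8, since do_alpha=False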
https://github.com/zhuhao-nju/hmd/blob/da849b6e0e9bf52f681d51fe821310856de3ff00/src/renderer.py#L44-L110
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import cv2
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.renderer import DepthRenderer
from opendr.lighting import LambertianPointLight

colors = {
    'light_blue': [0.65098039, 0.74117647, 0.85882353],
    'light_black': [.0, .0, .0],
    'light_white': [1.0, 1.0, 1.0,],
    'light_pink': [.9, .7, .7]
}


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class SMPLRenderer(object):

    def __init__(self,
                 img_size=224,
                 flength=500.,
                 face_path="../predef/smpl_faces.npy",
                 verts=None,
                 vert_colors=None,
                 cam=None):
        self.faces = np.load(face_path)
        self.verts = verts
        self.vert_colors = vert_colors
        self.cam = cam
        self.w = img_size
        self.h = img_size
        self.flength = flength
MIT License
vforgione/logging2
logging2/handlers/sockets.py
SocketHandler.__init__
python
def __init__(
    self, name: Optional[str] = None, level: Optional[LogLevel] = None, **kwargs
):
    self.host: str = kwargs.get("host")
    self.port: str = kwargs.get("port")
    self.encoding: str = kwargs.get("encoding", "utf8")
    self.family: int = kwargs.get("family")
    self.type: int = kwargs.get("type")
    super().__init__(name=name, level=level)

    if self.port is not None and self.type != socket.SOCK_DGRAM:
        sock = socket.create_connection((self.host, self.port))
    else:
        if self.port:
            address = (self.host, self.port)
        else:
            address = self.host
        if self.family and self.type:
            sock = socket.socket(self.family, self.type)
        elif self.type:
            sock = socket.socket(socket.AF_UNIX, self.type)
            self.family = socket.AF_UNIX
        else:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
            self.family = socket.AF_UNIX
            self.type = socket.SOCK_DGRAM
        sock.connect(address)

    self.socket: socket.socket = sock
Instantiates a new ``SocketHandler``

:param name: the name of the handler
:param level: the minimum verbosity level to write log entries
:keyword host: the host portion of the connection -- this can be an FQDN,
    IP or a local UNIX socket node name
:keyword port: the port number to connect on
:keyword encoding: the message encoding
:keyword family: the socket family -- for example AF_UNIX or AF_INET
:keyword type: the socket type -- for example SOCK_STREAM or SOCK_DGRAM
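Two hedged construction examples; the addresses are placeholders, and the TCP variant assumes something is listening on that port:

import socket

# TCP (stream) handler connecting to host:port.
tcp_handler = SocketHandler(name="app", host="127.0.0.1", port=5140)

# UNIX datagram handler, e.g. the local syslog socket.
unix_handler = SocketHandler(name="syslog", host="/dev/log",
                             type=socket.SOCK_DGRAM)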
https://github.com/vforgione/logging2/blob/9d620c14d9b5f67e3dc285082330296cf1ecbfcd/logging2/handlers/sockets.py#L13-L51
import socket
import syslog
from typing import Optional

from logging2.handlers.abc import Handler
from logging2.levels import LogLevel


class SocketHandler(Handler):
MIT License
pierreexeter/rl_reach
code/gym_envs/gym_envs/widowx_env/env_description.py
RewardFunctions.get_reward12
python
def get_reward12(self):
    if self.dist >= 0.001:
        self.term1 = 0
    else:
        self.term1 = 1
    self.term2 = 0
    rew = self.term1 + self.term2
    return rew
Compute reward function 12 (sparse)
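A worked illustration of the sparse reward with placeholder values for the unused terms:

rf = RewardFunctions(dist=0.0005, alpha_reward=0.1, action=None,
                     delta_dist=0.0, delta_pos=0.0, orient=None,
                     collision=False)
print(rf.get_reward12())  # 1: end effector within 1 mm of the goal
rf.dist = 0.05
print(rf.get_reward12())  # 0: outside the threshold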
https://github.com/pierreexeter/rl_reach/blob/4f9c46c8503a84edaa48f9dfd58054548552253a/code/gym_envs/gym_envs/widowx_env/env_description.py#L286-L294
import numpy as np import pybullet as p class ObservationShapes: def __init__( self, endeffector_pos, endeffector_orient, torso_pos, torso_orient, goal_pos, goal_orient, joint_positions): self.endeffector_pos = endeffector_pos self.endeffector_orient = endeffector_orient self.torso_pos = torso_pos self.torso_orient = torso_orient self.goal_pos = goal_pos self.goal_orient = goal_orient self.joint_positions = joint_positions self.end_torso_pos = self.endeffector_pos - self.torso_pos self.end_goal_pos = self.endeffector_pos - self.goal_pos self.end_torso_orient = self.endeffector_orient - self.torso_orient self.end_goal_orient = self.endeffector_orient - self.goal_orient def get_obs1(self): robot_obs = np.concatenate( [self.endeffector_pos, self.joint_positions]).ravel() return robot_obs def get_obs2(self): robot_obs = np.concatenate( [self.goal_pos, self.joint_positions]).ravel() return robot_obs def get_obs3(self): robot_obs = np.concatenate( [self.end_torso_pos, self.end_goal_pos, self.joint_positions]).ravel() return robot_obs def get_obs4(self): robot_obs = np.concatenate( [self.end_goal_pos, self.joint_positions]).ravel() return robot_obs def get_obs5(self): robot_obs = np.concatenate( [self.end_torso_pos, self.end_goal_pos, self.goal_pos, self.joint_positions]).ravel() return robot_obs def get_obs6(self): robot_obs = np.concatenate( [ self.end_torso_pos, self.end_goal_pos, self.end_torso_orient, self.end_goal_orient, self.goal_pos, self.goal_orient, self.endeffector_pos, self.endeffector_orient, self.joint_positions ]).ravel() return robot_obs def get_obs7(self): robot_obs = np.concatenate( [ self.end_torso_pos, self.end_goal_pos, self.goal_pos, self.endeffector_pos, self.joint_positions ]).ravel() return robot_obs class ActionShapes: def __init__( self, pybullet_action, joint_positions, joint_min, joint_max, arm, physics_client, frame_skip): self.pybullet_action = pybullet_action self.joint_positions = joint_positions self.joint_min = joint_min self.joint_max = joint_max self.arm = arm self.physics_client = physics_client self.frame_skip = frame_skip self.new_joint_positions = self.joint_positions + self.pybullet_action self.new_joint_positions = np.clip( np.array(self.new_joint_positions), self.joint_min, self.joint_max) def take_action1(self): self.force_joint_positions(self.new_joint_positions) def take_action2(self): self.set_joint_positions(self.new_joint_positions) for _ in range(self.frame_skip): p.stepSimulation(physicsClientId=self.physics_client) def set_joint_positions(self, joint_positions): joint_positions = list(joint_positions) + [joint_positions[-1]] p.setJointMotorControlArray( self.arm, [0, 1, 2, 3, 4, 7, 8], controlMode=p.POSITION_CONTROL, targetPositions=joint_positions ) def force_joint_positions(self, joint_positions): for i in range(5): p.resetJointState( self.arm, i, joint_positions[i] ) for i in range(7, 9): p.resetJointState( self.arm, i, joint_positions[-1] ) class RewardFunctions: def __init__( self, dist, alpha_reward, action, delta_dist, delta_pos, orient, collision): self.dist = dist self.alpha_reward = alpha_reward self.action = action self.delta_dist = delta_dist self.delta_pos = delta_pos self.orient = orient self.collision = collision self.term1 = 0 self.term2 = 0 def get_reward1(self): self.term1 = - self.dist ** 2 self.term2 = 0 rew = self.term1 + self.term2 return rew def get_reward2(self): self.term1 = - self.dist self.term2 = 0 rew = self.term1 + self.term2 return rew def get_reward3(self): self.term1 = - self.dist ** 3 self.term2 = 0 rew = 
self.term1 + self.term2 return rew def get_reward4(self): self.term1 = - self.dist ** 4 self.term2 = 0 rew = self.term1 + self.term2 return rew def get_reward5(self): self.term1 = - self.dist ** 2 self.term2 = - self.alpha_reward * np.linalg.norm(self.action) rew = self.term1 + self.term2 return rew def get_reward6(self): self.term1 = - self.dist ** 2 self.term2 = - self.alpha_reward * np.linalg.norm(self.action) / self.dist ** 2 rew = self.term1 + self.term2 return rew def get_reward7(self): self.term1 = self.delta_dist self.term2 = 0 rew = self.term1 + self.term2 return rew def get_reward8(self): self.term1 = - self.dist ** 2 self.term2 = self.alpha_reward * abs(self.delta_dist / self.dist) rew = self.term1 + self.term2 return rew def get_reward9(self): self.term1 = self.delta_pos self.term2 = 0 rew = self.term1 + self.term2 return rew def get_reward10(self): self.term1 = - self.dist ** 2 self.term2 = - self.alpha_reward * self.delta_pos / self.dist rew = self.term1 + self.term2 return rew def get_reward11(self): if self.dist >= 0.001: self.term1 = -1 else: self.term1 = 0 self.term2 = 0 rew = self.term1 + self.term2 return rew
MIT License
idlesign/envbox
envbox/envs.py
Environment.__init__
python
def __init__(self, name: str = None, type_cast: bool = None):
    self.name = name or self.name
    self.type_cast = type_cast or self.type_cast
:param name: Environment name.

    .. note:: This will prevail over class attribute.

:param type_cast: Whether to cast values into Python natives
    in .get() and .getmany() by default.

    .. note:: This will prevail over class attribute.
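A small usage sketch with arbitrary values:

env = Environment(name='production', type_cast=True)
print(env.name)       # 'production'
print(env.type_cast)  # True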
https://github.com/idlesign/envbox/blob/886fc00d5f314ada52797d359f90a64579682eba/envbox/envs.py#L39-L51
import os
from typing import Union, Type, List, Sequence, Any

from .utils import cast_type, read_envfile

DEVELOPMENT = 'development'
TESTING = 'testing'
STAGING = 'staging'
PRODUCTION = 'production'

TYPES = {}


class Environment:

    name: str = 'dummy'
    aliases: List[str] = []
    type_cast: bool = False

    is_development: bool = False
    is_testing: bool = False
    is_staging: bool = False
    is_production: bool = False

    env = os.environ
BSD 3-Clause New or Revised License
paddlepaddle/paddle
python/paddle/distributed/collective.py
all_reduce
python
def all_reduce(tensor, op=ReduceOp.SUM, group=None, use_calc_stream=True):
    if group is not None and not group.is_member():
        return

    ring_id = 0 if group is None else group.id
    if in_dygraph_mode():
        if op == ReduceOp.SUM:
            return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
                                           use_calc_stream, 'ring_id', ring_id)
        elif op == ReduceOp.MAX:
            return _C_ops.c_allreduce_max_(tensor, 'use_calc_stream',
                                           use_calc_stream, 'ring_id', ring_id)
        elif op == ReduceOp.MIN:
            return _C_ops.c_allreduce_min_(tensor, 'use_calc_stream',
                                           use_calc_stream, 'ring_id', ring_id)
        elif op == ReduceOp.PROD:
            return _C_ops.c_allreduce_prod_(tensor, 'use_calc_stream',
                                            use_calc_stream, 'ring_id', ring_id)
        else:
            raise ValueError("Unknown parameter: {}.".format(op))

    check_variable_and_dtype(
        tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'all_reduce')
    if not op in [ReduceOp.SUM, ReduceOp.MAX, ReduceOp.MIN, ReduceOp.PROD]:
        raise ValueError("The op for all_reduce must be one of educeOp.PROD, "
                         "ReduceOp.SUM, ReduceOp.MAX, ReduceOp.MIN.")
    if op == ReduceOp.SUM:
        op_type = 'c_allreduce_sum'
    elif op == ReduceOp.MAX:
        op_type = 'c_allreduce_max'
    elif op == ReduceOp.MIN:
        op_type = 'c_allreduce_min'
    elif op == ReduceOp.PROD:
        op_type = 'c_allreduce_prod'
    if not isinstance(ring_id, int):
        raise ValueError("The type of 'ring_id' for all_reduce should be int.")
    helper = LayerHelper(op_type, **locals())
    helper.append_op(
        type=op_type,
        inputs={'X': [tensor]},
        outputs={'Out': [tensor]},
        attrs={'ring_id': ring_id,
               'use_calc_stream': use_calc_stream})
Reduce a tensor over all ranks so that all get the result.

Args:
    tensor (Tensor): The input Tensor. It also works as the output Tensor.
        Its data type should be float16, float32, float64, int32 or int64.
    op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD): Optional.
        The operation used. Default value is ReduceOp.SUM.
    group (Group): The group instance returned by new_group or None for
        global default group.
    use_calc_stream (bool): Whether to use calculation stream (True) or
        communication stream (False). Default to True.

Returns:
    None.

Examples:
    .. code-block:: python

        import numpy as np
        import paddle
        from paddle.distributed import ReduceOp
        from paddle.distributed import init_parallel_env

        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
        init_parallel_env()
        if paddle.distributed.ParallelEnv().local_rank == 0:
            np_data = np.array([[4, 5, 6], [4, 5, 6]])
        else:
            np_data = np.array([[1, 2, 3], [1, 2, 3]])
        data = paddle.to_tensor(np_data)
        paddle.distributed.all_reduce(data)
        out = data.numpy()
        # [[5, 7, 9], [5, 7, 9]]
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/distributed/collective.py#L415-L492
import numpy as np import os from ..fluid.layer_helper import LayerHelper from ..fluid.framework import Variable from ..fluid.framework import OpProtoHolder from ..fluid.framework import in_dygraph_mode from ..fluid.framework import convert_np_dtype_to_dtype_ from ..fluid.framework import _varbase_creator from ..fluid.data_feeder import convert_dtype from ..fluid.data_feeder import check_variable_and_dtype from ..fluid.data_feeder import check_type from ..fluid.data_feeder import check_dtype from ..fluid.layers.tensor import fill_constant from ..fluid.layers import utils from ..fluid.dygraph import layers from ..fluid.dygraph.parallel import prepare_context import paddle from .fleet import fleet import paddle.fluid as fluid import paddle.fluid.core as core from paddle import _C_ops import paddle.fluid.dygraph_utils as dygraph_utils __all__ = [] class ReduceOp: SUM = 0 MAX = 1 MIN = 2 PROD = 3 class Group(): def __init__(self, rank, rank_num, id=0, ranks=[]): self.rank = rank self.nranks = rank_num self.id = id self.ranks = ranks def is_member(self): if self.rank < 0: return False if self.nranks < 2: return False return True def get_group_rank(self, rank): if self.is_member() and rank in self.ranks: return self.ranks.index(rank) else: return -1 def __repr__(self): debug_str = "rank: {}, nranks: {}, id: {}, ranks: ".format( self.rank, self.nranks, self.id) debug_str += ", ".join(map(str, self.ranks)) debug_str += ". " return debug_str _global_env = None def _get_global_env(): global _global_env if not _global_env: _global_env = paddle.distributed.ParallelEnv() return _global_env _group_map = {} def _get_group_map(): global _group_map if not _group_map: genv = _get_global_env() _group_map[0] = Group(genv.rank, genv.world_size, list(range(genv.world_size))) return _group_map def _get_global_group(): return _get_group_map()[0] def _new_ring_id(): return len(_get_group_map()) + max(_get_global_env().nrings, 9) def get_group(id=0): gm = _get_group_map() return gm[id] if id in gm else None def barrier(group=None): if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id temp = fill_constant([1], dtype="int32", value="1") if in_dygraph_mode(): return _C_ops.barrier(temp, temp, 'ring_id', ring_id) op_type = 'barrier' if not isinstance(ring_id, int): raise ValueError("The type of 'group' for barrier must be int.") helper = LayerHelper(op_type, **locals()) helper.append_op( type=op_type, inputs={'X': [temp]}, outputs={'Out': [temp]}, attrs={'ring_id': ring_id}) def new_group(ranks=None, backend=None): if not backend: backend = 'nccl' assert backend == 'nccl', ("backend other than nccl is not supported yet") genv = _get_global_env() global_rank = genv.rank ring_id = _new_ring_id() global _group_map if global_rank not in ranks: gp = Group(-1, -1, ring_id, ranks) _group_map[ring_id] = gp else: ranks = sorted(ranks) group_rank = ranks.index(global_rank) group_size = len(ranks) gp = Group(group_rank, group_size, ring_id, ranks) _group_map[ring_id] = gp if group_size >= 2: strategy = core.ParallelStrategy() strategy.nranks = group_size strategy.local_rank = group_rank strategy.trainer_endpoints = [ genv.trainer_endpoints[i] for i in ranks ] strategy.current_endpoint = genv.current_endpoint strategy.nrings = 1 if core.is_compiled_with_cuda(): place = core.CUDAPlace(genv.device_id) core.NCCLParallelContext(strategy, place).init_with_ring_id(ring_id) else: assert False, ("no cuda device found") else: return gp tmp = paddle.to_tensor( [1], dtype="int32") if 
in_dygraph_mode() else fill_constant( [0], dtype="int32", value="1") paddle.distributed.all_reduce(tmp, use_calc_stream=True) paddle.distributed.wait(tmp) return gp def wait(tensor, group=None, use_calc_stream=True): if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id if use_calc_stream: _sync_calc_stream(tensor) else: _sync_comm_stream(tensor, ring_id) def _sync_calc_stream(tensor): if in_dygraph_mode(): return _C_ops.c_sync_calc_stream(tensor, tensor) op_type = 'c_sync_calc_stream' helper = LayerHelper(op_type, **locals()) helper.append_op( type=op_type, inputs={'X': [tensor]}, outputs={'Out': [tensor]}, ) def _sync_comm_stream(tensor, ring_id=0): if in_dygraph_mode(): return _C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id', ring_id) op_type = 'c_sync_comm_stream' helper = LayerHelper(op_type, **locals()) helper.append_op( type=op_type, inputs={'X': [tensor]}, outputs={'Out': [tensor]}, attrs={'ring_id': ring_id}, ) def broadcast(tensor, src, group=None, use_calc_stream=True): if group is not None and not group.is_member(): return if not isinstance(src, int): raise ValueError("src should be int.") ring_id = 0 if group is None else group.id gsrc = src if group is None else group.get_group_rank(src) assert gsrc >= 0, ("src rank out of group, need global rank") if in_dygraph_mode(): return _C_ops.c_broadcast(tensor, tensor, 'root', gsrc, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id) op_type = 'c_broadcast' check_variable_and_dtype( tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], 'broadcast') helper = LayerHelper(op_type, **locals()) helper.append_op( type=op_type, inputs={'X': [tensor]}, outputs={'Out': [tensor]}, attrs={ 'root': gsrc, 'use_calc_stream': use_calc_stream, 'ring_id': ring_id, })
Apache License 2.0
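The context above defines Paddle's collective helpers (new_group, barrier, broadcast). A minimal broadcast sketch under stated assumptions: Paddle 2.x with GPU support, the script launched through paddle.distributed.launch, and illustrative tensor values.
import paddle
import paddle.distributed as dist

dist.init_parallel_env()                     # join the default communication group

if dist.get_rank() == 0:
    data = paddle.to_tensor([7, 8, 9], dtype="int32")
else:
    data = paddle.zeros([3], dtype="int32")

dist.broadcast(data, src=0)                  # after this, every rank holds [7, 8, 9]
print(dist.get_rank(), data.numpy())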
intel/dffml
dffml/operation/archive.py
extract_zip_archive
python
async def extract_zip_archive( input_file_path: str, output_directory_path: str, ) -> dict: with zipfile.ZipFile(input_file_path, "r") as zip: zip.extractall(output_directory_path) return {"output_path": output_directory_path}
Extracts a given zip file. Parameters ---------- input_file_path : str Path to the zip file output_directory_path : str Path where all the files should be extracted Returns ------- dict Path to the directory where the archive has been extracted
https://github.com/intel/dffml/blob/e7a356dfe8fd6fdf3cac7f8c218abc7d650fd93c/dffml/operation/archive.py#L68-L88
import tarfile import zipfile import pathlib from ..df.base import op from ..df.types import Definition DIRECTORY = Definition(name="directory", primitive="str") ZIP_FILE = Definition(name="zip_file", primitive="str") TAR_FILE = Definition(name="tar_file", primitive="str") OUTPUT_ZIPFILE_PATH = Definition(name="output_zipfile_path", primitive="str") OUTPUT_TARFILE_PATH = Definition(name="output_tarfile_path", primitive="str") OUTPUT_DIRECTORY_PATH = Definition( name="output_directory_path", primitive="str" ) def recursive_add_to_archive(archive_handle, path, archive_path): if not isinstance(path, pathlib.Path): path = pathlib.Path(path) if path.is_file(): archive_handle(path, archive_path) elif path.is_dir(): if archive_path: archive_handle(path, archive_path) path_names = [pth.name for pth in path.iterdir()] for name in sorted(path_names): name = pathlib.Path(name) recursive_add_to_archive( archive_handle, path / name, archive_path / name ) @op( inputs={"input_directory_path": DIRECTORY, "output_file_path": ZIP_FILE}, outputs={"output_path": OUTPUT_ZIPFILE_PATH}, ) async def make_zip_archive( input_directory_path: str, output_file_path: str, ) -> dict: with zipfile.ZipFile(output_file_path, "w") as zip: recursive_add_to_archive(zip.write, input_directory_path, "") return {"output_path": output_file_path} @op( inputs={"input_file_path": ZIP_FILE, "output_directory_path": DIRECTORY}, outputs={"output_path": OUTPUT_DIRECTORY_PATH}, )
MIT License
jamesbrofos/odin
odin/utilities/mixins/strategy_mixins/transaction_mixins.py
NeverExitIndicatorMixin.exit_indicator
python
def exit_indicator(self, feats): return False
Implementation of abstract base class method.
https://github.com/jamesbrofos/odin/blob/e2e9d638c68947d24f1260d35a3527dd84c2523f/odin/utilities/mixins/strategy_mixins/transaction_mixins.py#L20-L22
from ....strategy import AbstractStrategy class AlwaysBuyIndicatorMixin(AbstractStrategy): def buy_indicator(self, feats): return True class NeverSellIndicatorMixin(AbstractStrategy): def sell_indicator(self, feats): return False class NeverExitIndicatorMixin(AbstractStrategy):
MIT License
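The record above supplies one abstract hook per mixin; a self-contained sketch of the same composition pattern, using a hypothetical stand-in for odin's AbstractStrategy rather than the real base class:
from abc import ABC, abstractmethod

class AbstractStrategy(ABC):
    # Hypothetical stand-in: only the hooks exercised below are declared.
    @abstractmethod
    def buy_indicator(self, feats): ...
    @abstractmethod
    def sell_indicator(self, feats): ...
    @abstractmethod
    def exit_indicator(self, feats): ...

class AlwaysBuyIndicatorMixin(AbstractStrategy):
    def buy_indicator(self, feats):
        return True

class NeverSellIndicatorMixin(AbstractStrategy):
    def sell_indicator(self, feats):
        return False

class NeverExitIndicatorMixin(AbstractStrategy):
    def exit_indicator(self, feats):
        return False

class BuyAndHoldStrategy(AlwaysBuyIndicatorMixin, NeverSellIndicatorMixin, NeverExitIndicatorMixin):
    pass  # every hook is provided by a mixin

s = BuyAndHoldStrategy()
print(s.buy_indicator(None), s.sell_indicator(None), s.exit_indicator(None))  # True False False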
ljvmiranda921/pyswarms
pyswarms/backend/handlers.py
BoundaryHandler.__init__
python
def __init__(self, strategy): self.strategy = strategy self.strategies = self._get_all_strategies() self.rep = Reporter(logger=logging.getLogger(__name__)) self.memory = None
A BoundaryHandler class This class offers a way to handle boundary conditions. It contains methods to repair particle positions outside of the defined boundaries. Following strategies are available for the handling: * Nearest: Reposition the particle to the nearest bound. * Random: Reposition the particle randomly in between the bounds. * Shrink: Shrink the velocity of the particle such that it lands on the bounds. * Reflective: Mirror the particle position from outside the bounds to inside the bounds. * Intermediate: Reposition the particle to the midpoint between its current position on the bound surpassing axis and the bound itself. This only adjusts the axes that surpass the boundaries. The BoundaryHandler can be called as a function to use the strategy that is passed at initialization to repair boundary issues. An example for the usage: .. code-block :: python from pyswarms.backend import operators as op from pyswarms.backend.handlers import BoundaryHandler bh = BoundaryHandler(strategy="reflective") ops.compute_position(swarm, bounds, handler=bh) By passing the handler, the :func:`compute_position()` function now has the ability to reset the particles by calling the :code:`BoundaryHandler` inside. Attributes ---------- strategy : str The strategy to use. To see all available strategies, call :code:`BoundaryHandler.strategies`
https://github.com/ljvmiranda921/pyswarms/blob/ea161d9a932388a2595e777b8f140833406e0a77/pyswarms/backend/handlers.py#L66-L113
import inspect import logging import numpy as np import math from copy import copy from ..utils.reporter import Reporter class HandlerMixin(object): def _merge_dicts(self, *dict_args): result = {} for dictionary in dict_args: result.update(dictionary) return result def _out_of_bounds(self, position, bounds): lb, ub = bounds greater_than_bound = np.nonzero(position > ub) lower_than_bound = np.nonzero(position < lb) return (lower_than_bound, greater_than_bound) def _get_all_strategies(self): return { k: v for k, v in inspect.getmembers(self, predicate=inspect.isroutine) if not k.startswith(("__", "_")) } class BoundaryHandler(HandlerMixin):
MIT License
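A small usage sketch for the handler above, assuming pyswarms is installed; the call signature (a position array plus a (lower, upper) bounds tuple) is an assumption based on the docstring's compute_position example and may differ between versions, and the "nearest" strategy is expected to clamp each out-of-bounds coordinate to the closest bound.
import numpy as np
from pyswarms.backend.handlers import BoundaryHandler

lb, ub = np.zeros(2), np.ones(2)
positions = np.array([[1.4, 0.5],    # first particle exceeds the upper bound on x
                      [-0.2, 0.3]])  # second particle falls below the lower bound on x

bh = BoundaryHandler(strategy="nearest")
repaired = bh(positions, bounds=(lb, ub))
print(repaired)   # roughly [[1.0, 0.5], [0.0, 0.3]]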
graphql-python/graphql-core-legacy
graphql/backend/decider.py
AsyncWorker.start
python
def start(self): self._lock.acquire() try: if not self.is_alive(): self._thread = threading.Thread( target=self._target, name="graphql.AsyncWorker" ) self._thread.setDaemon(True) self._thread.start() self._thread_for_pid = os.getpid() finally: self._lock.release() atexit.register(self.main_thread_terminated)
Starts the task thread.
https://github.com/graphql-python/graphql-core-legacy/blob/6e2fbccdec655ce9122b84d3808c14242c4e6b96/graphql/backend/decider.py#L103-L118
import atexit import logging import threading import os from time import sleep, time from ..pyutils.compat import Queue, check_threads from .base import GraphQLBackend, GraphQLDocument from .cache import GraphQLCachedBackend if False: from typing import List, Union, Optional, Hashable, Dict, Tuple, Type from ..type.schema import GraphQLSchema DEFAULT_TIMEOUT = 10 logger = logging.getLogger("graphql.errors") class AsyncWorker(object): _terminator = object() def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT): check_threads() self._queue = Queue(-1) self._lock = threading.Lock() self._thread = None self._thread_for_pid = None self.options = {"shutdown_timeout": shutdown_timeout} self.start() def is_alive(self): if self._thread_for_pid != os.getpid(): return False return self._thread and self._thread.is_alive() def _ensure_thread(self): if self.is_alive(): return self.start() def main_thread_terminated(self): with self._lock: if not self.is_alive(): return self._queue.put_nowait(self._terminator) timeout = self.options["shutdown_timeout"] initial_timeout = min(0.1, timeout) if not self._timed_queue_join(initial_timeout): size = self._queue.qsize() print("GraphQL is attempting to retrieve %i pending documents" % size) print("Waiting up to %s seconds" % timeout) if os.name == "nt": print("Press Ctrl-Break to quit") else: print("Press Ctrl-C to quit") self._timed_queue_join(timeout - initial_timeout) self._thread = None def _timed_queue_join(self, timeout): deadline = time() + timeout queue = self._queue queue.all_tasks_done.acquire() try: while queue.unfinished_tasks: delay = deadline - time() if delay <= 0: return False queue.all_tasks_done.wait(timeout=delay) return True finally: queue.all_tasks_done.release()
MIT License
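The class above is an instance of a common background-worker pattern: a daemon thread drains a queue, and an atexit hook pushes a terminator and joins the queue so pending work finishes before the interpreter exits. A stripped-down, standalone sketch of that pattern (not the graphql-core API itself; the real class joins with a timeout instead of blocking indefinitely):
import atexit
import queue
import threading

_TERMINATOR = object()
_q = queue.Queue()

def _worker():
    while True:
        item = _q.get()
        try:
            if item is _TERMINATOR:
                break
            item()                      # each queued item is a callable task
        finally:
            _q.task_done()

threading.Thread(target=_worker, name="demo.AsyncWorker", daemon=True).start()

def _drain_on_exit():
    _q.put_nowait(_TERMINATOR)
    _q.join()                           # simplified: no shutdown timeout here

atexit.register(_drain_on_exit)

_q.put(lambda: print("processed in background"))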
perslev/u-time
utime/hypnogram/utils.py
signal_dense_to_dense
python
def signal_dense_to_dense(array, sample_rate, period_length_sec, allow_trim=False): array = array.squeeze() if sample_rate is None or period_length_sec is None: raise ValueError("Must specify the 'sample_rate' and 'period_length_sec' parameters.") if array.ndim != 1: raise ValueError("Invalid dense array found of dim {} (expected 1)" "".format(array.ndim)) if len(array) % sample_rate: raise ValueError("Signal dense array of shape {} is not divisible by " "the sample rate of {}".format(array.shape, sample_rate)) end_time = int(len(array) / sample_rate) if end_time < period_length_sec: raise ValueError("Signal dense array too short (length {}) with period" " length of {} seconds. Maybe the array is already " "dense?".format(len(array), period_length_sec)) trail = (end_time % period_length_sec) * sample_rate if trail: if not allow_trim: raise ValueError("Signal dense array of length {} ({} seconds) " "is not evenly divisible by the period length " "of {} seconds, and allow_trim was set to " "False.".format(len(array), end_time, period_length_sec)) array = array[:-trail] s = [period_length_sec * sample_rate, -1] return np.reshape(array, s, order="F")[0, :]
Takes a 'signal dense' hypnogram array (ndarray of shape [-1]) of sleep stages and returns a dense array of 1 label for every 'period_length_sec' second periods of sleep. OBS: Assumes that all values within 'period_length_sec' seconds of signal are identical starting at the 0th value in the array. See 'signal_dense_to_sparse' for a description of signal dense arrays. Args: array: 1D ndarray of (signal dense) sleep stage labels sample_rate: The sample rate of the input signal period_length_sec: Length in seconds of 1 sleep stage period. allow_trim: Allow the duration of array (in seconds) to be non-evenly divisible by the 'period_length_sec'. The remainder will be ignored. Otherwise, an error is raised if the array cannot be evenly split. Returns: ndarray of shape [-1] with 1 sleep stage label value for every 'period_length_sec' seconds of input signal.
https://github.com/perslev/u-time/blob/f7c8e3f1368f43226872a69b0fbb8c29990e4bd9/utime/hypnogram/utils.py#L163-L214
import numpy as np from utime.hypnogram import SparseHypnogram from utime.hypnogram.formats import StartDurationStageFormat from utime.hypnogram.stage_mapper import create_variable_ann_to_class_int_dict def create_class_int_to_period_idx_dict(hypnogram): classes = hypnogram.classes if isinstance(hypnogram, SparseHypnogram): hypnogram = hypnogram.to_dense() stages = hypnogram["sleep_stage"].to_numpy() return {c: np.where(stages == c)[0] for c in classes} def sparse_to_csv_file(inits, durs, stages, out_path, stage_map=None): assert len(inits) == len(durs) == len(stages) with open(out_path, "w") as out_f: for i, d, s in zip(inits, durs, stages): if stage_map: s = stage_map[s] out_f.write("{},{},{}\n".format(i, d, s)) def dense_to_sparse(array, period_length_sec, allow_trim=False): array = array.squeeze() if array.ndim != 1: raise ValueError("Invalid dense array found of dim {} (expected 1)" "".format(array.ndim)) end_time = len(array) * period_length_sec start_inds = np.where([array[i+1] != array[i] for i in range(len(array)-1)])[0] + 1 start_inds = np.concatenate([[0], start_inds]) inits = (start_inds * period_length_sec).astype(np.int) durs = np.concatenate([np.diff(inits), [end_time-inits[-1]]]) stages = array[start_inds] if durs[-1] == 0: if not allow_trim: raise ValueError("Last duration is shorter than 1 second, " "but allow_trim was set to False") inits = inits[:-1] durs = durs[:-1] stages = stages[:-1] trail = durs[-1] % period_length_sec if trail: if not allow_trim: raise ValueError("Last duration of length {} seconds is not " "divisible by the period length of {} seconds, " "and allow_trim was set to False") durs[-1] -= trail return inits, durs, stages def signal_dense_to_sparse(array, sample_rate, period_length_sec, allow_trim=False): d = signal_dense_to_dense(array, sample_rate, period_length_sec, allow_trim) return dense_to_sparse(d, period_length_sec, allow_trim)
MIT License
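A tiny numerical sketch (values made up) of what the conversion above does: with a 2 Hz signal and 3-second periods, a 12-sample signal-dense array collapses to one label per period, because the order="F" reshape picks the first sample of every period.
import numpy as np

sample_rate = 2          # samples per second (illustrative)
period_length_sec = 3    # one sleep-stage label per 3 seconds

# 6 seconds of signal-dense labels: stage 0 for 3 s, then stage 2 for 3 s
signal_dense = np.array([0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2])

# Same reshape trick as signal_dense_to_dense (no trimming needed here)
dense = np.reshape(signal_dense, [period_length_sec * sample_rate, -1], order="F")[0, :]
print(dense)             # -> [0 2], one label per 3-second period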
globocom/globonetworkapi-client-python
networkapiclient/Network.py
DHCPRelayIPv6.get_by_pk
python
def get_by_pk(self, dhcprelayv6_id): uri = 'api/dhcprelayv6/%s' % dhcprelayv6_id return self.get(uri)
List DHCPRelayIPv6 by ID :param: dhcprelayv6_id :return: Following dictionary: { "networkipv6": <networkipv6_id>, "id": <id>, "ipv6": { "block1": <block1>, "block2": <block2>, "block3": <block3>, "block4": <block4>, "block5": <block5>, "block6": <block6>, "block7": <block7>, "block8": <block8>, "ip_formated": "<string IPv6>", "networkipv6": <networkipv6_id>, "id": <ipv6_id>, "description": "<string description>" } } :raise NetworkAPIException: Failed to access the data source
https://github.com/globocom/globonetworkapi-client-python/blob/08dc24c54ee3cd6cdcca1fb33fb4796db8118e6f/networkapiclient/Network.py#L692-L720
from networkapiclient.ApiGenericClient import ApiGenericClient from networkapiclient.exception import InvalidParameterError from networkapiclient.GenericClient import GenericClient from networkapiclient.utils import is_valid_int_param class Network(GenericClient): def __init__(self, networkapi_url, user, password, user_ldap=None): super( Network, self).__init__( networkapi_url, user, password, user_ldap) def create_networks(self, ids, id_vlan): network_map = dict() network_map['ids'] = ids network_map['id_vlan'] = id_vlan code, xml = self.submit( {'network': network_map}, 'PUT', 'network/create/') return self.response(code, xml) def add_network(self, network, id_vlan, id_network_type, id_environment_vip=None, cluster_unit=None): network_map = dict() network_map['network'] = network network_map['id_vlan'] = id_vlan network_map['id_network_type'] = id_network_type network_map['id_environment_vip'] = id_environment_vip network_map['cluster_unit'] = cluster_unit code, xml = self.submit( {'network': network_map}, 'POST', 'network/add/') return self.response(code, xml) def add_network_ipv4( self, id_vlan, id_tipo_rede, id_ambiente_vip=None, prefix=None): vlan_map = dict() vlan_map['id_vlan'] = id_vlan vlan_map['id_tipo_rede'] = id_tipo_rede vlan_map['id_ambiente_vip'] = id_ambiente_vip vlan_map['prefix'] = prefix code, xml = self.submit( {'vlan': vlan_map}, 'POST', 'network/ipv4/add/') return self.response(code, xml) def add_network_ipv4_hosts( self, id_vlan, id_tipo_rede, num_hosts, id_ambiente_vip=None): vlan_map = dict() vlan_map['id_vlan'] = id_vlan vlan_map['id_tipo_rede'] = id_tipo_rede vlan_map['num_hosts'] = num_hosts vlan_map['id_ambiente_vip'] = id_ambiente_vip code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'network/ipv4/add/') return self.response(code, xml) def edit_network(self, id_network, ip_type, id_net_type, id_env_vip=None, cluster_unit=None): net_map = dict() net_map['id_network'] = id_network net_map['ip_type'] = ip_type net_map['id_net_type'] = id_net_type net_map['id_env_vip'] = id_env_vip net_map['cluster_unit'] = cluster_unit code, xml = self.submit({'net': net_map}, 'POST', 'network/edit/') return self.response(code, xml) def get_network_ipv4(self, id_network): if not is_valid_int_param(id_network): raise InvalidParameterError( u'O id do rede ip4 foi informado incorretamente.') url = 'network/ipv4/id/' + str(id_network) + '/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml) def get_network_ipv6(self, id_network): if not is_valid_int_param(id_network): raise InvalidParameterError( u'O id do rede ip6 foi informado incorretamente.') url = 'network/ipv6/id/' + str(id_network) + '/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml) def deallocate_network_ipv4(self, id_network_ipv4): if not is_valid_int_param(id_network_ipv4): raise InvalidParameterError( u'The identifier of NetworkIPv4 is invalid or was not informed.') url = 'network/ipv4/' + str(id_network_ipv4) + '/deallocate/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml) def add_network_ipv6( self, id_vlan, id_tipo_rede, id_ambiente_vip=None, prefix=None): vlan_map = dict() vlan_map['id_vlan'] = id_vlan vlan_map['id_tipo_rede'] = id_tipo_rede vlan_map['id_ambiente_vip'] = id_ambiente_vip vlan_map['prefix'] = prefix code, xml = self.submit( {'vlan': vlan_map}, 'POST', 'network/ipv6/add/') return self.response(code, xml) def add_network_ipv6_hosts( self, id_vlan, id_tipo_rede, num_hosts, id_ambiente_vip=None): vlan_map = dict() 
vlan_map['id_vlan'] = id_vlan vlan_map['id_tipo_rede'] = id_tipo_rede vlan_map['num_hosts'] = num_hosts vlan_map['id_ambiente_vip'] = id_ambiente_vip code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'network/ipv6/add/') return self.response(code, xml) def deallocate_network_ipv6(self, id_network_ipv6): if not is_valid_int_param(id_network_ipv6): raise InvalidParameterError( u'The identifier of NetworkIPv6 is invalid or was not informed.') url = 'network/ipv6/' + str(id_network_ipv6) + '/deallocate/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml) def remove_networks(self, ids): network_map = dict() network_map['ids'] = ids code, xml = self.submit( {'network': network_map}, 'PUT', 'network/remove/') return self.response(code, xml) class DHCPRelayIPv4(ApiGenericClient): def __init__(self, networkapi_url, user, password, user_ldap=None): super(DHCPRelayIPv4, self).__init__( networkapi_url, user, password, user_ldap) def add(self, networkipv4_id, ipv4_id): data = dict() data['networkipv4'] = networkipv4_id data['ipv4'] = dict() data['ipv4']['id'] = ipv4_id uri = 'api/dhcprelayv4/' return self.post(uri, data=data) def get_by_pk(self, dhcprelayv4_id): uri = 'api/dhcprelayv4/%s' % dhcprelayv4_id return self.get(uri) def list(self, networkipv4=None, ipv4=None): uri = 'api/dhcprelayv4/?' if networkipv4: uri += 'networkipv4=%s&' % networkipv4 if ipv4: uri += 'ipv4=%s' % ipv4 return self.get(uri) def remove(self, dhcprelayv4_id): uri = 'api/dhcprelayv4/%s' % dhcprelayv4_id return self.delete(uri) class DHCPRelayIPv6(ApiGenericClient): def __init__(self, networkapi_url, user, password, user_ldap=None): super(DHCPRelayIPv6, self).__init__( networkapi_url, user, password, user_ldap) def add(self, networkipv6_id, ipv6_id): data = dict() data['networkipv6'] = networkipv6_id data['ipv6'] = dict() data['ipv6']['id'] = ipv6_id uri = 'api/dhcprelayv6/' return self.post(uri, data=data)
Apache License 2.0
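A usage sketch for the client above; the endpoint, credentials and primary key are placeholders, and get_by_pk simply issues a GET against api/dhcprelayv6/<id> as shown in the record.
from networkapiclient.Network import DHCPRelayIPv6

# Connection details below are placeholders.
client = DHCPRelayIPv6("https://networkapi.example.com/", "user", "password")

relay = client.get_by_pk(42)           # GET api/dhcprelayv6/42
print(relay["ipv6"]["ip_formated"])    # assumes the response shape from the docstring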
ekumenlabs/terminus
terminus/geometry/line_segment.py
LineSegment.point_at_offset
python
def point_at_offset(self, offset): if offset > self.length(): raise ValueError("Offset ({0}) is greater than segment length ({1})".format(offset, self.length())) dx = self.b.x - self.a.x dy = self.b.y - self.a.y linelen = math.hypot(dx, dy) return Point(self.a.x + dx / linelen * offset, self.a.y + dy / linelen * offset)
Returns the point that lies at the given offset (distance) from the start of the segment, measured along the direction of the segment treated as a vector going from point a to point b.
https://github.com/ekumenlabs/terminus/blob/ce6dcdc797011155e8fd52d40d910bdaf9bfe397/terminus/geometry/line_segment.py#L250-L263
import math from geometry.point import Point import geometry.arc import geometry.circle class LineSegment(object): def __init__(self, point_a, point_b): self.a = point_a self.b = point_b @classmethod def from_tuples(cls, t1, t2): return cls(Point.from_tuple(t1), Point.from_tuple(t2)) @classmethod def from_point_and_heading(cls, point, heading, length=1): heading_in_radians = math.radians(heading) dx = math.cos(heading_in_radians) * length dy = math.sin(heading_in_radians) * length return cls(point, point + Point(dx, dy)) def includes_point(self, point, buffer=1e-7): cross_product = (self.b - self.a).cross_product(point - self.a) if cross_product.norm() > buffer: return False dot_product = (self.b - self.a).dot_product(point - self.a) if dot_product + buffer < 0.0: return False distance = self.a.squared_distance_to(self.b) if dot_product - distance > buffer: return False return True def start_point(self): return self.a def end_point(self): return self.b def direction_vector(self): return self.b - self.a def length(self): return self.direction_vector().norm() def translate_by(self, point): return LineSegment(self.start_point() + point, self.end_point() + point) def inverted(self): return LineSegment(self.b, self.a) def is_orthogonal_to(self, line_segment, buffer=0.001): return abs((self.b - self.a).dot_product(line_segment.b - line_segment.a)) < buffer def includes_line_segment(self, other_segment): return self.includes_point(other_segment.start_point()) and self.includes_point(other_segment.end_point()) def _find_line_segment_intersection(self, segment): v1 = self.direction_vector() p1 = self.start_point() v2 = segment.direction_vector() p2 = segment.start_point() v3 = p2 - p1 v4 = p1 - p2 v1_cross_v2 = v1.cross_product(v2) v2_cross_v1 = v2.cross_product(v1) if abs(v1_cross_v2.norm()) < 1e-7: overlap = self._collinear_overlap_with(segment) if overlap: return [overlap] else: return [] k1 = v3.cross_product(v2).norm() / v1_cross_v2.norm() k2 = v4.cross_product(v1).norm() / v2_cross_v1.norm() rounded_k1 = round(k1, 7) rounded_k2 = round(k2, 7) if (rounded_k1 < 0.0) or (rounded_k1 > 1.0) or (rounded_k2 < 0.0) or (rounded_k2 > 1.0): return [] candidate = p1 + v1 * k1 if self.includes_point(candidate) and segment.includes_point(candidate): return [candidate] else: return [] def _find_bounding_box_intersection(self, bounding_box): intersections = [] for segment in bounding_box.perimeter(): intersections.extend(self.find_intersection(segment)) return intersections def _find_arc_intersection(self, arc): local_segment = self.translate_by(arc.center_point().negated()) local_segment_vector = local_segment.direction_vector() a = local_segment_vector.norm_squared() b = 2 * ((local_segment_vector.x * local_segment.start_point().x) + (local_segment_vector.y * local_segment.start_point().y)) c = local_segment.start_point().norm_squared() - (arc.radius() ** 2) delta = (b ** 2) - (4 * a * c) if delta < 0: return [] elif delta == 0.0: u = -b / (2 * a) candidate = self.start_point() + (local_segment_vector * u) if self.includes_point(candidate) and arc.includes_point(candidate): return [candidate] else: return [] else: delta = math.sqrt(delta) u1 = (-b + delta) / (2 * a) u2 = (-b - delta) / (2 * a) candidates = [self.start_point() + (local_segment_vector * u1), self.start_point() + (local_segment_vector * u2)] return filter(lambda point: self.includes_point(point) and arc.includes_point(point), candidates) def _find_circle_intersection(self, circle): local_segment = self.translate_by(circle.center().negated()) 
local_segment_vector = local_segment.direction_vector() a = local_segment_vector.norm_squared() b = 2 * ((local_segment_vector.x * local_segment.start_point().x) + (local_segment_vector.y * local_segment.start_point().y)) c = local_segment.start_point().norm_squared() - (circle.radius() ** 2) delta = (b ** 2) - (4 * a * c) if delta < 0: return [] elif delta == 0.0: u = -b / (2 * a) candidate = self.start_point() + (local_segment_vector * u) if self.includes_point(candidate): return [candidate] else: return [] else: delta = math.sqrt(delta) u1 = (-b + delta) / (2 * a) u2 = (-b - delta) / (2 * a) candidates = [self.start_point() + (local_segment_vector * u1), self.start_point() + (local_segment_vector * u2)] return filter(lambda point: self.includes_point(point), candidates) def _collinear_overlap_with(self, segment): overlap = None if self.includes_line_segment(segment): overlap = segment elif segment.includes_line_segment(self): overlap = self elif self.includes_point(segment.start_point()): overlap = LineSegment(segment.start_point(), segment._pick_segment_endpoint(self)) elif self.includes_point(segment.end_point()): overlap = LineSegment(segment.end_point(), segment._pick_segment_endpoint(self)) if not overlap: return None elif overlap.start_point() == overlap.end_point(): return overlap.start_point() else: return overlap def find_intersection(self, other): if isinstance(other, LineSegment): return self._find_line_segment_intersection(other) elif isinstance(other, geometry.arc.Arc): return self._find_arc_intersection(other) elif isinstance(other, geometry.circle.Circle): return self._find_circle_intersection(other) elif isinstance(other, geometry.bounding_box.BoundingBox): return self._find_bounding_box_intersection(other) else: raise ValueError("Intersection between {0} and {1} not supported".format(self, other)) def extended_by(self, distance): dx = self.b.x - self.a.x dy = self.b.y - self.a.y linelen = math.hypot(dx, dy) new_end = Point(self.b.x + dx / linelen * distance, self.b.y + dy / linelen * distance) return LineSegment(self.a, new_end) def extended_to(self, new_point): new_line_segment = LineSegment(self.a, new_point) if not new_line_segment.includes_point(self.b): raise ValueError("The resulting line segment is not collinear") return new_line_segment
Apache License 2.0
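A worked example for point_at_offset on a 3-4-5 segment; the numbers follow directly from the formula in the record (dx/linelen and dy/linelen scale the offset), and the import paths mirror the record's own imports.
from geometry.point import Point
from geometry.line_segment import LineSegment

seg = LineSegment(Point(0, 0), Point(3, 4))   # length() == 5
p = seg.point_at_offset(2.5)                  # halfway along the segment
print(p.x, p.y)                               # -> 1.5 2.0
# seg.point_at_offset(6.0) would raise ValueError: offset exceeds the segment length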
mvondracek/wifimitm
wifimitm/model.py
WirelessInterface.start_monitor_mode
python
def start_monitor_mode(self, channel=None): assert not self.monitor_mode_active, 'Interface already in monitor mode.' cre_mon_enabled = re.compile( r'^\s+\(\S+ monitor mode vif enabled for \[\S+\](?P<name>\S+) on \[\S+\](?P<mon>\S+)\)$') cmd = ['airmon-ng', 'start', self.name] if channel: cmd.append(str(channel)) self.channel = channel process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) process.check_returncode() if process.stderr != '': logger.warning("Unexpected stderr of airmon-ng: '{}'.".format(process.stderr)) for line in process.stdout.splitlines(): m = cre_mon_enabled.match(line) if m: self.monitor_mode_active = True self.name_monitor = m.group('mon') break
:param channel: monitor interface channel Raises: CalledProcessError if airmon-ng returncode is non-zero
https://github.com/mvondracek/wifimitm/blob/bc70a4f1e80430941e3bb7114df69f96a39a04d1/wifimitm/model.py#L331-L363
import logging import netifaces import os import re import shutil import subprocess import tempfile from contextlib import contextmanager __author__ = 'Martin Vondracek' __email__ = 'xvondr20@stud.fit.vutbr.cz' logger = logging.getLogger(__name__) class WirelessStation(object): def __str__(self, *args, **kwargs): return '<WirelessStation mac_address={}, power={}>'.format(self.mac_address, self.power) def __init__(self, mac_address, power): self.mac_address = mac_address self.power = power self.associated_ap = None class WirelessAccessPoint(object): def __str__(self, *args, **kwargs): s = '<WirelessAccessPoint essid={}, bssid={}'.format(self.essid, self.bssid) if self.is_cracked(): if 'WEP' in self.encryption: s += ', PSK(0x' + self.cracked_psk + ', "' + bytes.fromhex(self.cracked_psk).decode('ascii') + '"), ' else: s += ', PSK("' + self.cracked_psk + '")' s += ', power={}, channel={}, encryption={}, cipher={}, authentication={}, wps={}, iv_sum={}>'.format( self.power, self.channel, self.encryption, self.cipher, self.authentication, self.wps, self.iv_sum ) return s def __init__(self, bssid, power, channel, encryption, cipher, authentication, wps, essid: str, iv_sum): self.bssid = bssid self.power = power self.channel = channel self.encryption = encryption self.cipher = cipher self.authentication = authentication self.wps = wps self.essid = essid self.iv_sum = iv_sum self.__dir_path = None self.__temp_dir = None self.associated_stations = list() self.arp_cap_path = None self.psk_path = None self.prga_xor_path = None self.wpa_handshake_cap_path = None self.default_arp_cap_path = os.path.join(self.dir_path, 'ARP.cap') if 'WEP' in self.encryption: self.default_psk_path = os.path.join(self.dir_path, self.encryption + '_PSK.hex') else: self.default_psk_path = os.path.join(self.dir_path, self.encryption + '_PSK.txt') self.default_prga_xor_path = os.path.join(self.dir_path, 'PRGA.xor') self.default_wpa_handshake_cap_path = os.path.join(self.dir_path, 'WPA_handshake.cap') @property def dir_path(self): if not self.__dir_path: path = os.path.expanduser(os.path.join('~', '.wifimitm', 'networks', self.essid)) if path.startswith('~'): self.__temp_dir = tempfile.TemporaryDirectory(prefix='wifimitm-networks') path = self.__temp_dir.name logger.warning('Call os.path.expanduser failed.') self.__dir_path = path return self.__dir_path def make_dir(self): os.makedirs(self.dir_path, exist_ok=True) def is_cracked(self): return self.psk_path is not None @property def cracked_psk(self): if self.psk_path: with open(self.psk_path, 'r') as f: return f.read() def save_arp_cap(self, source_arp_cap_path): if not os.path.isfile(source_arp_cap_path): raise FileNotFoundError shutil.move(source_arp_cap_path, self.default_arp_cap_path) self.arp_cap_path = self.default_arp_cap_path def save_psk_file(self, source_psk_file_path): if not os.path.isfile(source_psk_file_path): raise FileNotFoundError shutil.move(source_psk_file_path, self.default_psk_path) self.psk_path = self.default_psk_path def delete_psk_file(self): if os.path.isfile(self.psk_path): os.remove(self.psk_path) self.psk_path = None def save_wpa_handshake_cap(self, source_wpa_handshake_cap_path): if not os.path.isfile(source_wpa_handshake_cap_path): raise FileNotFoundError shutil.move(source_wpa_handshake_cap_path, self.default_wpa_handshake_cap_path) self.wpa_handshake_cap_path = self.default_wpa_handshake_cap_path def save_prga_xor(self, source_prga_xor_path): if not os.path.isfile(source_prga_xor_path): raise FileNotFoundError shutil.move(source_prga_xor_path, 
self.default_prga_xor_path) self.prga_xor_path = self.default_prga_xor_path def add_associated_station(self, station): station.associated_ap = self self.associated_stations.append(station) def update_known(self): if not self.arp_cap_path and os.path.isfile(self.default_arp_cap_path): self.arp_cap_path = self.default_arp_cap_path logger.debug(self.essid + ' arp_cap known') if not self.psk_path and os.path.isfile(self.default_psk_path): self.psk_path = self.default_psk_path logger.debug(self.essid + ' psk known') if not self.prga_xor_path and os.path.isfile(self.default_prga_xor_path): self.prga_xor_path = self.default_prga_xor_path logger.debug(self.essid + ' prga_xor known') if not self.wpa_handshake_cap_path and os.path.isfile(self.default_wpa_handshake_cap_path): self.wpa_handshake_cap_path = self.default_wpa_handshake_cap_path logger.debug(self.essid + ' wpa_handshake_cap known') def interface_exists(name: str) -> bool: return name in netifaces.interfaces() class WirelessInterface(object): def __str__(self, *args, **kwargs): s = '<WirelessInterface name={}, mac_address={}, channel={}, driver={}, chipset={}' .format( self.name, self.mac_address, self.channel, self.driver, self.chipset ) if self.monitor_mode_active: s += ', monitor' s += '>' return s def __init__(self, name, driver=None, chipset=None): if not interface_exists(name): raise ValueError('You must specify a valid interface name.') self.name_original = name self.name_monitor = None self.mac_address_original = self.get_mac_by_name(self.name) self.mac_address_spoofed = None self.channel = None self.monitor_mode_active = False self.driver = driver self.chipset = chipset @staticmethod def get_wireless_interface_obj(interface): if isinstance(interface, WirelessInterface): return interface elif isinstance(interface, str): return WirelessInterface(name=interface) else: raise TypeError @staticmethod def get_mac_by_name(name: str) -> str: ifa = netifaces.ifaddresses(name) mac = ifa[netifaces.AF_LINK][0]['addr'] return mac @property def mac_address(self): assert self.mac_address_spoofed or self.mac_address_original, 'No MAC address available.' if self.mac_address_spoofed: return self.mac_address_spoofed else: return self.mac_address_original @property def name(self): assert self.name_monitor or self.name_original, 'No interface name available.' if self.name_monitor: return self.name_monitor else: return self.name_original @property def gateway(self): gateway = None gws = netifaces.gateways() for gw in gws[netifaces.AF_INET]: if gw[1] == self.name: gateway = gw[0] break assert gateway, 'No default gateway available.' return gateway @contextmanager def monitor_mode(self, channel=None): self.start_monitor_mode(channel=channel) yield if self.monitor_mode_active: self.stop_monitor_mode()
MIT License
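A usage sketch for the wrapper above; it assumes a Linux host with aircrack-ng installed, root privileges, and a real wireless interface ("wlan0" is a placeholder), so it is illustrative rather than directly runnable everywhere.
from wifimitm.model import WirelessInterface

iface = WirelessInterface(name="wlan0")        # placeholder interface name

# The monitor_mode context manager from the record wraps start/stop of monitor mode.
with iface.monitor_mode(channel=6):
    print("monitor interface:", iface.name)    # e.g. the airmon-ng-created monitor name

print("back to the original interface:", iface.name)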
drexly/openhgsenti
lib/django/contrib/gis/gdal/feature.py
Feature.get
python
def get(self, field): field_name = getattr(field, 'name', field) return self[field_name].value
Returns the value of the field, instead of an instance of the Field object. Takes either the field name as a string or a Field object as its parameter.
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/gdal/feature.py#L107-L114
from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import GDALException, OGRIndexError from django.contrib.gis.gdal.field import Field from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api from django.utils import six from django.utils.encoding import force_bytes, force_text from django.utils.six.moves import range class Feature(GDALBase): def __init__(self, feat, layer): if not feat: raise GDALException('Cannot create OGR Feature, invalid pointer given.') self.ptr = feat self._layer = layer def __del__(self): if self._ptr and capi: capi.destroy_feature(self._ptr) def __getitem__(self, index): if isinstance(index, six.string_types): i = self.index(index) else: if index < 0 or index > self.num_fields: raise OGRIndexError('index out of range') i = index return Field(self, i) def __iter__(self): for i in range(self.num_fields): yield self[i] def __len__(self): return self.num_fields def __str__(self): return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name) def __eq__(self, other): return bool(capi.feature_equal(self.ptr, other._ptr)) @property def encoding(self): return self._layer._ds.encoding @property def fid(self): return capi.get_fid(self.ptr) @property def layer_name(self): name = capi.get_feat_name(self._layer._ldefn) return force_text(name, self.encoding, strings_only=True) @property def num_fields(self): return capi.get_feat_field_count(self.ptr) @property def fields(self): return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i)) for i in range(self.num_fields)] @property def geom(self): geom_ptr = capi.get_feat_geom_ref(self.ptr) return OGRGeometry(geom_api.clone_geom(geom_ptr)) @property def geom_type(self): return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
Apache License 2.0
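Feature.get is normally reached by iterating a layer of a GDAL DataSource; a short sketch assuming GDAL is available and a shapefile with a "NAME" field exists (both the path and the field name are placeholders).
from django.contrib.gis.gdal import DataSource

ds = DataSource("cities.shp")          # placeholder path
layer = ds[0]

for feat in layer:
    # Either a field name string or a Field object works here.
    print(feat.fid, feat.get("NAME"), feat.geom.geom_type)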
craigds/django-mpathy
mpathy/operations.py
inject_pre_migration_operations
python
def inject_pre_migration_operations(plan=None, apps=global_apps, using=DEFAULT_DB_ALIAS, **kwargs): if plan is None: return for migration, backward in plan: for index, operation in enumerate(migration.operations): if isinstance(operation, migrations.CreateModel): for name, field in operation.fields: if isinstance(field, LTreeField): migration.operations.insert(index, LTreeExtension()) return
Insert an `LTreeExtension` operation before the first planned `CreateModel` operation that defines an `LTreeField`.
https://github.com/craigds/django-mpathy/blob/dafb5b5035fbd8dc074613489d8f9ed304baba96/mpathy/operations.py#L18-L31
from psycopg2.extensions import quote_ident from django.apps import apps as global_apps from django.contrib.postgres.operations import CreateExtension from django.core.exceptions import FieldDoesNotExist from django.db import connection, DEFAULT_DB_ALIAS, migrations from .fields import LTreeField from .models import MPathNode class LTreeExtension(CreateExtension): def __init__(self): self.name = 'ltree'
BSD 3-Clause New or Revised License
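The hook above expects the keyword arguments that Django's pre_migrate signal sends (plan, apps, using), so one plausible way to register it is from an AppConfig.ready(); this is a sketch, not necessarily how django-mpathy wires it up itself.
from django.apps import AppConfig
from django.db.models.signals import pre_migrate

class MPathyConfig(AppConfig):
    name = "mpathy"

    def ready(self):
        # pre_migrate supplies plan=, apps= and using=, which the hook consumes.
        from mpathy.operations import inject_pre_migration_operations
        pre_migrate.connect(inject_pre_migration_operations)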
gvalvano/sdtnet
architectures/decoder.py
Decoder.__init__
python
def __init__(self, z_factors, encoded_anatomy, n_channels, is_training, output='tanh', name='Decoder'): self.z_factors = z_factors self.encoded_anatomy = encoded_anatomy self.n_channels = n_channels self.is_training = is_training self.name = name assert output in ['linear', 'tanh'] self.output = output self.reconstruction = None
Decoder that generates an image by combining an anatomical and a modality representation. :param z_factors: (tensor) incoming tensor with the modality representation :param encoded_anatomy: (tensor) incoming tensor with input anatomy information :param n_channels: (int) number of anatomical channels :param is_training: (tf.placeholder(dtype=tf.bool) or bool) variable to define training or test mode; it is needed for the behaviour of dropout, batch normalization, etc. (which behave differently at train and test time) :param output: (string) output activation function, defaults to 'tanh' (according to original paper) :param name: (string) name scope for the decoder - - - - - - - - - - - - - - - - Notice that: - the network output is linear (regression task) - - - - - - - - - - - - - - - - Example of usage: # build the entire model: model = Decoder(z_factors, encoded_anatomy, n_channels, is_training).build()
https://github.com/gvalvano/sdtnet/blob/4d3052c4a6dfa2c409d55efe81d0aa2451b823cc/architectures/decoder.py#L26-L59
import tensorflow as tf from tensorflow import layers from .layers.film_layer import film_layer he_init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False) b_init = tf.zeros_initializer() class Decoder(object):
Apache License 2.0
krother/advanced_python
error_handling/logging/logging_example.py
factorial
python
def factorial(n=10): i = 1 factorial = 1 while i < n: logging.info('starting iteration {}'.format(i)) factorial *= i logging.debug('new factorial: {}'.format(factorial)) i += 1 logging.warning('Final result: {}'.format(factorial))
Calculates factorials with log messages.
https://github.com/krother/advanced_python/blob/749d3d7f6780109f3fa45812386e442db49cd0d7/error_handling/logging/logging_example.py#L16-L26
import logging import sys logging.basicConfig(filename='debug.log', level=logging.DEBUG)
MIT License
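Running the example is enough to see the different levels land in debug.log; a short driver follows, with the resulting lines sketched in comments (note the loop condition i < n means factorial(4) logs 3! = 6). The module name assumes the script is importable from its directory.
from logging_example import factorial   # error_handling/logging/logging_example.py

factorial(4)

# debug.log then contains roughly:
# INFO:root:starting iteration 1
# DEBUG:root:new factorial: 1
# INFO:root:starting iteration 2
# DEBUG:root:new factorial: 2
# INFO:root:starting iteration 3
# DEBUG:root:new factorial: 6
# WARNING:root:Final result: 6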
smdabdoub/phylotoast
phylotoast/util.py
parse_map_file
python
def parse_map_file(mapFNH): m = OrderedDict() map_header = None with file_handle(mapFNH) as mapF: for line in mapF: if line.startswith("#SampleID"): map_header = line.strip().split("\t") if line.startswith("#") or not line: continue line = line.strip().split("\t") m[line[0]] = line return map_header, m
Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (default) or a user-supplied one. The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple, dict :return: A tuple of header line for mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral
https://github.com/smdabdoub/phylotoast/blob/2087823eb15a3cbc2bc9175f6f47395d63235c9e/phylotoast/util.py#L76-L108
import errno import itertools import os import sys from textwrap import dedent as twdd from collections import namedtuple, OrderedDict, defaultdict try: from palettable.colorbrewer.qualitative import Set3_12 except ImportError as ie: sys.exit("No module named palettable") FASTARecord = namedtuple("FASTA_Record", "id descr data") def storeFASTA(fastaFNH): fasta = file_handle(fastaFNH).read() return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])] def parseFASTA(fastaFNH): recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
MIT License
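A usage sketch for the parser above; a minimal tab-separated mapping file (rows borrowed from the docstring's example data) is written inline so the example is self-contained.
from phylotoast.util import parse_map_file

# Build a minimal QIIME-style mapping file (contents taken from the docstring example).
with open("mapping.txt", "w") as f:
    f.write("#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tState\tDescription\n")
    f.write("11.V13\tACGCTCGACA\tGTTTGATCCTGGCTCAG\tDisease\tRat_Oral\n")

header, samples = parse_map_file("mapping.txt")
print(header)              # ['#SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'State', 'Description']
print(samples["11.V13"])   # the full row for that sample, as a list of column values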
albanie/collaborative-experts
base/base_dataset.py
BaseDataset.load_challenge_text_features
python
def load_challenge_text_features(self): text_feat_path = self.paths["challenge_text_feat_paths"][self.text_feat] if self.split_name == "public_server_test": text_path = self.challenge_test_root_feat_folder / text_feat_path caption_path = (self.challenge_test_root_feat_folder / self.paths["raw_captions_path"]) else: text_path = Path(self.root_feat) / text_feat_path caption_path = Path(self.root_feat) / self.paths["raw_captions_path"] self.text_features = memcache(text_path) self.raw_captions = memcache(caption_path)
Load the text features and raw captions to be used in the challenge.
https://github.com/albanie/collaborative-experts/blob/b41defc4fb8de451809014c970ccbe518621909f/base/base_dataset.py#L691-L703
import time import inspect import logging import json import functools from abc import abstractmethod from typing import Dict, Union, List from pathlib import Path from collections import OrderedDict import numpy as np import torch as th from numpy.random import randint from torch.utils.data import Dataset from typeguard import typechecked import data_loader from utils.util import ensure_tensor, expert_tensor_storage from zsvision.zs_utils import memcache import pickle print = functools.partial(print, flush=True) class BaseDataset(Dataset): @staticmethod @abstractmethod @typechecked def dataset_paths(training_file=None) -> Dict[str, Union[Path, str]]: raise NotImplementedError @abstractmethod def sanity_checks(self): raise NotImplementedError @abstractmethod def load_features(self): raise NotImplementedError @typechecked def __init__( self, data_dir: Path, fuse_captions: bool, spatial_feats: bool, challenge_mode: bool, eval_only: bool, use_zeros_for_missing: bool, task: str, text_agg: str, text_feat: str, split_name: str, cls_partition: str, root_feat_folder: str, challenge_test_root_feat_folder: str, text_dim: int, num_test_captions: int, restrict_train_captions: int, max_tokens: Dict[str, int], text_dropout: float, logger: logging.Logger, raw_input_dims: Dict[str, int], feat_aggregation: Dict[str, Dict], distil_params: Union[None, Dict], training_file: Union[None, str], caption_masks: Union[None, str], ce_shared_dim: Union[None, int], **kwargs, ): self.task = task self.eval_only = eval_only self.logger = logger self.challenge_mode = challenge_mode self.text_feat = text_feat self.data_dir = data_dir self.text_dim = text_dim self.spatial_feats = spatial_feats self.text_dropout = text_dropout self.restrict_train_captions = restrict_train_captions self.max_tokens = max_tokens self.cls_partition = cls_partition self.fuse_captions = fuse_captions self.num_test_captions = num_test_captions self.feat_aggregation = feat_aggregation self.root_feat = data_dir / root_feat_folder self.challenge_test_root_feat_folder = data_dir / challenge_test_root_feat_folder self.experts = set(raw_input_dims.keys()) self.distil_params = distil_params self.training_file = training_file self.restrict_test_captions = None self.text_features = None self.label_features = None self.video_labels = None self.distil_features = None self.raw_captions = None self.features = None self.captions_per_video = 1 self.ordered_experts = list(raw_input_dims.keys()) self.partition_lists = {} self.configure_train_test_splits(split_name=split_name) self.logger.info("The current task is {}".format(self.task)) self.sample_list = self.partition_lists["train"] self.num_samples = len(self.sample_list) num_val = len(self.partition_lists["val"]) self.ce_shared_dim = ce_shared_dim if caption_masks is not None: self.caption_masks = pickle.load(open(self.root_feat / Path(caption_masks), "rb")) else: self.caption_masks = None if self.task == "classification": self.sample_list = self.partition_lists[self.cls_partition] self.num_samples = len(self.sample_list) self.logger.info("The current cls_partition is {}".format(self.cls_partition)) self.num_classes = None self.class_type = None self.raw_input_dims = raw_input_dims self.video_path_retrieval = [f"videos/{x}.mp4" for x in self.partition_lists["val"]] if use_zeros_for_missing: self.MISSING_VAL = 0 else: self.MISSING_VAL = np.nan self.load_features() if text_agg == "avg": self.logger.info("averaging the text features...") for key, val in self.text_features.items(): self.text_features[key] = 
[np.mean(x, 0, keepdims=1) for x in val] self.logger.info("finished averaging the text features") self.trn_config = {} self.raw_config = {} self.tensor_storage = expert_tensor_storage(self.experts, self.feat_aggregation) for static_expert in self.tensor_storage["fixed"]: if static_expert in self.feat_aggregation: if "trn_seg" in self.feat_aggregation[static_expert].keys(): self.trn_config[static_expert] = self.feat_aggregation[static_expert]["trn_seg"] if "raw" in self.feat_aggregation[static_expert]["temporal"]: self.raw_config[static_expert] = 1 if self.task == "classification": return retrieval = { expert: np.zeros((num_val, self.max_tokens[expert], raw_input_dims[expert])) for expert in self.tensor_storage["variable"] } retrieval.update({expert: np.zeros((num_val, raw_input_dims[expert])) for expert in self.tensor_storage["fixed"]}) self.retrieval = retrieval self.test_ind = {expert: th.ones(num_val) for expert in self.experts} self.raw_captions_retrieval = [None] * num_val if self.task == "retrieval-as-classification": num_labels = len(self.label_features) self.text_retrieval = np.zeros((num_labels, 1, 1, self.text_dim)) self.query_masks = np.zeros((num_labels, num_val)) for ii, video_name in enumerate(self.partition_lists["val"]): labels = self.video_labels[video_name] self.query_masks[np.array(labels), ii] = 1 for ii, embedding in self.label_features.items(): self.text_retrieval[ii, :, :, :] = np.mean(embedding, axis=0, keepdims=1) elif self.task == "retrieval": self.query_masks = np.zeros((num_val, num_test_captions)) self.text_token_mask = np.zeros((num_val, num_test_captions)) self.text_retrieval = np.zeros((num_val, self.num_test_captions, self.max_tokens["text"], self.text_dim)) else: raise ValueError(f"Unrecognised task: {self.task}") for ii, video_name in enumerate(self.partition_lists["val"]): self.raw_captions_retrieval[ii] = self.raw_captions[video_name] for expert in self.tensor_storage["fixed"].intersection(self.experts): feats = self.features[expert][video_name] drop = self.has_missing_values(feats) self.test_ind[expert][ii] = not drop self.retrieval[expert][ii] = feats if drop: self.retrieval[expert][ii][:] = self.MISSING_VAL if self.feat_aggregation[expert].get("binarise", False): keep = np.logical_not(np.isnan(self.retrieval[expert][:, 0, 0])) marker = np.ones_like(self.retrieval[expert][keep]) self.retrieval[expert][keep] = marker for expert in self.tensor_storage["variable"].intersection(self.experts): feats = self.features[expert][video_name] drop = self.has_missing_values(feats) self.test_ind[expert][ii] = not drop if drop: self.retrieval[expert][ii][:] = self.MISSING_VAL if self.feat_aggregation[expert].get("binarise", False): keep = np.logical_not(np.isnan(self.retrieval[expert][:, 0, 0])) marker = np.ones_like(self.retrieval[expert][keep]) self.retrieval[expert][keep] = marker if self.test_ind[expert][ii]: keep = min(self.max_tokens[expert], len(feats)) self.retrieval[expert][ii, :keep, :] = feats[:keep] if self.task == "retrieval": candidates_sentences = self.text_features[video_name] if self.restrict_test_captions is not None: keep_sent_idx = self.restrict_test_captions[video_name] candidates_sentences = [candidates_sentences[keep_sent_idx]] self.query_masks[ii, :len(candidates_sentences)] = 1 if self.fuse_captions: text_feats = np.vstack(candidates_sentences) keep = min(len(text_feats), self.max_tokens["text"]) self.text_retrieval[ii, 0, :keep, :] = text_feats[:keep, :] self.text_token_mask[ii, 0] = keep self.query_masks[ii, :] = 1 else: for 
test_caption_idx in range(self.num_test_captions): if len(candidates_sentences) <= test_caption_idx: break keep = min(len(candidates_sentences[test_caption_idx]), self.max_tokens["text"]) self.text_token_mask[ii, test_caption_idx] = keep if ii % 500 == 0 and test_caption_idx == 0: msg = ( f"{ii}/{len(self.partition_lists['val'])} will evaluate " f"sentence {test_caption_idx} out of " f"{len(candidates_sentences)} (has {keep} words) " f"{video_name}" ) self.logger.info(msg) text_feats = candidates_sentences[test_caption_idx][: keep] if text_feats.size == 0: text_feats = 0 raise ValueError("empty text features!") self.text_retrieval[ii, test_caption_idx, :keep, :] = text_feats self.sanity_checks() def configure_train_test_splits(self, split_name): self.paths = type(self).dataset_paths(self.training_file) print("loading training/val splits....") tic = time.time() for subset, path in self.paths["subset_list_paths"][split_name].items(): if self.challenge_mode and split_name == "public_server_test" and subset == "val": root_feat = Path(self.challenge_test_root_feat_folder) else: root_feat = Path(self.root_feat) subset_list_path = root_feat / path if subset == "train" and self.eval_only: rows = [] else: with open(subset_list_path) as f: rows = f.read().splitlines() if isinstance(self, data_loader.DiDeMo_dataset.DiDeMo): rows = [x.strip().split(".")[0] for x in rows] self.partition_lists[subset] = rows print("done in {:.3f}s".format(time.time() - tic)) self.split_name = split_name def collate_data(self, data): batch_size = len(data) tensors = {} for expert in self.tensor_storage["fixed"]: if expert in self.trn_config.keys(): tensors[expert] = np.zeros((batch_size, self.trn_config[expert], self.raw_input_dims[expert])) else: tensors[expert] = np.zeros((batch_size, self.raw_input_dims[expert])) if self.distil_features is not None: distil = {} distil_text = {} for t in self.distil_features: distil[t] = {} distil_text[t] = {} check_moee = False if self.distil_params is not None and 'moee' in self.distil_params: if isinstance(self.distil_params['moee'], list) and self.distil_params['moee'][t] == 1: check_moee = True elif not isinstance(self.distil_params['moee'], list) and self.distil_params['moee'] == True: check_moee = True for mod in self.distil_features[t][list(self.distil_features[t].keys())[0]]['vid_embds'].keys(): if check_moee: size = self.raw_input_dims[mod] distil[t][mod] = np.zeros((batch_size, size)) distil_text[t][mod] = np.zeros((batch_size, 1, size)) else: distil[t][mod] = np.zeros((batch_size, self.ce_shared_dim)) distil_text[t][mod] = np.zeros((batch_size, 1, self.ce_shared_dim)) ind = {expert: np.zeros(batch_size) for expert in self.experts} tensors.update({expert: np.zeros( (batch_size, self.max_tokens[expert], self.raw_input_dims[expert]) ) for expert in self.tensor_storage["variable"]}) if "retrieval" in self.task: text_tensor = np.zeros((batch_size, self.captions_per_video, self.max_tokens["text"], self.text_dim)) text_token_mask = np.zeros((batch_size, self.captions_per_video)) elif "classification" in self.task and self.class_type == "single_label": label_tensor = np.zeros(batch_size) vid_name = [] elif "classification" in self.task and self.class_type == "multi_label": label_tensor = np.zeros((batch_size, self.num_classes)) vid_name = [] for ii, _ in enumerate(data): datum = data[ii] for expert in self.experts: ind[expert][ii] = datum[f"{expert}_ind"] for expert in self.tensor_storage["fixed"]: tensors[expert][ii] = datum[expert] for expert in self.tensor_storage["variable"]: 
if ind[expert][ii]: keep = min(len(datum[expert]), self.max_tokens[expert]) if keep: tensors[expert][ii, :keep, :] = datum[expert][:keep] else: tensors[expert][ii, :, :] = self.MISSING_VAL if self.distil_features is not None: for t in datum['distil_mods']: for mod in datum['distil_mods'][t]: distil[t][mod][ii] = datum['distil_mods'][t][mod] for t in datum['distil_mods']: for mod in datum['distil_texts'][t]: distil_text[t][mod][ii] = datum['distil_texts'][t][mod] if "retrieval" in self.task: text = datum["text"] for jj in range(self.captions_per_video): keep = min(len(text[jj]), self.max_tokens["text"]) text_tensor[ii, jj, :keep, :] = text[jj][:keep] text_token_mask[ii, jj] = keep elif self.task == "classification": if self.cls_partition != 'test': label_tensor[ii] = datum["labels"] vid_name.append(datum["vid"]) ind = {key: ensure_tensor(val) for key, val in ind.items()} experts = OrderedDict( (expert, th.from_numpy(tensors[expert]).float()) for expert in self.ordered_experts) if self.distil_features is not None: for t in distil: distil[t] = OrderedDict( (expert, th.from_numpy(distil[t][expert]).float()) for expert in distil[t]) distil_text[t] = OrderedDict( (expert, th.from_numpy(distil_text[t][expert]).float()) for expert in distil_text[t]) for expert in self.experts: if self.feat_aggregation[expert].get("binarise", False): replace = np.logical_not(th.isnan(experts[expert][:, 0, 0])) experts[expert][replace] = th.ones_like(experts[expert][replace]) minibatch = {"experts": experts, "ind": ind} if self.distil_features is not None: minibatch["distil_video"] = distil minibatch["distil_text"] = distil_text if "retrieval" in self.task: minibatch["text"] = th.from_numpy(text_tensor).float() minibatch["text_token_mask"] = th.from_numpy(text_token_mask) elif self.task == "classification": if self.cls_partition != 'test': minibatch["labels"] = th.from_numpy(label_tensor).float() if self.cls_partition != "train": minibatch["vid_name"] = vid_name return minibatch def __len__(self): return self.num_samples def __getitem__(self, idx): if idx < self.num_samples: vid = self.sample_list[idx] if self.caption_masks is not None: caption_masks = self.caption_masks[vid] else: caption_masks = None if self.distil_features is not None: distil_mod_feats = {} distil_text_feats = {} for t in self.distil_features: distil_mod_feats[t] = {} distil_text_feats[t] = {} for k in self.distil_features[t][vid]: for mod in self.distil_features[t][vid][k]: if k == 'vid_embds': distil_mod_feats[t][mod] = self.distil_features[t][vid][k][mod] elif k == 'text_embds': distil_text_feats[t][mod] = self.distil_features[t][vid][k][mod] features = {} for expert in self.experts: if expert not in self.trn_config.keys(): try: if expert in self.raw_config.keys(): features[expert] = np.mean(self.features[expert][vid], axis=0) else: features[expert] = self.features[expert][vid] except: import ipdb; ipdb.set_trace() else: raw_frame_feats = self.features[expert][vid] new_length = 1 num_frames = raw_frame_feats.shape[0] avg_duration = ((num_frames - new_length + 1) // self.trn_config[expert]) assert avg_duration > 0, "average duration must be positive" if avg_duration > 0: offsets = np.multiply(list(range(self.trn_config[expert])), avg_duration) offsets += randint(avg_duration, size=self.trn_config[expert]) new_frame_feats = np.zeros((self.trn_config[expert], raw_frame_feats.shape[1])) for idx, xx in enumerate(offsets): new_frame_feats[idx, :] = raw_frame_feats[xx, :] msg = "returning a wrong feature != segment num" assert 
new_frame_feats.shape[0] == self.trn_config[expert], msg features[expert] = new_frame_feats ind = {} for expert in self.ordered_experts: if expert in self.tensor_storage["flaky"]: ind[expert] = not self.has_missing_values(features[expert]) else: ind[expert] = 1 if self.task in {"retrieval", "retrieval-as-classification"}: text = self.text_features[vid] if self.fuse_captions: text = [np.vstack(text)] pick = None elif isinstance(text, list): if caption_masks is not None: probability = caption_masks / np.sum(caption_masks) pick = np.random.choice(len(text), size=self.captions_per_video, p=probability) assert caption_masks[pick] == 1 else: pick = np.random.choice(len(text), size=self.captions_per_video) text = np.array(text)[pick] if self.distil_features is not None: for t in distil_text_feats: for mod in distil_text_feats[t]: distil_text_feats[t][mod] = np.array(distil_text_feats[t][mod])[pick] else: pick = None text = np.random.choice(text, size=self.captions_per_video) if np.random.random() < self.text_dropout: if pick is not None: mask = np.random.random(len(text[0])) text = [text[0][mask > 0.5]] else: raise NotImplementedError("TODO: Add dropouot for picked text") if self.task in {"retrieval", "retrieval-as-classification"}: if self.distil_features is not None: sample = {"text": text, "distil_mods": distil_mod_feats, "distil_texts": distil_text_feats} else: sample = {"text": text} elif self.task == "classification": if self.class_type == "single_label": labels = self.video_labels[vid] assert len(labels) == 1, "expected single label" labels = labels[0] elif self.class_type == "multi_label": if self.cls_partition != 'test': labels = np.zeros(self.num_classes) labels[self.video_labels[vid]] = 1 else: raise ValueError(f"unknown label class type: {self.class_type}") sample = {} if self.cls_partition != 'test': sample = {"labels": labels} sample.update({"vid": vid}) else: raise ValueError(f"unknown task: {self.task}") sample.update({f"{key}_ind": val for key, val in ind.items()}) sample.update(features) return sample def get_retrieval_data(self): experts = OrderedDict( (expert, th.from_numpy(self.retrieval[expert]).float()) for expert in self.ordered_experts ) retrieval_data = { "text": ensure_tensor(self.text_retrieval).float(), "experts": experts, "ind": self.test_ind, } meta = { "query_masks": self.query_masks, "raw_captions": self.raw_captions_retrieval, "paths": self.video_path_retrieval, } return retrieval_data, meta def has_missing_values(self, x): return isinstance(x, float) and np.isnan(x) def visual_feat_paths(self, model_spec, tag=None): if model_spec not in self.ordered_experts: self.logger.info(f"Skipping load for {model_spec} (feature not requested)") return f"SKIPPED-{model_spec}" feat_type, model_name, _ = model_spec.split(".") aggs = self.feat_aggregation[model_spec] base = f"aggregated_{feat_type.replace('-', '_')}" required = ("fps", "pixel_dim", "stride") fps, pixel_dim, stride = [aggs.get(x, None) for x in required] if feat_type in {"facecrops", "faceboxes"}: base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}" elif feat_type not in {"ocr", "speech", "audio"}: base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}" for option in "offset", "inner_stride", "num_segments": if aggs.get(option, None) is not None: base += f"_{option}{aggs[option]}" feat_paths = [] for agg in aggs["temporal"].split("-"): fname = f"{model_name}-{agg}" if aggs["type"] == "logits": fname = f"{fname}-logits" if tag is not None: fname += f"-{tag}" feat_paths.append(Path(base) / f"{fname}.pickle") 
return feat_paths def log_assert(self, bool_, msg="", verbose=True): try: assert bool_, msg except AssertionError: last_stackframe = inspect.stack()[-2] source_file, line_no, func = last_stackframe[1:4] source = f"Traceback (most recent call last):\n" + f" File {source_file}, line {line_no}, in {func}\n" if verbose: source_code = open(source_file).readlines() source += "".join(source_code[line_no - 3:line_no + 1]) else: source += last_stackframe[-2][0].strip() self.logger.debug(f"{msg}\n{source}") raise AssertionError(f"{msg}\n{source}") def summary_stats(self): self.logger.info("Computing feature stats...") queries = self.ordered_experts + ["text"] for subset, keep in self.partition_lists.items(): keep = set(keep) print(f"Summary for {subset}") for expert in queries: if expert in self.features: feats = self.features[expert] else: feats = self.text_features vals = [feats[key] for key in keep] missing = 0 sizes = [] for val in vals: if self.has_missing_values(val): missing += 1 else: sizes.append(len(val)) if sizes: stat_str = (f"min: {np.min(sizes):4}, " f"max: {np.max(sizes):4}, " f"mean: {np.mean(sizes):.1f}") print(f"{subset}: missing: {missing:4}, {stat_str} {expert}") @staticmethod @typechecked def common_text_feat_paths() -> Dict[str, str]: with open("model/text_embedding_models.json") as f: supported_text_embeddings = json.load(f) return {name: f"{name}.pkl" for name in supported_text_embeddings} @staticmethod @typechecked def common_feat_names() -> List[str]: feature_names = [ "imagenet.senet154.0", "scene.densenet161.0", "imagenet.resnext101_32x48d.0", "trn.moments-trn.0", "moments_2d.resnet50.0", "i3d.i3d.0", "i3d.i3d.1", "s3dg.s3dg.0", "s3dg.s3dg.1", "r2p1d.r2p1d-ig65m.0", "r2p1d.r2p1d-ig65m.1", "r2p1d.r2p1d-ig65m-kinetics.0", "r2p1d.r2p1d-ig65m-kinetics.1", "moments_3d.moments-resnet3d50.0", "moments_3d.moments-resnet3d50.1" ] return feature_names
Apache License 2.0
mattvonrocketstein/smash
smashlib/ipy3x/kernel/manager.py
run_kernel
python
def run_kernel(**kwargs):
    km, kc = start_new_kernel(**kwargs)
    try:
        yield kc
    finally:
        kc.stop_channels()
        km.shutdown_kernel(now=True)
Context manager to create a kernel in a subprocess.

The kernel is shut down when the context exits.

Returns
-------
kernel_client: connected KernelClient instance
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/kernel/manager.py#L445-L459
from __future__ import absolute_import from contextlib import contextmanager import os import re import signal import sys import time import warnings try: from queue import Empty except ImportError: from Queue import Empty import zmq from IPython.utils.importstring import import_item from IPython.utils.localinterfaces import is_local_ip, local_ips from IPython.utils.path import get_ipython_dir from IPython.utils.traitlets import ( Any, Instance, Unicode, List, Bool, Type, DottedObjectName ) from IPython.kernel import ( launch_kernel, kernelspec, ) from .connect import ConnectionFileMixin from .zmq.session import Session from .managerabc import ( KernelManagerABC ) class KernelManager(ConnectionFileMixin): context = Instance(zmq.Context) def _context_default(self): return zmq.Context.instance() client_class = DottedObjectName( 'IPython.kernel.blocking.BlockingKernelClient') client_factory = Type() def _client_class_changed(self, name, old, new): self.client_factory = import_item(str(new)) kernel = Any() kernel_spec_manager = Instance(kernelspec.KernelSpecManager) def _kernel_spec_manager_default(self): return kernelspec.KernelSpecManager(ipython_dir=self.ipython_dir) kernel_name = Unicode(kernelspec.NATIVE_KERNEL_NAME) kernel_spec = Instance(kernelspec.KernelSpec) def _kernel_spec_default(self): return self.kernel_spec_manager.get_kernel_spec(self.kernel_name) def _kernel_name_changed(self, name, old, new): if new == 'python': self.kernel_name = kernelspec.NATIVE_KERNEL_NAME return self.kernel_spec = self.kernel_spec_manager.get_kernel_spec(new) self.ipython_kernel = new in {'python', 'python2', 'python3'} kernel_cmd = List(Unicode, config=True, help="""DEPRECATED: Use kernel_name instead. The Popen Command to launch the kernel. Override this if you have a custom kernel. If kernel_cmd is specified in a configuration file, IPython does not pass any arguments to the kernel, because it cannot make any assumptions about the arguments that the kernel understands. In particular, this means that the kernel does not receive the option --debug if it given on the IPython command line. 
""" ) def _kernel_cmd_changed(self, name, old, new): warnings.warn("Setting kernel_cmd is deprecated, use kernel_spec to " "start different kernels.") self.ipython_kernel = False ipython_kernel = Bool(True) ipython_dir = Unicode() def _ipython_dir_default(self): return get_ipython_dir() _launch_args = Any() _control_socket = Any() _restarter = Any() autorestart = Bool(False, config=True, help="""Should we autorestart the kernel if it dies.""" ) def __del__(self): self._close_control_socket() self.cleanup_connection_file() def start_restarter(self): pass def stop_restarter(self): pass def add_restart_callback(self, callback, event='restart'): if self._restarter is None: return self._restarter.add_callback(callback, event) def remove_restart_callback(self, callback, event='restart'): if self._restarter is None: return self._restarter.remove_callback(callback, event) def client(self, **kwargs): if self.client_factory is None: self.client_factory = import_item(self.client_class) kw = {} kw.update(self.get_connection_info()) kw.update(dict( connection_file=self.connection_file, session=self.session, parent=self, )) kw.update(kwargs) return self.client_factory(**kw) def format_kernel_cmd(self, extra_arguments=None): extra_arguments = extra_arguments or [] if self.kernel_cmd: cmd = self.kernel_cmd + extra_arguments else: cmd = self.kernel_spec.argv + extra_arguments ns = dict(connection_file=self.connection_file) ns.update(self._launch_args) pat = re.compile(r'\{([A-Za-z0-9_]+)\}') def from_ns(match): return ns.get(match.group(1), match.group()) return [pat.sub(from_ns, arg) for arg in cmd] def _launch_kernel(self, kernel_cmd, **kw): return launch_kernel(kernel_cmd, **kw) def _connect_control_socket(self): if self._control_socket is None: self._control_socket = self.connect_control() self._control_socket.linger = 100 def _close_control_socket(self): if self._control_socket is None: return self._control_socket.close() self._control_socket = None def start_kernel(self, **kw): if self.transport == 'tcp' and not is_local_ip(self.ip): raise RuntimeError("Can only launch a kernel on a local interface. " "Make sure that the '*_address' attributes are " "configured properly. " "Currently valid addresses are: %s" % local_ips() ) self.write_connection_file() self._launch_args = kw.copy() extra_arguments = kw.pop('extra_arguments', []) kernel_cmd = self.format_kernel_cmd(extra_arguments=extra_arguments) if self.kernel_cmd: env = os.environ else: env = os.environ.copy() env.update(self.kernel_spec.env or {}) self.kernel = self._launch_kernel(kernel_cmd, env=env, **kw) self.start_restarter() self._connect_control_socket() def request_shutdown(self, restart=False): content = dict(restart=restart) msg = self.session.msg("shutdown_request", content=content) self.session.send(self._control_socket, msg) def finish_shutdown(self, waittime=1, pollinterval=0.1): for i in range(int(waittime / pollinterval)): if self.is_alive(): time.sleep(pollinterval) else: break else: if self.has_kernel: self._kill_kernel() def cleanup(self, connection_file=True): if connection_file: self.cleanup_connection_file() self.cleanup_ipc_files() self._close_control_socket() def shutdown_kernel(self, now=False, restart=False): self.stop_restarter() if now: self._kill_kernel() else: self.request_shutdown(restart=restart) self.finish_shutdown() self.cleanup(connection_file=not restart) def restart_kernel(self, now=False, **kw): if self._launch_args is None: raise RuntimeError("Cannot restart the kernel. 
" "No previous call to 'start_kernel'.") else: self.shutdown_kernel(now=now, restart=True) self._launch_args.update(kw) self.start_kernel(**self._launch_args) @property def has_kernel(self): return self.kernel is not None def _kill_kernel(self): if self.has_kernel: try: self.kernel.kill() except OSError as e: if sys.platform == 'win32': if e.winerror != 5: raise else: from errno import ESRCH if e.errno != ESRCH: raise self.kernel.wait() self.kernel = None else: raise RuntimeError("Cannot kill kernel. No kernel is running!") def interrupt_kernel(self): if self.has_kernel: if sys.platform == 'win32': from .zmq.parentpoller import ParentPollerWindows as Poller Poller.send_interrupt(self.kernel.win32_interrupt_event) else: self.kernel.send_signal(signal.SIGINT) else: raise RuntimeError( "Cannot interrupt kernel. No kernel is running!") def signal_kernel(self, signum): if self.has_kernel: self.kernel.send_signal(signum) else: raise RuntimeError("Cannot signal kernel. No kernel is running!") def is_alive(self): if self.has_kernel: if self.kernel.poll() is None: return True else: return False else: return False KernelManagerABC.register(KernelManager) def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs): km = KernelManager(kernel_name=kernel_name) km.start_kernel(**kwargs) kc = km.client() kc.start_channels() kc.kernel_info() kc.get_shell_msg(block=True, timeout=startup_timeout) for channel in (kc.shell_channel, kc.iopub_channel): while True: try: channel.get_msg(block=True, timeout=0.1) except Empty: break return km, kc @contextmanager
MIT License
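A brief usage sketch for the record above (not part of the original source). The import path is inferred from the file path in the record, and `kc.execute` is the standard kernel-client call assumed to be available on the returned client:

from smashlib.ipy3x.kernel.manager import run_kernel  # import path inferred from the record

with run_kernel() as kc:
    kc.execute("a = 1 + 1")                           # send one statement to the kernel
    reply = kc.get_shell_msg(block=True, timeout=10)  # wait for the execute reply
    print(reply['content']['status'])                 # expected: 'ok'
# leaving the block stops the channels and shuts the kernel down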
zhmcclient/zhmc-ansible-modules
plugins/module_utils/common.py
get_session
python
def get_session(faked_session, host, userid, password, ca_certs, verify):
    if isinstance(faked_session, FakedSession):
        return faked_session
    else:
        verify_cert = ca_certs if verify else False
        return Session(host, userid, password, verify_cert=verify_cert)
Return a session object for the HMC.

Parameters:

  faked_session (zhmcclient_mock.FakedSession or None):
    If this object is a `zhmcclient_mock.FakedSession` object, return that
    object. Else, return a new `zhmcclient.Session` object from the other
    arguments.
https://github.com/zhmcclient/zhmc-ansible-modules/blob/95ee6363448e917906dc9c7925671eab0357542e/plugins/module_utils/common.py#L372-L386
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import logging import traceback import platform import sys from ansible.module_utils import six try: from zhmcclient import Session IMP_ZHMCCLIENT = True except ImportError: IMP_ZHMCCLIENT = False IMP_ZHMCCLIENT_ERR = traceback.format_exc() try: from zhmcclient_mock import FakedSession IMP_ZHMCCLIENT_MOCK = True except ImportError: IMP_ZHMCCLIENT_MOCK = False IMP_ZHMCCLIENT_ERR_MOCK = traceback.format_exc() class Error(Exception): pass class ParameterError(Error): pass class StatusError(Error): pass START_END_STATUSES = ('active', 'degraded', 'reservation-error') STOP_END_STATUSES = ('stopped', 'terminated', 'paused') BAD_STATUSES = ('communications-not-active', 'status-check') def missing_required_lib(library, reason=None, url=None): hostname = platform.node() msg = "Failed to import the required Python library " "(%s) on %s's Python %s." % (library, hostname, sys.executable) if reason: msg += " This is required %s." % reason if url: msg += " See %s for more info." % url msg += (" Please read module documentation and install in the appropriate " "location. If the required library is installed, but Ansible is " "using the wrong Python interpreter, please consult the " "documentation on ansible_python_interpreter") return msg def eq_hex(hex_actual, hex_new, prop_name): if hex_actual: try: int_actual = int(hex_actual, 16) except ValueError: raise ParameterError( "Unexpected: Actual value of property {0!r} is not a valid " "hex number: {1!r}".format(prop_name, hex_actual)) else: int_actual = None if hex_new: try: int_new = int(hex_new, 16) except ValueError: raise ParameterError( "New value for property {0!r} is not a valid " "hex number: {1!r}".format(prop_name, hex_new)) else: int_new = None return int_actual == int_new def _normalized_mac(mac_str): mac_ints = [int(h, 16) for h in mac_str.split(':')] mac_str = ':'.join(["%02x" % i for i in mac_ints]) return mac_str def eq_mac(mac_actual, mac_new, prop_name): if mac_actual: try: mac_actual = _normalized_mac(mac_actual) except ValueError: raise ParameterError( "Unexpected: Actual value of property {0!r} is not a valid " "MAC address: {1!r}".format(prop_name, mac_actual)) else: mac_actual = None if mac_new: try: mac_new = _normalized_mac(mac_new) except ValueError: raise ParameterError( "New value for property {0!r} is not a valid " "MAC address: {1!r}".format(prop_name, mac_new)) else: mac_new = None return mac_actual == mac_new def get_hmc_auth(hmc_auth): try: userid = hmc_auth['userid'] except KeyError: raise ParameterError("Required item 'userid' is missing in " "dictionary module parameter 'hmc_auth'.") try: password = hmc_auth['password'] except KeyError: raise ParameterError("Required item 'password' is missing in " "dictionary module parameter 'hmc_auth'.") ca_certs = hmc_auth.get('ca_certs', None) verify = hmc_auth.get('verify', True) return userid, password, ca_certs, verify def pull_partition_status(partition): parts = partition.manager.cpc.partitions.list( filter_args={'name': partition.name}) if len(parts) != 1: raise AssertionError() this_part = parts[0] actual_status = this_part.get_property('status') return actual_status def stop_partition(partition, check_mode): changed = False partition.pull_full_properties() status = partition.get_property('status') if status in BAD_STATUSES: raise StatusError( "Target CPC {0!r} has issues; status of partition {1!r} is: {2!r}". 
format(partition.manager.cpc.name, partition.name, status)) elif status == 'stopped': pass elif status == 'starting': if not check_mode: partition.wait_for_status(START_END_STATUSES) start_end_status = pull_partition_status(partition) partition.stop() status = pull_partition_status(partition) if status != 'stopped': raise StatusError( "Could not get partition {0!r} from {1!r} status into " "'stopped' status after waiting for its starting to " "complete; current status is: {2!r}". format(partition.name, start_end_status, status)) changed = True elif status == 'stopping': if not check_mode: partition.wait_for_status(STOP_END_STATUSES) stop_end_status = pull_partition_status(partition) if stop_end_status != 'stopped': partition.stop() status = pull_partition_status(partition) if status != 'stopped': raise StatusError( "Could not get partition {0!r} from {1!r} status into " "'stopped' status after waiting for its stopping to " "complete; current status is: {2!r}". format(partition.name, stop_end_status, status)) changed = True else: if not check_mode: previous_status = pull_partition_status(partition) partition.stop() status = pull_partition_status(partition) if status != 'stopped': raise StatusError( "Could not get partition {0!r} from {1!r} status into " "'stopped' status; current status is: {2!r}". format(partition.name, previous_status, status)) changed = True return changed def start_partition(partition, check_mode): changed = False partition.pull_full_properties() status = partition.get_property('status') if status in BAD_STATUSES: raise StatusError( "Target CPC {0!r} has issues; status of partition {1!r} is: {2!r}". format(partition.manager.cpc.name, partition.name, status)) elif status in START_END_STATUSES: pass elif status == 'stopping': if not check_mode: partition.wait_for_status(STOP_END_STATUSES) stop_end_status = pull_partition_status(partition) partition.start() status = pull_partition_status(partition) if status not in START_END_STATUSES: raise StatusError( "Could not get partition {0!r} from {1!r} status into " "a started status after waiting for its stopping to " "complete; current status is: {2!r}". format(partition.name, stop_end_status, status)) changed = True elif status == 'starting': if not check_mode: partition.wait_for_status(START_END_STATUSES) changed = True else: if not check_mode: previous_status = pull_partition_status(partition) partition.start() status = pull_partition_status(partition) if status not in START_END_STATUSES: raise StatusError( "Could not get partition {0!r} from {1!r} status into " "a started status; current status is: {2!r}". format(partition.name, previous_status, status)) changed = True return changed def wait_for_transition_completion(partition): partition.pull_full_properties() status = partition.get_property('status') if status in BAD_STATUSES: raise StatusError( "Target CPC {0!r} has issues; status of partition {1!r} is: {2!r}". format(partition.manager.cpc.name, partition.name, status)) elif status == 'stopping': partition.wait_for_status(STOP_END_STATUSES) elif status == 'starting': partition.wait_for_status(START_END_STATUSES) else: if not (status in START_END_STATUSES or status in STOP_END_STATUSES): raise AssertionError()
Apache License 2.0
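An illustrative call for the record above (not part of the source), assuming `get_session` has been imported from the module; the host and credential values are placeholders:

# With no faked session supplied, a real zhmcclient.Session is created.
# verify=False disables certificate verification regardless of ca_certs.
session = get_session(None, '10.11.12.13', 'hmcuser', 'secret',
                      ca_certs=None, verify=False)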
midnighter/structurizr-python
src/structurizr/view/static_view.py
StaticView.add_all_people
python
def add_all_people(self) -> None:
    for person in self.model.people:
        self.add(person)
Add all people in the model to this view.
https://github.com/midnighter/structurizr-python/blob/9d482a5ad5a4a867b0b6e798ced137c5f1e1ac25/src/structurizr/view/static_view.py#L93-L96
from abc import ABC, abstractmethod from typing import Dict, Iterable, List, Optional, Type, Union from ..model import Container, Element, Person, SoftwareSystem from .animation import Animation, AnimationIO from .view import View, ViewIO __all__ = ("StaticView", "StaticViewIO") class StaticViewIO(ViewIO, ABC): animations: List[AnimationIO] = [] class StaticView(View, ABC): def __init__( self, *, animations: Optional[Iterable[Animation]] = None, **kwargs ) -> None: super().__init__(**kwargs) self.animations = [] if animations is None else list(animations) @classmethod def hydrate_arguments(cls, static_view_io: StaticViewIO) -> Dict: return { **super().hydrate_arguments(static_view_io), "animations": map(Animation.hydrate, static_view_io.animations), } @abstractmethod def add_all_elements(self) -> None: pass def add( self, element: Union[Person, SoftwareSystem], add_relationships: bool = True, ) -> None: self._add_element(element, add_relationships)
Apache License 2.0
tmarenko/mff_auto
lib/game/routines/alliance.py
Alliance.collect_energy_from_challenges
python
def collect_energy_from_challenges(self, collect_daily=True, collect_weekly=True):
    if not collect_daily and not collect_weekly:
        logger.info("Nothing to collect.")
        return self.game.go_to_main_menu()
    self.game.go_to_alliance()
    if not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_CHALLENGES_TAB):
        logger.error(f"Can't find {ui.ALLIANCE_CHALLENGES_TAB} tab, exiting.")
        return self.game.go_to_main_menu()
    self.emulator.click_button(ui.ALLIANCE_CHALLENGES_TAB)
    if collect_daily and wait_until(self.emulator.is_ui_element_on_screen,
                                    ui_element=ui.ALLIANCE_CHALLENGES_DAILY_ENERGY):
        logger.info("Collecting daily energy from challenge.")
        self.emulator.click_button(ui.ALLIANCE_CHALLENGES_DAILY_ENERGY)
        if wait_until(self.emulator.is_ui_element_on_screen,
                      ui_element=ui.ALLIANCE_CHALLENGES_REWARD_CLOSE):
            self.emulator.click_button(ui.ALLIANCE_CHALLENGES_REWARD_CLOSE)
    if collect_weekly and wait_until(self.emulator.is_ui_element_on_screen,
                                     ui_element=ui.ALLIANCE_CHALLENGES_WEEKLY_ENERGY):
        logger.info("Collecting weekly energy from challenge.")
        self.emulator.click_button(ui.ALLIANCE_CHALLENGES_WEEKLY_ENERGY)
        if wait_until(self.emulator.is_ui_element_on_screen,
                      ui_element=ui.ALLIANCE_CHALLENGES_REWARD_CLOSE):
            self.emulator.click_button(ui.ALLIANCE_CHALLENGES_REWARD_CLOSE)
    self.game.go_to_main_menu()
Collects energy from Alliance Challenges.

:param bool collect_daily: collect daily rewards or not.
:param bool collect_weekly: collect weekly rewards or not.
https://github.com/tmarenko/mff_auto/blob/e5d150c4a76d13f05652bbde811c4c5cd0f2246e/lib/game/routines/alliance.py#L168-L197
import lib.logger as logging from lib.functions import wait_until, r_sleep from lib.game import ui from lib.game.notifications import Notifications logger = logging.get_logger(__name__) class Alliance(Notifications): class STORE_ITEM: ENERGY = "ALLIANCE_STORE_ENERGY_ITEM_1" UNIFORM_EXP_CHIP = "ALLIANCE_STORE_UNIFORM_EXP_CHIP_ITEM_2" HIDDEN_TICKET = "ALLIANCE_STORE_HIDDEN_TICKET_ITEM_3" BOOST_POINT = "ALLIANCE_STORE_BOOST_POINT_ITEM_4" class SUPPORT_ITEM: NORN_STONE_OF_STRENGTH = "ALLIANCE_SUPPORT_REQUEST_NORN_STONE_OF_STRENGTH" NORN_STONE_OF_ENERGY = "ALLIANCE_SUPPORT_REQUEST_NORN_STONE_OF_ENERGY" NORN_STONE_OF_BRILLIANCE = "ALLIANCE_SUPPORT_REQUEST_NORN_STONE_OF_BRILLIANCE" NORN_STONE_OF_OMNIPOTENCE = "ALLIANCE_SUPPORT_REQUEST_NORN_STONE_OF_OMNIPOTENCE" BLACK_ANTI_MATTER = "ALLIANCE_SUPPORT_REQUEST_BLACK_ANTI_MATTER" NORN_STONE_OF_CHAOS = "ALLIANCE_SUPPORT_REQUEST_NORN_STONE_OF_CHAOS" MKRAAN_SHARD = "ALLIANCE_SUPPORT_REQUEST_MKRAAN_SHARD" PHOENIX_FEATHER = "ALLIANCE_SUPPORT_REQUEST_PHOENIX_FEATHER" MKRAAN_CRYSTAL = "ALLIANCE_SUPPORT_REQUEST_MKRAAN_CRYSTAL" GEAR_UP_KIT = "ALLIANCE_SUPPORT_REQUEST_GEAR_UP_KIT" DIMENSION_DEBRIS = "ALLIANCE_SUPPORT_REQUEST_DIMENSION_DEBRIS" ON_SECOND_LIST = [MKRAAN_SHARD, PHOENIX_FEATHER, MKRAAN_CRYSTAL, GEAR_UP_KIT, DIMENSION_DEBRIS] def check_in(self): self.game.go_to_alliance() if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_CHECK_IN): self.emulator.click_button(ui.ALLIANCE_CHECK_IN) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_CHECK_IN_CLOSE): self.emulator.click_button(ui.ALLIANCE_CHECK_IN_CLOSE) self.game.go_to_main_menu() def donate_resources(self, donate_gold=True, donate_memento=True): if not donate_gold and not donate_memento: logger.info("Nothing to donate.") return self.game.go_to_main_menu() self.game.go_to_alliance() if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_DONATE): self.emulator.click_button(ui.ALLIANCE_DONATE) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_DONATION_MENU): if donate_gold: logger.debug("Maxing GOLD for donation.") self.emulator.click_button(ui.ALLIANCE_DONATION_MAX_GOLD) if donate_memento: logger.debug("Maxing ALLIANCE MEMENTO for donation.") self.emulator.click_button(ui.ALLIANCE_DONATION_MAX_MEMENTO) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_DONATION_CONFIRM): logger.info("Donating resources for Alliance.") self.emulator.click_button(ui.ALLIANCE_DONATION_CONFIRM) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_DONATION_REWARD_CLOSE): logger.info("Resources were donated, exiting.") self.emulator.click_button(ui.ALLIANCE_DONATION_REWARD_CLOSE) else: logger.warning("Can't donate resource for Alliance. 
Probably already donated, exiting.") self.emulator.click_button(ui.ALLIANCE_DONATION_CANCEL) self.game.go_to_main_menu() def buy_items_from_store(self, items=None, buy_all_available=True): self.game.go_to_alliance() if not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_STORE_TAB): logger.error(f"Can't find {ui.ALLIANCE_STORE_TAB} tab, exiting.") return self.game.go_to_main_menu() self.emulator.click_button(ui.ALLIANCE_STORE_TAB) self.game.close_ads() if isinstance(items, str): items = [items] for item in items: logger.debug(f"Trying to buy {item}.") bought = self._buy_item_once(item) if buy_all_available and bought: while bought: logger.debug(f"Trying to buy {item} again.") bought = self._buy_item_once(item) self.game.go_to_main_menu() def _buy_item_once(self, item): if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.get_by_name(item)): self.emulator.click_button(ui.get_by_name(item)) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_STORE_PURCHASE): logger.debug("Purchasing via Alliance Tokens.") self.emulator.click_button(ui.ALLIANCE_STORE_PURCHASE) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_STORE_PURCHASE_CLOSE): logger.info("Item was bought.") self.emulator.click_button(ui.ALLIANCE_STORE_PURCHASE_CLOSE) return True if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_STORE_PURCHASE_NO_TOKENS): logger.info("Not enough Alliance Tokens for purchase.") self.emulator.click_button(ui.ALLIANCE_STORE_PURCHASE_NO_TOKENS) return False if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_STORE_PURCHASE_LIMIT): logger.info("Reached daily limit for purchasing.") self.emulator.click_button(ui.ALLIANCE_STORE_PURCHASE_LIMIT) return False logger.warning(f"Item {item} was not found in the Alliance Store.") return False def request_support_item(self, support_item): self.game.go_to_alliance() if not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_TAB): logger.error(f"Can't find {ui.ALLIANCE_SUPPORT_TAB} tab, exiting.") return self.game.go_to_main_menu() self.emulator.click_button(ui.ALLIANCE_SUPPORT_TAB) self.claim_support_item() if not wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_REQUEST): logger.warning("Can't request support item for now, exiting.") return self.game.go_to_main_menu() self.emulator.click_button(ui.ALLIANCE_SUPPORT_REQUEST) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_REQUEST_MENU): if support_item in self.SUPPORT_ITEM.ON_SECOND_LIST: self._drag_support_item_list() logger.debug(f"Sending support request for item {support_item}.") self.emulator.click_button(ui.get_by_name(support_item)) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_REQUEST_CONFIRM): self.emulator.click_button(ui.ALLIANCE_SUPPORT_REQUEST_CONFIRM) r_sleep(1) self.game.go_to_main_menu() def claim_support_item(self): if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_CLAIM): logger.info("Claiming previous support request.") self.emulator.click_button(ui.ALLIANCE_SUPPORT_CLAIM) if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.ALLIANCE_SUPPORT_CLAIM_CLOSE): self.emulator.click_button(ui.ALLIANCE_SUPPORT_CLAIM_CLOSE) return True return False def _drag_support_item_list(self): logger.debug("Dragging list to the bottom.") self.emulator.drag(ui.ALLIANCE_SUPPORT_REQUEST_MENU_DRAG_BOTTOM, 
ui.ALLIANCE_SUPPORT_REQUEST_MENU_DRAG_TOP) r_sleep(1)
Apache License 2.0
parrot-developers/olympe
src/olympe/arsdkng/drone.py
ControllerBase._disable_legacy_video_streaming_impl
python
def _disable_legacy_video_streaming_impl(self):
    try:
        ret = self._send_command_impl(ardrone3.mediaStreaming.VideoEnable, 0)
    except Exception as e:
        return makeReturnTuple(
            ErrorCodeDrone.ERROR_BAD_STATE,
            "MediaStreaming_VideoEnable 0 Failed {}".format(str(e))
        )
    if ret != ErrorCodeDrone.OK:
        return makeReturnTuple(
            ErrorCodeDrone.ERROR_BAD_STATE, "MediaStreaming_VideoEnable 0 Failed")
    else:
        return makeReturnTuple(
            ErrorCodeDrone.OK, "MediaStreaming_VideoEnable 0 Success")
    return ret
Disable the streaming on legacy drones (pre-anafi)
https://github.com/parrot-developers/olympe/blob/08cd2d2f5880b766f94886c530f367f9b4cfc491/src/olympe/arsdkng/drone.py#L535-L554
from __future__ import unicode_literals from __future__ import print_function from __future__ import absolute_import from future.builtins import str, bytes import ctypes import datetime import functools import inspect import json import olympe_deps as od import pprint import re import time from tzlocal import get_localzone from collections import OrderedDict from logging import getLogger from warnings import warn import olympe.arsdkng.enums as enums import olympe.arsdkng.messages as messages from olympe.arsdkng.expectations import ( AbstractScheduler, Scheduler, FailedExpectation) from olympe.arsdkng.backend import Backend from olympe.arsdkng.discovery import DiscoveryNet, DiscoveryNetRaw, SKYCTRL_DEVICE_TYPE_LIST from olympe.arsdkng.pdraw import Pdraw, PDRAW_LOCAL_STREAM_PORT from olympe.arsdkng.pdraw import PDRAW_LOCAL_CONTROL_PORT from olympe.media import Media from olympe.tools.error import ErrorCodeDrone from olympe._private import makeReturnTuple, DEFAULT_FLOAT_TOL, py_object_cast, callback_decorator from olympe._private.controller_state import ControllerState from olympe._private.pomp_loop_thread import Future from olympe._private.format import columns as format_columns from olympe.messages import ardrone3 from olympe.messages import common from olympe.messages import skyctrl from olympe.messages import drone_manager from olympe.enums import drone_manager as drone_manager_enums from concurrent.futures import TimeoutError as FutureTimeoutError def ensure_connected(function): @functools.wraps(function) def wrapper(self, *args, **kwargs): if not self._device_conn_status.connected: self.logger.info( "Disconnection has been detected, reconnection will be done") if not self.connect(): self.logger.error("Cannot make connection") return makeReturnTuple( ErrorCodeDrone.ERROR_CONNECTION, "Cannot make connection" ) result = function(self, *args, **kwargs) return result return wrapper class ControllerBase(AbstractScheduler): def __init__(self, ip_addr, name=None, dcport=44444, drone_type=0, is_skyctrl=None, video_buffer_queue_size=8, media_autoconnect=True): self._name = name self._device_name = None if self._name is not None: self.logger = getLogger("olympe.{}.drone".format(name)) else: self.logger = getLogger("olympe.drone") self._ip_addr_str = str(ip_addr) self._ip_addr = ip_addr.encode('utf-8') self._backend = Backend(name=name) self._thread_loop = self._backend._thread_loop self.video_buffer_queue_size = video_buffer_queue_size self._scheduler = Scheduler(self._thread_loop, name=self._name) self._scheduler.add_context("olympe.controller", self) self._media = None self._media_autoconnect = media_autoconnect self.enums = enums.ArsdkEnums.get() self.messages = OrderedDict() for id_, message_type in messages.ArsdkMessages.get().by_id.items(): message = message_type.new() self.messages[message.id] = message message._bind_send_command(self._send_command) for cmd_aliases in message.aliases: self.__dict__[cmd_aliases] = message.send self._decoding_errors = [] self.error_code_drones = ErrorCodeDrone() self._controller_state = ControllerState() self._connect_future = None self._disconnect_future = None self._device_conn_status = self._controller_state.device_conn_status self._device_states = self._controller_state.device_states self._piloting_command = self._controller_state.piloting_command self._is_skyctrl = is_skyctrl self._pdraw = None self._reset_instance() self._thread_loop.register_cleanup(self.destroy) self._declare_callbacks() self._piloting_timer = self._thread_loop.create_timer( 
self._piloting_timer_cb) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.destroy() def _declare_callbacks(self): self._device_cbs_cfg = od.struct_arsdk_device_conn_cbs.bind({ "connecting": self._connecting_cb, "connected": self._connected_cb, "disconnected": self._disconnected_cb, "canceled": self._canceled_cb, "link_status": self._link_status_cb, }) self._send_status = od.arsdk_cmd_itf_send_status_cb_t(self._cmd_itf_send_status_cb) self._send_status_userdata = {} self._userdata = ctypes.c_void_p() self._cmd_itf_cbs = od.struct_arsdk_cmd_itf_cbs.bind({ "dispose": self._dispose_cmd_cb, "recv_cmd": self._recv_cmd_cb, "send_status": self._send_status, }) @callback_decorator() def _connecting_cb(self, _arsdk_device, arsdk_device_info, _user_data): self.logger.info("Connecting to device: {}".format( od.string_cast(arsdk_device_info.contents.name))) @callback_decorator() def _connected_cb(self, _arsdk_device, arsdk_device_info, _user_data): device_name = od.string_cast(arsdk_device_info.contents.name) if self._device_name is None: self._device_name = device_name if self._name is None: self.logger = getLogger( "olympe.drone.{}".format(self._device_name)) self.logger.info("Connected to device: {}".format(device_name)) json_info = od.string_cast(arsdk_device_info.contents.json) try: self._controller_state.device_conn_status.device_infos["json"] = json.loads(json_info) self.logger.info( '%s' % pprint.pformat(self._controller_state.device_conn_status.device_infos)) except ValueError: self.logger.error( 'json contents cannot be parsed: {}'.format(json_info)) self._controller_state.device_conn_status.connected = True if not self._is_skyctrl and self._media_autoconnect: media_hostname = self._ip_addr_str if self._media is not None: self._media.shutdown() self._media = None self._media = Media( media_hostname, name=self._name, device_name=self._device_name, scheduler=self._scheduler ) self._media.async_connect() if self._connect_future is not None: self._connect_future.set_result(True) @callback_decorator() def _disconnected_cb(self, _arsdk_device, arsdk_device_info, _user_data): self.logger.info("Disconnected from device: {}".format( od.string_cast(arsdk_device_info.contents.name))) self._controller_state.device_conn_status.connected = False if self._disconnect_future is not None: self._disconnect_future.set_result(True) self._thread_loop.run_later(self._on_device_removed) @callback_decorator() def _canceled_cb(self, _arsdk_device, arsdk_device_info, reason, _user_data): reason_txt = od.string_cast( od.arsdk_conn_cancel_reason_str(reason)) self.logger.info( "Connection to device: {} has been canceled for reason: {}".format( od.string_cast(arsdk_device_info.contents.name), reason_txt)) if self._connect_future is not None: self._connect_future.set_result(False) self._thread_loop.run_later(self._on_device_removed) @callback_decorator() def _link_status_cb(self, _arsdk_device, _arsdk_device_info, status, _user_data): self.logger.info("Link status: {}".format(status)) if status == od.ARSDK_LINK_STATUS_KO: self._controller_state.device_conn_status.connected = False self._thread_loop.run_later(self._on_device_removed) @callback_decorator() def _recv_cmd_cb(self, _interface, command, _user_data): message_id = command.contents.id if message_id not in self.messages.keys(): feature_name, class_name, msg_id = ( messages.ArsdkMessages.get().unknown_message_info(message_id)) if feature_name is not None: if class_name is not None: scope = "{}.{}".format(feature_name, class_name) 
else: scope = feature_name self.logger.warning( "Unknown message id: {} in {}".format(msg_id, scope) ) else: self.logger.warning( "Unknown message id 0x{:08x}".format(message_id)) return message = self.messages[message_id] try: res, message_args = message._decode_args(command) except Exception as e: self.logger.exception("Failed to decode message {}".format(message)) self._decoding_errors.append(e) return if res != 0: msg = ("Unable to decode event, error: {} , id: {} , name: {}". format(res, command.contents.id, message.FullName)) self.logger.error(msg) self._decoding_errors.append(RuntimeError(msg)) try: message_event = message._event_from_args(*message_args) except Exception as e: self.logger.exception("Failed to decode message {}{}".format( message, message_args)) self._decoding_errors.append(e) return if message.message_type is messages.ArsdkMessageType.CMD: msg = (f"a commande message has been received " f"from the drone: {message_event}") self.logger.error(msg) self._decoding_errors.append(RuntimeError(msg)) return self._update_states(message, message_args, message_event) self.logger.log(message.loglevel, str(message_event)) if self._is_skyctrl and message_id == drone_manager.connection_state.id: if message_event._args["state"] == drone_manager_enums.connection_state.connected: self.logger.info("Skycontroller connected to drone") all_states_settings_commands = [ common.Common.AllStates, common.Settings.AllSettings] for all_states_settings_command in all_states_settings_commands: self._send_command( all_states_settings_command, _no_expect=True, _async=True, _ensure_connected=False ) if self._media_autoconnect: if self._media is not None: self._media.shutdown() self._media = None media_hostname = self._ip_addr_str + ":180" self._media = Media( media_hostname, name=self._name, device_name=self._device_name, scheduler=self._scheduler ) self._media.async_connect() if message_event._args["state"] == drone_manager_enums.connection_state.disconnecting: self.logger.info("Skycontroller disconnected from drone") if self._media is not None: self._media.shutdown() self._media = None self._scheduler.process_event(message_event) def _synchronize_clock(self): date_time = datetime.datetime.now( get_localzone()).strftime("%Y%m%dT%H%M%S%z") if not self._is_skyctrl: current_date_time = common.Common.CurrentDateTime else: current_date_time = skyctrl.Common.CurrentDateTime res = self._send_command( current_date_time, date_time, _ensure_connected=False, _async=True, _timeout=0.5 ) def _on_sync_done(res): if not res.success(): msg = "Time synchronization failed for {}".format( self._ip_addr) self.logger.warning(msg) else: self.logger.info("Synchronization of {} at {}".format( self._ip_addr, date_time)) res.add_done_callback(_on_sync_done) def _update_states(self, message, message_args, message_event): message._set_last_event(message_event) message_name = message.FULL_NAME callback_type = message.callback_type args_pos = message.args_pos message_args = OrderedDict(( (name.upper(), message_args[pos]) for name, pos in args_pos.items() )) if callback_type == messages.ArsdkMessageCallbackType.STANDARD: self._controller_state.device_states.states[message_name] = message_args elif callback_type == messages.ArsdkMessageCallbackType.MAP: key = message_args[message.key_name.upper()] self._controller_state.device_states.states[message_name][key] = message_args elif callback_type == messages.ArsdkMessageCallbackType.LIST: insert_pos = next(reversed(self._controller_state.device_states.states[message_name]), -1) + 1 
self._controller_state.device_states.states[message_name][insert_pos] = message_args @callback_decorator() def _piloting_timer_cb(self, timer, _user_data): self.logger.debug("piloting timer callback: {}".format(timer)) if self._controller_state.device_conn_status.connected: self._send_piloting_command() @callback_decorator() def _dispose_cmd_cb(self, _interface, _user_data): self.logger.debug("Dispose callback") @callback_decorator() def _cmd_itf_send_status_cb(self, _interface, _command, status, done, userdata): status_repr = od.arsdk_cmd_itf_send_status__enumvalues.get( status, status) done = bool(done) send_status_userdata = py_object_cast(userdata) send_command_future, message, args = send_status_userdata self.logger.debug( f"Command send status: {message.fullName} " f"{status_repr}, done: {done}" ) if not done: return if status in ( od.ARSDK_CMD_ITF_SEND_STATUS_ACK_RECEIVED, od.ARSDK_CMD_ITF_SEND_STATUS_SENT): send_command_future.set_result(True) else: send_command_future.set_result(False) self.logger.error( "Command send status cancel/timeout: " f"{message.fullName} {status_repr}, done: {done}" ) del self._send_status_userdata[id(send_command_future)] def _destroy_pdraw(self): if self._pdraw is not None: self._pdraw.dispose() self._pdraw = None def _create_pdraw_interface(self): legacy_streaming = not ( self._is_skyctrl or od.arsdk_device_type__enumvalues[ self._device_type].startswith("ARSDK_DEVICE_TYPE_ANAFI") ) return Pdraw( name=self._name, device_name=self._device_name, legacy=legacy_streaming, buffer_queue_size=self.video_buffer_queue_size, ) @callback_decorator() def _enable_legacy_video_streaming_impl(self): try: ret = self._send_command_impl(ardrone3.mediaStreaming.VideoEnable, 1) except Exception as e: return makeReturnTuple( ErrorCodeDrone.ERROR_BAD_STATE, "MediaStreaming_VideoEnable 1 Failed {}".format(str(e)) ) if ret != ErrorCodeDrone.OK: return makeReturnTuple( ErrorCodeDrone.ERROR_BAD_STATE, "MediaStreaming_VideoEnable 1 Failed" ) else: return makeReturnTuple( ErrorCodeDrone.OK, "MediaStreaming_VideoEnable 1 Success" ) @callback_decorator()
BSD 3-Clause New or Revised License
decile-team/cords
cords/selectionstrategies/SSL/craigstrategy.py
CRAIGStrategy.compute_gamma
python
def compute_gamma(self, idxs):
    if self.selection_type in ['PerClass', 'PerBatch']:
        gamma = [0 for i in range(len(idxs))]
        best = self.dist_mat[idxs]
        rep = np.argmax(best, axis=0)
        for i in rep:
            gamma[i] += 1
    elif self.selection_type == 'Supervised':
        gamma = [0 for i in range(len(idxs))]
        best = self.dist_mat[idxs]
        rep = np.argmax(best, axis=0)
        for i in range(rep.shape[1]):
            gamma[rep[0, i]] += 1
    return gamma
Compute the gamma values for the indices.

Parameters
----------
idxs: list
    The indices

Returns
----------
gamma: list
    Gradient values of the input indices
https://github.com/decile-team/cords/blob/e10de177355a10e6931743401e80debd20e2f240/cords/selectionstrategies/SSL/craigstrategy.py#L178-L205
import numpy as np import torch, time, apricot, math from scipy.sparse import csr_matrix from .dataselectionstrategy import DataSelectionStrategy from torch.utils.data.sampler import SubsetRandomSampler class CRAIGStrategy(DataSelectionStrategy): def __init__(self, trainloader, valloader, model, tea_model, ssl_alg, loss, device, num_classes, linear_layer, if_convex, selection_type, logger, optimizer='lazy'): super().__init__(trainloader, valloader, model, tea_model, ssl_alg, num_classes, linear_layer, loss, device, logger) self.if_convex = if_convex self.selection_type = selection_type self.optimizer = optimizer self.dist_mat = None def distance(self, x, y, exp=2): n = x.size(0) m = y.size(0) d = x.size(1) x = x.unsqueeze(1).expand(n, m, d) y = y.unsqueeze(0).expand(n, m, d) dist = torch.pow(x - y, exp).sum(2) return dist def compute_score(self, model_params, tea_model_params, idxs): trainset = self.trainloader.sampler.data_source subset_loader = torch.utils.data.DataLoader(trainset, batch_size=self.trainloader.batch_size, shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True) self.model.load_state_dict(model_params) if self.tea_model is not None: self.tea_model.load_state_dict(tea_model_params) self.N = 0 g_is = [] if self.if_convex: for batch_idx, (ul_weak_aug, ul_strong_aug, _) in enumerate(subset_loader): if self.selection_type == 'PerBatch': self.N += 1 g_is.append(ul_strong_aug.view(ul_strong_aug.size()[0], -1).mean(dim=0).view(1, -1)) else: self.N += ul_strong_aug.size()[0] g_is.append(ul_strong_aug.view(ul_strong_aug.size()[0], -1)) else: embDim = self.model.get_embedding_dim() for batch_idx, (ul_weak_aug, ul_strong_aug, _) in enumerate(subset_loader): ul_weak_aug, ul_strong_aug = ul_weak_aug.to(self.device), ul_strong_aug.to(self.device) if self.selection_type == 'PerBatch': self.N += 1 else: self.N += ul_strong_aug.size()[0] loss, out, l1, _, _ = self.ssl_loss(ul_weak_data=ul_weak_aug, ul_strong_data=ul_strong_aug) loss = loss.sum() l0_grads = torch.autograd.grad(loss, out)[0] if self.linear_layer: l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1) l1_grads = l0_expand * l1.repeat(1, self.num_classes) if self.selection_type == 'PerBatch': g_is.append(torch.cat((l0_grads, l1_grads), dim=1).mean(dim=0).view(1, -1)) else: g_is.append(torch.cat((l0_grads, l1_grads), dim=1)) else: if self.selection_type == 'PerBatch': g_is.append(l0_grads.mean(dim=0).view(1, -1)) else: g_is.append(l0_grads) self.dist_mat = torch.zeros([self.N, self.N], dtype=torch.float32) first_i = True if self.selection_type == 'PerBatch': g_is = torch.cat(g_is, dim=0) self.dist_mat = self.distance(g_is, g_is).cpu() else: for i, g_i in enumerate(g_is, 0): if first_i: size_b = g_i.size(0) first_i = False for j, g_j in enumerate(g_is, 0): self.dist_mat[i * size_b: i * size_b + g_i.size(0), j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j).cpu() self.const = torch.max(self.dist_mat).item() self.dist_mat = (self.const - self.dist_mat).numpy()
MIT License
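A small self-contained numpy sketch of the counting pattern used by compute_gamma above (the similarity matrix and selected indices are illustrative data, not from the repository): every column of the matrix is assigned to its best selected row, and gamma counts the assignments.

import numpy as np

sim = np.random.rand(8, 8)             # stand-in for self.dist_mat (similarity values)
idxs = [1, 4, 6]                       # indices chosen by the subset-selection step
gamma = [0 for _ in range(len(idxs))]
rep = np.argmax(sim[idxs], axis=0)     # closest selected row for each column
for i in rep:
    gamma[i] += 1
print(gamma, sum(gamma))               # counts sum to the number of columns (8)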
devopshq/vspheretools
pysphere/ZSI/TC.py
TypeCode.text_to_data
python
def text_to_data(self, text, elt, ps):
    raise EvaluateException("Unimplemented evaluation", ps.Backtrace(elt))
convert text into typecode specific data.

Parameters:
    text -- text content
    elt -- the DOM element being parsed
    ps -- the ParsedSoap object.
https://github.com/devopshq/vspheretools/blob/10890423bfbba976e3ddee61204e9eed4b73fe92/pysphere/ZSI/TC.py#L127-L134
from inspect import isclass from pysphere.ZSI import _children, _child_elements, _floattypes, _stringtypes, _seqtypes, _find_attrNodeNS, _find_arraytype, _find_href, _find_encstyle, _resolve_prefix, _find_xsi_attr, _find_type, _get_element_nsuri_name, _get_idstr, _Node, EvaluateException, UNICODE_ENCODING, _valid_encoding, ParseException from pysphere.ZSI.wstools.Namespaces import SCHEMA, SOAP from pysphere.ZSI.wstools.Utility import SplitQName from pysphere.ZSI.wstools.logging import getLogger as _GetLogger import re, types, time, copy from base64 import decodestring as b64decode, encodestring as b64encode from urllib import unquote as urldecode, quote as urlencode from binascii import unhexlify as hexdecode, hexlify as hexencode try: from cStringIO import StringIO except ImportError: from StringIO import StringIO _is_xsd_or_soap_ns = lambda ns: ns in [ SCHEMA.XSD3, SOAP.ENC, SCHEMA.XSD1, SCHEMA.XSD2, ] _find_nil = lambda E: _find_xsi_attr(E, "null") or _find_xsi_attr(E, "nil") def _get_xsitype(pyclass): if hasattr(pyclass,'type') and isinstance(pyclass.type, _seqtypes): return pyclass.type elif hasattr(pyclass,'type') and hasattr(pyclass, 'schema'): return (pyclass.schema, pyclass.type) return (None,None) Nilled = None UNBOUNDED = 'unbounded' class TypeCode: tag = None type = (None,None) typechecks = True attribute_typecode_dict = None logger = _GetLogger('ZSI.TC.TypeCode') def __init__(self, pname=None, aname=None, minOccurs=1, maxOccurs=1, nillable=False, typed=True, unique=True, pyclass=None, attrs_aname='_attrs', **kw): if isinstance(pname, _seqtypes): self.nspname, self.pname = pname else: self.nspname, self.pname = None, pname if self.pname: self.pname = str(self.pname).split(':')[-1] self.aname = aname or self.pname self.minOccurs = minOccurs self.maxOccurs = maxOccurs self.nillable = nillable self.typed = typed self.unique = unique self.attrs_aname = attrs_aname self.pyclass = pyclass encoded = kw.get('encoded') if encoded is not None: self.nspname = kw['encoded'] def parse(self, elt, ps): raise EvaluateException("Unimplemented evaluation", ps.Backtrace(elt)) def serialize(self, elt, sw, pyobj, name=None, orig=None, **kw): raise EvaluateException("Unimplemented evaluation", sw.Backtrace(elt))
MIT License
googlecolab/colabtools
google/colab/output/_widgets.py
_widget_display_hook
python
def _widget_display_hook(msg):
    if not _installed_url:
        return msg
    content = msg.get('content', {})
    if not content:
        return msg
    widget_data = content.get('data', {}).get(_WIDGET_MIME_TYPE)
    if not widget_data:
        return msg
    widget_metadata = content.setdefault('metadata', {}).setdefault(_WIDGET_MIME_TYPE, {})
    widget_metadata['colab'] = {'custom_widget_manager': {'url': _installed_url,}}
    return msg
Display hook to enable custom widget manager info in the display item.
https://github.com/googlecolab/colabtools/blob/186f321a2f2412b8e130a0d13e8cf3332b71c212/google/colab/output/_widgets.py#L69-L84
import IPython as _IPython

_supported_widgets_versions = {
    '5.0.0a': 'e680a8b83b2ea152',
}
_default_version = '5.0.0a'
_installed_url = None


def enable_custom_widget_manager(version=_default_version):
    version_hash = _supported_widgets_versions.get(version)
    if not version_hash:
        raise ValueError(
            'Unknown widgets version: {version}'.format(version=version))
    _install_custom_widget_manager(
        'https://ssl.gstatic.com/colaboratory-static/widgets/colab-cdn-widget-manager/{version_hash}/manager.min.js'
        .format(version_hash=version_hash))


def disable_custom_widget_manager():
    _install_custom_widget_manager(None)


def _install_custom_widget_manager(url):
    global _installed_url
    if url and not _installed_url:
        _IPython.get_ipython().display_pub.register_hook(_widget_display_hook)
    elif not url and _installed_url:
        _IPython.get_ipython().display_pub.unregister_hook(_widget_display_hook)
    _installed_url = url


_WIDGET_MIME_TYPE = 'application/vnd.jupyter.widget-view+json'
Apache License 2.0
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/switch/template.py
SwitchTemplate.available
python
def available(self):
    return self._state is not None
If switch is available.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/switch/template.py#L141-L143
import asyncio import logging import voluptuous as vol from homeassistant.core import callback from homeassistant.components.switch import ( ENTITY_ID_FORMAT, SwitchDevice, PLATFORM_SCHEMA) from homeassistant.const import ( ATTR_FRIENDLY_NAME, CONF_VALUE_TEMPLATE, CONF_ICON_TEMPLATE, CONF_ENTITY_PICTURE_TEMPLATE, STATE_OFF, STATE_ON, ATTR_ENTITY_ID, CONF_SWITCHES, EVENT_HOMEASSISTANT_START) from homeassistant.exceptions import TemplateError import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import async_generate_entity_id from homeassistant.helpers.event import async_track_state_change from homeassistant.helpers.script import Script _LOGGER = logging.getLogger(__name__) _VALID_STATES = [STATE_ON, STATE_OFF, 'true', 'false'] ON_ACTION = 'turn_on' OFF_ACTION = 'turn_off' SWITCH_SCHEMA = vol.Schema({ vol.Required(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_ICON_TEMPLATE): cv.template, vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template, vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA, vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA, vol.Optional(ATTR_FRIENDLY_NAME): cv.string, vol.Optional(ATTR_ENTITY_ID): cv.entity_ids }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SWITCHES): vol.Schema({cv.slug: SWITCH_SCHEMA}), }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): switches = [] for device, device_config in config[CONF_SWITCHES].items(): friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device) state_template = device_config[CONF_VALUE_TEMPLATE] icon_template = device_config.get(CONF_ICON_TEMPLATE) entity_picture_template = device_config.get( CONF_ENTITY_PICTURE_TEMPLATE) on_action = device_config[ON_ACTION] off_action = device_config[OFF_ACTION] entity_ids = (device_config.get(ATTR_ENTITY_ID) or state_template.extract_entities()) state_template.hass = hass if icon_template is not None: icon_template.hass = hass if entity_picture_template is not None: entity_picture_template.hass = hass switches.append( SwitchTemplate( hass, device, friendly_name, state_template, icon_template, entity_picture_template, on_action, off_action, entity_ids) ) if not switches: _LOGGER.error("No switches added") return False async_add_devices(switches) return True class SwitchTemplate(SwitchDevice): def __init__(self, hass, device_id, friendly_name, state_template, icon_template, entity_picture_template, on_action, off_action, entity_ids): self.hass = hass self.entity_id = async_generate_entity_id( ENTITY_ID_FORMAT, device_id, hass=hass) self._name = friendly_name self._template = state_template self._on_script = Script(hass, on_action) self._off_script = Script(hass, off_action) self._state = False self._icon_template = icon_template self._entity_picture_template = entity_picture_template self._icon = None self._entity_picture = None self._entities = entity_ids @asyncio.coroutine def async_added_to_hass(self): @callback def template_switch_state_listener(entity, old_state, new_state): self.async_schedule_update_ha_state(True) @callback def template_switch_startup(event): async_track_state_change( self.hass, self._entities, template_switch_state_listener) self.async_schedule_update_ha_state(True) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, template_switch_startup) @property def name(self): return self._name @property def is_on(self): return self._state @property def should_poll(self): return False @property
MIT License
p-christ/deep-reinforcement-learning-algorithms-with-pytorch
agents/actor_critic_agents/DDPG.py
DDPG.pick_action
python
def pick_action(self, state=None):
    if state is None:
        state = torch.from_numpy(self.state).float().unsqueeze(0).to(self.device)
    self.actor_local.eval()
    with torch.no_grad():
        action = self.actor_local(state).cpu().data.numpy()
    self.actor_local.train()
    action = self.exploration_strategy.perturb_action_for_exploration_purposes({"action": action})
    return action.squeeze(0)
Picks an action using the actor network and then adds some noise to it to ensure exploration
https://github.com/p-christ/deep-reinforcement-learning-algorithms-with-pytorch/blob/b338c87bebb672e39304e47e0eed55aeb462b243/agents/actor_critic_agents/DDPG.py#L50-L58
import torch import torch.nn.functional as functional from torch import optim from agents.Base_Agent import Base_Agent from utilities.data_structures.Replay_Buffer import Replay_Buffer from exploration_strategies.OU_Noise_Exploration import OU_Noise_Exploration class DDPG(Base_Agent): agent_name = "DDPG" def __init__(self, config): Base_Agent.__init__(self, config) self.hyperparameters = config.hyperparameters self.critic_local = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic") self.critic_target = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic") Base_Agent.copy_model_over(self.critic_local, self.critic_target) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=self.hyperparameters["Critic"]["learning_rate"], eps=1e-4) self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"], self.hyperparameters["batch_size"], self.config.seed) self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor") self.actor_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor") Base_Agent.copy_model_over(self.actor_local, self.actor_target) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=self.hyperparameters["Actor"]["learning_rate"], eps=1e-4) self.exploration_strategy = OU_Noise_Exploration(self.config) def step(self): while not self.done: self.action = self.pick_action() self.conduct_action(self.action) if self.time_for_critic_and_actor_to_learn(): for _ in range(self.hyperparameters["learning_updates_per_learning_session"]): states, actions, rewards, next_states, dones = self.sample_experiences() self.critic_learn(states, actions, rewards, next_states, dones) self.actor_learn(states) self.save_experience() self.state = self.next_state self.global_step_number += 1 self.episode_number += 1 def sample_experiences(self): return self.memory.sample()
MIT License
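A generic PyTorch sketch of the eval / no_grad / noise pattern used by pick_action above; the tiny actor network and Gaussian noise are stand-ins for the repository's actual classes (the record itself uses an OU-noise exploration strategy):

import numpy as np
import torch
import torch.nn as nn

actor = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 2))
state = torch.zeros(1, 4)

actor.eval()                             # freeze dropout/batch-norm behaviour
with torch.no_grad():                    # no gradients needed to pick an action
    action = actor(state).cpu().numpy()
actor.train()
action = action + np.random.normal(0.0, 0.1, size=action.shape)  # exploration noise
print(action.squeeze(0))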
luna-klatzer/openhiven.py
openhivenpy/gateway/messagebroker.py
DynamicEventBuffer.add_new_event
python
def add_new_event(
        self,
        data: dict,
        args: Optional[tuple] = None,
        kwargs: Optional[dict] = None
):
    if kwargs is None:
        kwargs: Dict = {}
    if args is None:
        args: Tuple = ()
    self.append(
        {
            'data': data,
            'args': args,
            'kwargs': kwargs
        }
    )
Adds a new event to the Buffer which will trigger the listeners assigned to the event

:param data: The raw WebSocket data containing the information of the event
:param args: Args of the Event that should be passed to the event listeners
:param kwargs: Kwargs / named args of the Event that should be passed to the event listeners
https://github.com/luna-klatzer/openhiven.py/blob/9184d6a77bde0ee3847dcb9ea7d399217a36c95d/openhivenpy/gateway/messagebroker.py#L80-L107
from __future__ import annotations import asyncio import logging from typing import Optional, List, Coroutine, Tuple, Dict from typing import TYPE_CHECKING from .. import utils from ..base_types import HivenObject if TYPE_CHECKING: from .. import HivenClient from ..exceptions import EventConsumerLoopError, WorkerTaskError from ..events import DispatchEventListener __all__ = ['DynamicEventBuffer', 'MessageBroker'] logger = logging.getLogger(__name__) async def _wait_until_done(task: asyncio.Task) -> None: while not task.done(): await asyncio.sleep(.05) class DynamicEventBuffer(list, HivenObject): def __init__(self, event: str, *args, **kwargs): self.event = event super().__init__(*args, **kwargs) def __repr__(self): info = [ ('event', self.event) ] return '<{} {}>'.format(self.__class__.__name__, ' '.join('%s=%s' % t for t in info))
MIT License
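A minimal sketch of how the buffer above accumulates events; constructing it directly, outside the Hiven client, is an assumption made purely for illustration, and the import path is inferred from the file path in the record:

from openhivenpy.gateway.messagebroker import DynamicEventBuffer  # inferred path

buf = DynamicEventBuffer('message_create')
buf.add_new_event({'id': 1}, args=('context',), kwargs={'priority': 'high'})
print(len(buf))          # 1 -- the buffer is a list subclass
print(buf[0]['data'])    # {'id': 1}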
ecshackweek/impedance.py
impedance/models/circuits/circuits.py
BaseCircuit.__init__
python
def __init__(self, initial_guess=[], constants=None, name=None):
    initial_guess = list(filter(None, initial_guess))
    for i in initial_guess:
        if not isinstance(i, (float, int, np.int32, np.float64)):
            raise TypeError(f'value {i} in initial_guess is not a number')
    self.initial_guess = initial_guess
    if constants is not None:
        self.constants = constants
    else:
        self.constants = {}
    self.name = name
    self.parameters_ = None
    self.conf_ = None
Base constructor for any equivalent circuit model

Parameters
----------
initial_guess: numpy array
    Initial guess of the circuit values

constants : dict, optional
    Parameters and values to hold constant during fitting
    (e.g. {"R0": 0.1})

name : str, optional
    Name for the circuit
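A small construction sketch (the numbers are arbitrary; in practice the constructor is usually reached through a concrete circuit subclass):

# Arbitrary illustrative numbers.
circuit = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 200.0],
                      constants={"R0": 0.01},
                      name="example cell")
print(circuit.initial_guess, circuit.constants, circuit.name)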
https://github.com/ecshackweek/impedance.py/blob/06fd26d694e5a97960ab465c22e50f90ab1d65ba/impedance/models/circuits/circuits.py#L14-L46
from .fitting import circuit_fit, buildCircuit from .fitting import calculateCircuitLength, check_and_eval from impedance.visualization import plot_altair, plot_bode, plot_nyquist from .elements import circuit_elements, get_element_from_name import json import matplotlib.pyplot as plt import numpy as np import warnings class BaseCircuit:
MIT License
oshlack/stretch
scripts/mosdepth_median.py
parse_args
python
def parse_args():
    parser = argparse.ArgumentParser(
        description='Estimate allele lengths and find outliers at STR loci.')
    parser.add_argument(
        'INFILE', type=str,
        help='mosdepth.global.dist.txt cumulative coverage output file')
    parser.add_argument(
        '--out', type=str, required=False,
        help='Output file name. Defaults to stdout.')
    return parser.parse_args()
Parse the input arguments; use '-h' for help
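A usage sketch (the file names are placeholders):

# Typically run from the command line, e.g.:
#   python mosdepth_median.py sample.mosdepth.global.dist.txt --out sample_median.txt
args = parse_args()
print(args.INFILE, args.out)   # positional input file and optional output path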
https://github.com/oshlack/stretch/blob/c5345e5dea4adfde790befb9903ec2d81ed5b2c1/scripts/mosdepth_median.py#L16-L25
import argparse import sys import pandas as pd import numpy as np __author__ = "Harriet Dashnow" __credits__ = ["Harriet Dashnow"] __license__ = "MIT" __version__ = "0.1.0" __email__ = "h.dashnow@gmail.com"
MIT License
yvzheng/pycwr
pycwr/core/PyartRadar.py
Radar.check_field_exists
python
def check_field_exists(self, field_name):
    if field_name not in self.fields:
        raise KeyError('Field not available: ' + field_name)
    return
Check that a field exists in the fields dictionary.

If the field does not exist, raise a KeyError.

Parameters
----------
field_name : str
    Name of field to check.
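A usage sketch (assumes an existing, populated Radar instance):

# `radar` is assumed to be an existing Radar object with its fields dictionary filled in.
try:
    radar.check_field_exists('reflectivity')
except KeyError as err:
    print(err)   # "Field not available: reflectivity" if the field is missing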
https://github.com/yvzheng/pycwr/blob/91b59b3c5f7e7bd9a76ca95082a29d8617f12e64/pycwr/core/PyartRadar.py#L318-L332
from __future__ import print_function import numpy as np import sys from ..configure.pyart_config import get_metadata from ..configure.pyart_lazydict import LazyLoadDict from .transforms import antenna_vectors_to_cartesian, cartesian_to_geographic class Radar(object): def __init__(self, time, _range, fields, metadata, scan_type, latitude, longitude, altitude, sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index, sweep_end_ray_index, azimuth, elevation, altitude_agl=None, target_scan_rate=None, rays_are_indexed=None, ray_angle_res=None, scan_rate=None, antenna_transition=None, instrument_parameters=None, radar_calibration=None, rotation=None, tilt=None, roll=None, drift=None, heading=None, pitch=None, georefs_applied=None, ): if 'calendar' not in time: time['calendar'] = 'gregorian' self.time = time self.range = _range self.fields = fields self.metadata = metadata self.scan_type = scan_type self.latitude = latitude self.longitude = longitude self.altitude = altitude self.altitude_agl = altitude_agl self.sweep_number = sweep_number self.sweep_mode = sweep_mode self.fixed_angle = fixed_angle self.sweep_start_ray_index = sweep_start_ray_index self.sweep_end_ray_index = sweep_end_ray_index self.target_scan_rate = target_scan_rate self.rays_are_indexed = rays_are_indexed self.ray_angle_res = ray_angle_res self.azimuth = azimuth self.elevation = elevation self.scan_rate = scan_rate self.antenna_transition = antenna_transition self.rotation = rotation self.tilt = tilt self.roll = roll self.drift = drift self.heading = heading self.pitch = pitch self.georefs_applied = georefs_applied self.instrument_parameters = instrument_parameters self.radar_calibration = radar_calibration self.ngates = len(_range['data']) self.nrays = len(time['data']) self.nsweeps = len(sweep_number['data']) self.projection = {'proj': 'pyart_aeqd', '_include_lon_0_lat_0': True} self.init_rays_per_sweep() self.init_gate_x_y_z() self.init_gate_longitude_latitude() self.init_gate_altitude() def __getstate__(self): state = self.__dict__.copy() del state['rays_per_sweep'] del state['gate_x'] del state['gate_y'] del state['gate_z'] del state['gate_longitude'] del state['gate_latitude'] del state['gate_altitude'] return state def __setstate__(self, state): self.__dict__.update(state) self.init_rays_per_sweep() self.init_gate_x_y_z() self.init_gate_longitude_latitude() self.init_gate_altitude() def init_rays_per_sweep(self): lazydic = LazyLoadDict(get_metadata('rays_per_sweep')) lazydic.set_lazy('data', _rays_per_sweep_data_factory(self)) self.rays_per_sweep = lazydic def init_gate_x_y_z(self): gate_x = LazyLoadDict(get_metadata('gate_x')) gate_x.set_lazy('data', _gate_data_factory(self, 0)) self.gate_x = gate_x gate_y = LazyLoadDict(get_metadata('gate_y')) gate_y.set_lazy('data', _gate_data_factory(self, 1)) self.gate_y = gate_y gate_z = LazyLoadDict(get_metadata('gate_z')) gate_z.set_lazy('data', _gate_data_factory(self, 2)) self.gate_z = gate_z def init_gate_longitude_latitude(self): gate_longitude = LazyLoadDict(get_metadata('gate_longitude')) gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0)) self.gate_longitude = gate_longitude gate_latitude = LazyLoadDict(get_metadata('gate_latitude')) gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1)) self.gate_latitude = gate_latitude def init_gate_altitude(self): gate_altitude = LazyLoadDict(get_metadata('gate_altitude')) gate_altitude.set_lazy('data', _gate_altitude_data_factory(self)) self.gate_altitude = gate_altitude def 
_check_sweep_in_range(self, sweep): if sweep < 0 or sweep >= self.nsweeps: raise IndexError('Sweep out of range: ', sweep) return
MIT License
giswqs/geemap
geemap/common.py
upload_to_imgur
python
def upload_to_imgur(in_gif):
    import subprocess

    pkg_name = "imgur-uploader"
    if not is_tool(pkg_name):
        check_install(pkg_name)

    try:
        IMGUR_API_ID = os.environ.get("IMGUR_API_ID", None)
        IMGUR_API_SECRET = os.environ.get("IMGUR_API_SECRET", None)
        credentials_path = os.path.join(
            os.path.expanduser("~"), ".config/imgur_uploader/uploader.cfg"
        )
        if (
            (IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None)
        ) or os.path.exists(credentials_path):
            proc = subprocess.Popen(["imgur-uploader", in_gif], stdout=subprocess.PIPE)
            for _ in range(0, 2):
                line = proc.stdout.readline()
                print(line.rstrip().decode("utf-8"))
        else:
            print(
                "Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials"
            )
            return
    except Exception as e:
        print(e)
Uploads an image to imgur.com.

Args:
    in_gif (str): The file path to the image.
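A usage sketch (the GIF path is a placeholder; Imgur credentials are assumed to be set up already):

# Placeholder file name; assumes Imgur credentials are already configured
# (IMGUR_API_ID / IMGUR_API_SECRET or ~/.config/imgur_uploader/uploader.cfg).
upload_to_imgur("landsat_timelapse.gif")   # prints the imgur-uploader output lines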
https://github.com/giswqs/geemap/blob/ee39ca827a724691ebd76f57f1dc6fa73c1bb240/geemap/common.py#L525-L564
import csv import datetime import io import json import math import os import shutil import tarfile import urllib.request import zipfile import ee import ipywidgets as widgets from IPython.display import display from ipytree import Node, Tree def ee_initialize(token_name="EARTHENGINE_TOKEN"): if ee.data._credentials is None: try: ee_token = os.environ.get(token_name) if ee_token is not None: credential_file_path = os.path.expanduser("~/.config/earthengine/") if not os.path.exists(credential_file_path): credential = '{"refresh_token":"%s"}' % ee_token os.makedirs(credential_file_path, exist_ok=True) with open(credential_file_path + "credentials", "w") as file: file.write(credential) elif in_colab_shell(): if credentials_in_drive() and (not credentials_in_colab()): copy_credentials_to_colab() elif not credentials_in_colab: ee.Authenticate() if is_drive_mounted() and (not credentials_in_drive()): copy_credentials_to_drive() else: if is_drive_mounted(): copy_credentials_to_drive() ee.Initialize() except Exception: ee.Authenticate() ee.Initialize() def set_proxy(port=1080, ip="http://127.0.0.1"): import requests try: if not ip.startswith("http"): ip = "http://" + ip proxy = "{}:{}".format(ip, port) os.environ["HTTP_PROXY"] = proxy os.environ["HTTPS_PROXY"] = proxy a = requests.get("https://earthengine.google.com/") if a.status_code != 200: print( "Failed to connect to Earth Engine. Please double check the port number and ip address." ) except Exception as e: print(e) def in_colab_shell(): import sys if "google.colab" in sys.modules: return True else: return False def is_drive_mounted(): drive_path = "/content/drive/My Drive" if os.path.exists(drive_path): return True else: return False def credentials_in_drive(): credentials_path = "/content/drive/My Drive/.config/earthengine/credentials" if os.path.exists(credentials_path): return True else: return False def credentials_in_colab(): credentials_path = "/root/.config/earthengine/credentials" if os.path.exists(credentials_path): return True else: return False def copy_credentials_to_drive(): src = "/root/.config/earthengine/credentials" dst = "/content/drive/My Drive/.config/earthengine/credentials" wd = os.path.dirname(dst) if not os.path.exists(wd): os.makedirs(wd) shutil.copyfile(src, dst) def copy_credentials_to_colab(): src = "/content/drive/My Drive/.config/earthengine/credentials" dst = "/root/.config/earthengine/credentials" wd = os.path.dirname(dst) if not os.path.exists(wd): os.makedirs(wd) shutil.copyfile(src, dst) def check_install(package): import subprocess try: __import__(package) except ImportError: print(f"{package} is not installed. Installing ...") try: subprocess.check_call(["python", "-m", "pip", "install", package]) except Exception as e: print(f"Failed to install {package}") print(e) print(f"{package} has been installed successfully.") def update_package(): try: download_dir = os.path.join(os.path.expanduser("~"), "Downloads") if not os.path.exists(download_dir): os.makedirs(download_dir) clone_repo(out_dir=download_dir) pkg_dir = os.path.join(download_dir, "geemap-master") work_dir = os.getcwd() os.chdir(pkg_dir) if shutil.which("pip") is None: cmd = "pip3 install ." else: cmd = "pip install ." 
os.system(cmd) os.chdir(work_dir) print( "\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output" ) except Exception as e: raise Exception(e) def check_package(name, URL=""): try: __import__(name.lower()) except Exception: raise ImportError( f"{name} is not installed. Please install it before proceeding. {URL}" ) def clone_repo(out_dir=".", unzip=True): url = "https://github.com/giswqs/geemap/archive/master.zip" filename = "geemap-master.zip" download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip) def install_from_github(url): try: download_dir = os.path.join(os.path.expanduser("~"), "Downloads") if not os.path.exists(download_dir): os.makedirs(download_dir) repo_name = os.path.basename(url) zip_url = os.path.join(url, "archive/master.zip") filename = repo_name + "-master.zip" download_from_url( url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True ) pkg_dir = os.path.join(download_dir, repo_name + "-master") pkg_name = os.path.basename(url) work_dir = os.getcwd() os.chdir(pkg_dir) print(f"Installing {pkg_name}...") cmd = "pip install ." os.system(cmd) os.chdir(work_dir) print(f"{pkg_name} has been installed successfully.") except Exception as e: print(e) def check_git_install(): import webbrowser cmd = "git --version" output = os.popen(cmd).read() if "git version" in output: return True else: url = "https://git-scm.com/downloads" print(f"Git is not installed. Please download Git from {url} and install it.") webbrowser.open_new_tab(url) return False def clone_github_repo(url, out_dir): repo_name = os.path.basename(url) url_zip = url + "/archive/master.zip" if os.path.exists(out_dir): print( "The specified output directory already exists. Please choose a new directory." ) return parent_dir = os.path.dirname(out_dir) out_file_path = os.path.join(parent_dir, repo_name + ".zip") try: urllib.request.urlretrieve(url_zip, out_file_path) except Exception: print("The provided URL is invalid. Please double check the URL.") return with zipfile.ZipFile(out_file_path, "r") as zip_ref: zip_ref.extractall(parent_dir) src = out_file_path.replace(".zip", "-master") os.rename(src, out_dir) os.remove(out_file_path) def clone_google_repo(url, out_dir=None): repo_name = os.path.basename(url) if out_dir is None: out_dir = os.path.join(os.getcwd(), repo_name) if not os.path.exists(os.path.dirname(out_dir)): os.makedirs(os.path.dirname(out_dir)) if os.path.exists(out_dir): print( "The specified output directory already exists. Please choose a new directory." 
) return if check_git_install(): cmd = f'git clone "{url}" "{out_dir}"' os.popen(cmd).read() def open_github(subdir=None): import webbrowser url = "https://github.com/giswqs/geemap" if subdir == "source": url += "/tree/master/geemap/" elif subdir == "examples": url += "/tree/master/examples" elif subdir == "tutorials": url += "/tree/master/tutorials" webbrowser.open_new_tab(url) def open_youtube(): import webbrowser url = "https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3" webbrowser.open_new_tab(url) def is_tool(name): return shutil.which(name) is not None def random_string(string_length=3): import random import string letters = string.ascii_lowercase return "".join(random.choice(letters) for i in range(string_length)) def open_image_from_url(url): import requests from PIL import Image try: response = requests.get(url) img = Image.open(io.BytesIO(response.content)) return img except Exception as e: print(e) def show_image(img_path, width=None, height=None): from IPython.display import display try: out = widgets.Output() out.clear_output(wait=True) display(out) with out: file = open(img_path, "rb") image = file.read() if (width is None) and (height is None): display(widgets.Image(value=image)) elif (width is not None) and (height is not None): display(widgets.Image(value=image, width=width, height=height)) else: print("You need set both width and height.") return except Exception as e: print(e) def has_transparency(img): if img.mode == "P": transparent = img.info.get("transparency", -1) for _, index in img.getcolors(): if index == transparent: return True elif img.mode == "RGBA": extrema = img.getextrema() if extrema[3][0] < 255: return True return False
MIT License
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/models/api_key_update_req.py
ApiKeyUpdateReq.groups
python
def groups(self, groups):
    self._groups = groups
Sets the groups of this ApiKeyUpdateReq.
A list of group IDs this API key belongs to.

:param groups: The groups of this ApiKeyUpdateReq.
:type: list[str]
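A usage sketch (the group IDs are placeholders):

# Placeholder group IDs; real values are IAM group ids from the Pelion/Mbed Cloud account.
api_key_update = ApiKeyUpdateReq(name="backend key")
api_key_update.groups = ["group-id-1", "group-id-2"]
print(api_key_update.groups)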
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/iam/models/api_key_update_req.py#L70-L79
from pprint import pformat from six import iteritems import re class ApiKeyUpdateReq(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'groups': 'list[str]', 'name': 'str', 'owner': 'str', 'status': 'str' } attribute_map = { 'groups': 'groups', 'name': 'name', 'owner': 'owner', 'status': 'status' } def __init__(self, groups=None, name=None, owner=None, status=None): self._groups = groups self._name = name self._owner = owner self._status = status self.discriminator = None @property def groups(self): return self._groups @groups.setter
Apache License 2.0
eubr-bigsea/py-st-dbscan
python/src/coordinates.py
convert_to_utm
python
def convert_to_utm(df, src_epsg, dst_epsg, col_lat, col_lon,
                   alias_lon=None, alias_lat=None):
    old_proj = pyproj.Proj(src_epsg, preserve_units=True)
    new_proj = pyproj.Proj(dst_epsg, preserve_units=True)

    print("Formal definition string for the old projection:",
          old_proj.definition_string())
    print("Formal definition string for the new projection:",
          new_proj.definition_string())

    lon = df[col_lon].values
    lat = df[col_lat].values

    x1, y1 = old_proj(lon, lat)
    x2, y2 = pyproj.transform(old_proj, new_proj, x1, y1)

    if alias_lon is None:
        alias_lon = col_lon
    if alias_lat is None:
        alias_lat = col_lat

    df[alias_lon] = x2
    df[alias_lat] = y2
    return df
Cython wrapper that converts from geographic (longitude, latitude) to native map
projection (x, y) coordinates. Values of x and y are given in meters.
OpenStreetMap is in a projected coordinate system that is based on the wgs84
datum (EPSG 4326).

:param df: DataFrame input
:param src_epsg: Geographic coordinate system used in the source points;
:param dst_epsg: UTM coordinate system to convert the input;
:param col_lat: Latitude column name;
:param col_lon: Longitude column name;
:param alias_lon: Longitude column name (default, replace the input);
:param alias_lat: Latitude column name (default, replace the input);
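A usage sketch (coordinates and EPSG codes are illustrative assumptions; the exact EPSG string form accepted depends on the installed pyproj version):

import pandas as pd

# Two illustrative points near Sao Paulo; UTM zone 23S ("epsg:32723") is an assumption.
df = pd.DataFrame({"lat": [-23.55, -23.56], "lon": [-46.63, -46.64]})
df = convert_to_utm(df, "epsg:4326", "epsg:32723",
                    col_lat="lat", col_lon="lon",
                    alias_lon="x", alias_lat="y")
print(df[["x", "y"]])   # projected coordinates in meters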
https://github.com/eubr-bigsea/py-st-dbscan/blob/297ccef21266dbd6b7e416c0ddf915f492ca8a64/python/src/coordinates.py#L7-L46
import pyproj
Apache License 2.0
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/climate/hive.py
HiveClimateEntity.turn_aux_heat_off
python
def turn_aux_heat_off(self):
    if self.device_type == "Heating":
        self.session.heating.turn_boost_off(self.node_id)
    elif self.device_type == "HotWater":
        self.session.hotwater.turn_boost_off(self.node_id)

    for entity in self.session.entities:
        entity.handle_update(self.data_updatesource)
Turn auxiliary heater off.
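A call sketch (illustrative only; in practice Home Assistant invokes this through its climate service layer, and `session`/`discovery_info` come from the Hive component setup shown in the context below):

# Hypothetical direct call; real entities are created by setup_platform().
entity = HiveClimateEntity(session, discovery_info)
entity.turn_aux_heat_off()   # cancels the Hive boost for heating or hot water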
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/climate/hive.py#L174-L182
from homeassistant.components.climate import ( ClimateDevice, STATE_AUTO, STATE_HEAT, STATE_OFF, STATE_ON, SUPPORT_AUX_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from homeassistant.components.hive import DATA_HIVE DEPENDENCIES = ['hive'] HIVE_TO_HASS_STATE = {'SCHEDULE': STATE_AUTO, 'MANUAL': STATE_HEAT, 'ON': STATE_ON, 'OFF': STATE_OFF} HASS_TO_HIVE_STATE = {STATE_AUTO: 'SCHEDULE', STATE_HEAT: 'MANUAL', STATE_ON: 'ON', STATE_OFF: 'OFF'} SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_AUX_HEAT) def setup_platform(hass, config, add_devices, discovery_info=None): if discovery_info is None: return session = hass.data.get(DATA_HIVE) add_devices([HiveClimateEntity(session, discovery_info)]) class HiveClimateEntity(ClimateDevice): def __init__(self, hivesession, hivedevice): self.node_id = hivedevice["Hive_NodeID"] self.node_name = hivedevice["Hive_NodeName"] self.device_type = hivedevice["HA_DeviceType"] if self.device_type == "Heating": self.thermostat_node_id = hivedevice["Thermostat_NodeID"] self.session = hivesession self.attributes = {} self.data_updatesource = '{}.{}'.format(self.device_type, self.node_id) if self.device_type == "Heating": self.modes = [STATE_AUTO, STATE_HEAT, STATE_OFF] elif self.device_type == "HotWater": self.modes = [STATE_AUTO, STATE_ON, STATE_OFF] self.session.entities.append(self) @property def supported_features(self): return SUPPORT_FLAGS def handle_update(self, updatesource): if '{}.{}'.format(self.device_type, self.node_id) not in updatesource: self.schedule_update_ha_state() @property def name(self): friendly_name = "Climate Device" if self.device_type == "Heating": friendly_name = "Heating" if self.node_name is not None: friendly_name = '{} {}'.format(self.node_name, friendly_name) elif self.device_type == "HotWater": friendly_name = "Hot Water" return friendly_name @property def device_state_attributes(self): return self.attributes @property def temperature_unit(self): return TEMP_CELSIUS @property def current_temperature(self): if self.device_type == "Heating": return self.session.heating.current_temperature(self.node_id) @property def target_temperature(self): if self.device_type == "Heating": return self.session.heating.get_target_temperature(self.node_id) @property def min_temp(self): if self.device_type == "Heating": return self.session.heating.min_temperature(self.node_id) @property def max_temp(self): if self.device_type == "Heating": return self.session.heating.max_temperature(self.node_id) @property def operation_list(self): return self.modes @property def current_operation(self): if self.device_type == "Heating": currentmode = self.session.heating.get_mode(self.node_id) elif self.device_type == "HotWater": currentmode = self.session.hotwater.get_mode(self.node_id) return HIVE_TO_HASS_STATE.get(currentmode) def set_operation_mode(self, operation_mode): new_mode = HASS_TO_HIVE_STATE.get(operation_mode) if self.device_type == "Heating": self.session.heating.set_mode(self.node_id, new_mode) elif self.device_type == "HotWater": self.session.hotwater.set_mode(self.node_id, new_mode) for entity in self.session.entities: entity.handle_update(self.data_updatesource) def set_temperature(self, **kwargs): new_temperature = kwargs.get(ATTR_TEMPERATURE) if new_temperature is not None: if self.device_type == "Heating": self.session.heating.set_target_temperature(self.node_id, new_temperature) for entity in self.session.entities: 
entity.handle_update(self.data_updatesource) @property def is_aux_heat_on(self): boost_status = None if self.device_type == "Heating": boost_status = self.session.heating.get_boost(self.node_id) elif self.device_type == "HotWater": boost_status = self.session.hotwater.get_boost(self.node_id) return boost_status == "ON" def turn_aux_heat_on(self): target_boost_time = 30 if self.device_type == "Heating": curtemp = self.session.heating.current_temperature(self.node_id) curtemp = round(curtemp * 2) / 2 target_boost_temperature = curtemp + 0.5 self.session.heating.turn_boost_on(self.node_id, target_boost_time, target_boost_temperature) elif self.device_type == "HotWater": self.session.hotwater.turn_boost_on(self.node_id, target_boost_time) for entity in self.session.entities: entity.handle_update(self.data_updatesource)
MIT License
identitypython/pysaml2
example/idp2/idp_uwsgi.py
SSO.redirect
python
def redirect(self):
    logger.info("--- In SSO Redirect ---")
    saml_msg = self.unpack_redirect()

    try:
        _key = saml_msg["key"]
        saml_msg = IDP.ticket[_key]
        self.req_info = saml_msg["req_info"]
        del IDP.ticket[_key]
    except KeyError:
        try:
            self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"],
                                                    BINDING_HTTP_REDIRECT)
        except KeyError:
            resp = BadRequest("Message signature verification failure")
            return resp(self.environ, self.start_response)

        _req = self.req_info.message

        if "SigAlg" in saml_msg and "Signature" in saml_msg:
            issuer = _req.issuer.text
            _certs = IDP.metadata.certs(issuer, "any", "signing")
            verified_ok = False
            for cert in _certs:
                if verify_redirect_signature(saml_msg, IDP.sec.sec_backend,
                                             cert):
                    verified_ok = True
                    break
            if not verified_ok:
                resp = BadRequest("Message signature verification failure")
                return resp(self.environ, self.start_response)

        if self.user:
            if _req.force_authn:
                saml_msg["req_info"] = self.req_info
                key = self._store_request(saml_msg)
                return self.not_authn(key, _req.requested_authn_context)
            else:
                return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
        else:
            saml_msg["req_info"] = self.req_info
            key = self._store_request(saml_msg)
            return self.not_authn(key, _req.requested_authn_context)
    else:
        return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
This is the HTTP-redirect endpoint
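A call sketch (illustrative only; in deployment the WSGI server supplies `environ` and `start_response`, and `user` comes from the IdP's session handling):

# Hypothetical WSGI plumbing; real values are provided by the server and the login flow.
sso = SSO(environ, start_response, user="testuser")
body = sso.redirect()   # unpacks the SAMLRequest from the query string and answers it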
https://github.com/identitypython/pysaml2/blob/f12ade09aa89211c42b7dc6ed94728f8aa69cffb/example/idp2/idp_uwsgi.py#L342-L389
import importlib import argparse import base64 import re import logging import time from hashlib import sha1 from urlparse import parse_qs from Cookie import SimpleCookie import os from saml2.authn import is_equal from saml2.profile import ecp from saml2 import server from saml2 import BINDING_HTTP_ARTIFACT from saml2 import BINDING_URI from saml2 import BINDING_PAOS from saml2 import BINDING_SOAP from saml2 import BINDING_HTTP_REDIRECT from saml2 import BINDING_HTTP_POST from saml2 import time_util from saml2.authn_context import AuthnBroker from saml2.authn_context import PASSWORD from saml2.authn_context import UNSPECIFIED from saml2.authn_context import authn_context_class_ref from saml2.httputil import Response from saml2.httputil import NotFound from saml2.httputil import geturl from saml2.httputil import get_post from saml2.httputil import Redirect from saml2.httputil import Unauthorized from saml2.httputil import BadRequest from saml2.httputil import ServiceError from saml2.ident import Unknown from saml2.metadata import create_metadata_string from saml2.s_utils import rndstr from saml2.s_utils import exception_trace from saml2.s_utils import UnknownPrincipal from saml2.s_utils import UnsupportedBinding from saml2.s_utils import PolicyError from saml2.sigver import verify_redirect_signature from saml2.sigver import encrypt_cert_from_item logger = logging.getLogger("saml2.idp") class Cache(object): def __init__(self): self.user2uid = {} self.uid2user = {} def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"): if timeout == "now": return time_util.instant(tformat) elif timeout == "dawn": return time.strftime(tformat, time.gmtime(0)) else: return time_util.in_a_while(minutes=timeout, format=tformat) def get_eptid(idp, req_info, session): return idp.eptid.get(idp.config.entityid, req_info.sender(), session["permanent_id"], session["authn_auth"]) def dict2list_of_tuples(d): return [(k, v) for k, v in d.items()] class Service(object): def __init__(self, environ, start_response, user=None): self.environ = environ logger.debug("ENVIRON: %s", environ) self.start_response = start_response self.user = user def unpack_redirect(self): if "QUERY_STRING" in self.environ: _qs = self.environ["QUERY_STRING"] return dict([(k, v[0]) for k, v in parse_qs(_qs).items()]) else: return None def unpack_post(self): _dict = parse_qs(get_post(self.environ)) logger.debug("unpack_post:: %s", _dict) try: return dict([(k, v[0]) for k, v in _dict.items()]) except Exception: return None def unpack_soap(self): try: query = get_post(self.environ) return {"SAMLRequest": query, "RelayState": ""} except Exception: return None def unpack_either(self): if self.environ["REQUEST_METHOD"] == "GET": _dict = self.unpack_redirect() elif self.environ["REQUEST_METHOD"] == "POST": _dict = self.unpack_post() else: _dict = None logger.debug("_dict: %s", _dict) return _dict def operation(self, saml_msg, binding): logger.debug("_operation: %s", saml_msg) if not saml_msg or not 'SAMLRequest' in saml_msg: resp = BadRequest('Error parsing request or no request') return resp(self.environ, self.start_response) else: try: _encrypt_cert = encrypt_cert_from_item( saml_msg["req_info"].message) return self.do(saml_msg["SAMLRequest"], binding, saml_msg["RelayState"], encrypt_cert=_encrypt_cert) except KeyError: return self.do(saml_msg["SAMLRequest"], binding) def artifact_operation(self, saml_msg): if not saml_msg: resp = BadRequest("Missing query") return resp(self.environ, self.start_response) else: request = 
IDP.artifact2message(saml_msg["SAMLart"], "spsso") try: return self.do(request, BINDING_HTTP_ARTIFACT, saml_msg["RelayState"]) except KeyError: return self.do(request, BINDING_HTTP_ARTIFACT) def response(self, binding, http_args): if binding == BINDING_HTTP_ARTIFACT: resp = Redirect() else: resp = Response(http_args["data"], headers=http_args["headers"]) return resp(self.environ, self.start_response) def do(self, query, binding, relay_state="", encrypt_cert=None): pass def redirect(self): _dict = self.unpack_redirect() return self.operation(_dict, BINDING_HTTP_REDIRECT) def post(self): _dict = self.unpack_post() return self.operation(_dict, BINDING_HTTP_POST) def artifact(self): _dict = self.unpack_either() return self.artifact_operation(_dict) def soap(self): logger.debug("- SOAP -") _dict = self.unpack_soap() logger.debug("_dict: %s", _dict) return self.operation(_dict, BINDING_SOAP) def uri(self): _dict = self.unpack_either() return self.operation(_dict, BINDING_SOAP) def not_authn(self, key, requested_authn_context): ruri = geturl(self.environ, query=False) return do_authentication(self.environ, self.start_response, authn_context=requested_authn_context, key=key, redirect_uri=ruri) REPOZE_ID_EQUIVALENT = "uid" FORM_SPEC = """<form name="myform" method="post" action="%s"> <input type="hidden" name="SAMLResponse" value="%s" /> <input type="hidden" name="RelayState" value="%s" /> </form>""" class AuthenticationNeeded(Exception): def __init__(self, authn_context=None, *args, **kwargs): Exception.__init__(*args, **kwargs) self.authn_context = authn_context class SSO(Service): def __init__(self, environ, start_response, user=None): Service.__init__(self, environ, start_response, user) self.binding = "" self.response_bindings = None self.resp_args = {} self.binding_out = None self.destination = None self.req_info = None self.op_type = "" def verify_request(self, query, binding): resp_args = {} if not query: logger.info("Missing QUERY") resp = Unauthorized('Unknown user') return resp_args, resp(self.environ, self.start_response) if not self.req_info: self.req_info = IDP.parse_authn_request(query, binding) logger.info("parsed OK") _authn_req = self.req_info.message logger.debug("%s", _authn_req) try: self.binding_out, self.destination = IDP.pick_binding( "assertion_consumer_service", bindings=self.response_bindings, entity_id=_authn_req.issuer.text) except Exception as err: logger.error("Couldn't find receiver endpoint: %s", err) raise logger.debug("Binding: %s, destination: %s", self.binding_out, self.destination) resp_args = {} try: resp_args = IDP.response_args(_authn_req) _resp = None except UnknownPrincipal as excp: _resp = IDP.create_error_response(_authn_req.id, self.destination, excp) except UnsupportedBinding as excp: _resp = IDP.create_error_response(_authn_req.id, self.destination, excp) return resp_args, _resp def do(self, query, binding_in, relay_state="", encrypt_cert=None): try: resp_args, _resp = self.verify_request(query, binding_in) except UnknownPrincipal as excp: logger.error("UnknownPrincipal: %s", excp) resp = ServiceError("UnknownPrincipal: %s" % (excp,)) return resp(self.environ, self.start_response) except UnsupportedBinding as excp: logger.error("UnsupportedBinding: %s", excp) resp = ServiceError("UnsupportedBinding: %s" % (excp,)) return resp(self.environ, self.start_response) if not _resp: identity = USERS[self.user].copy() logger.info("Identity: %s", identity) if REPOZE_ID_EQUIVALENT: identity[REPOZE_ID_EQUIVALENT] = self.user try: try: metod = 
self.environ["idp.authn"] except KeyError: pass else: resp_args["authn"] = metod _resp = IDP.create_authn_response( identity, userid=self.user, encrypt_cert=encrypt_cert, **resp_args) except Exception as excp: logging.error(exception_trace(excp)) resp = ServiceError("Exception: %s" % (excp,)) return resp(self.environ, self.start_response) logger.info("AuthNResponse: %s", _resp) if self.op_type == "ecp": kwargs = {"soap_headers": [ ecp.Response( assertion_consumer_service_url=self.destination)]} else: kwargs = {} http_args = IDP.apply_binding(self.binding_out, "%s" % _resp, self.destination, relay_state, response=True, **kwargs) logger.debug("HTTPargs: %s", http_args) return self.response(self.binding_out, http_args) def _store_request(self, saml_msg): logger.debug("_store_request: %s", saml_msg) key = sha1(saml_msg["SAMLRequest"]).hexdigest() IDP.ticket[key] = saml_msg return key
Apache License 2.0
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/dashboard_info.py
DashboardInfo.assigned_customers
python
def assigned_customers(self, assigned_customers):
    self._assigned_customers = assigned_customers
Sets the assigned_customers of this DashboardInfo.

:param assigned_customers: The assigned_customers of this DashboardInfo.  # noqa: E501
:type: list[ShortCustomerInfo]
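A usage sketch (the customer entries are placeholders; real values are ShortCustomerInfo objects returned by the ThingsBoard client):

# Placeholder ShortCustomerInfo-style entries.
dashboard = DashboardInfo(title="Energy monitoring")
dashboard.assigned_customers = [customer_a, customer_b]
print(dashboard.assigned_customers)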
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/dashboard_info.py#L101-L109
import pprint import re import six class DashboardInfo(object): swagger_types = { 'assigned_customers': 'list[ShortCustomerInfo]', 'created_time': 'int', 'customer_id': 'CustomerId', 'image': 'str', 'mobile_hide': 'bool', 'mobile_order': 'int', 'name': 'str', 'owner_id': 'EntityId', 'tenant_id': 'TenantId', 'title': 'str' } attribute_map = { 'assigned_customers': 'assignedCustomers', 'created_time': 'createdTime', 'customer_id': 'customerId', 'image': 'image', 'mobile_hide': 'mobileHide', 'mobile_order': 'mobileOrder', 'name': 'name', 'owner_id': 'ownerId', 'tenant_id': 'tenantId', 'title': 'title' } def __init__(self, assigned_customers=None, created_time=None, customer_id=None, image=None, mobile_hide=None, mobile_order=None, name=None, owner_id=None, tenant_id=None, title=None): self._assigned_customers = None self._created_time = None self._customer_id = None self._image = None self._mobile_hide = None self._mobile_order = None self._name = None self._owner_id = None self._tenant_id = None self._title = None self.discriminator = None if assigned_customers is not None: self.assigned_customers = assigned_customers if created_time is not None: self.created_time = created_time if customer_id is not None: self.customer_id = customer_id if image is not None: self.image = image if mobile_hide is not None: self.mobile_hide = mobile_hide if mobile_order is not None: self.mobile_order = mobile_order if name is not None: self.name = name if owner_id is not None: self.owner_id = owner_id if tenant_id is not None: self.tenant_id = tenant_id if title is not None: self.title = title @property def assigned_customers(self): return self._assigned_customers @assigned_customers.setter
Apache License 2.0
abatten/fruitbat
fruitbat/_frb.py
Frb._set_value_units
python
def _set_value_units(self, value, unit=None, non_negative=False):
    if not isinstance(value, u.Quantity):
        if value is None:
            var = None
        elif non_negative and value < 0.0:
            raise ValueError("Value must be greater than zero.")
        elif unit is None:
            var = value * u.dimensionless_unscaled
        else:
            var = value * unit
    else:
        if value.unit.is_equivalent(unit):
            var = value
        else:
            err_msg = ("Quantity expected to have units of {} instead was "
                       "passed units {} which is not convertable".format(
                           unit, value.unit))
            raise ValueError(err_msg)

    return var
Converts ``value`` into a :obj:`~astropy.unit.Quantity` with units.

Also checks if ``value`` has existing units and whether those units are
convertible to ``unit``. If ``value`` is a :obj:`~astropy.unit.Quantity`
with convertible units then ``value`` is returned unchanged.

Parameters
----------
value: float, int, or :obj:`~astropy.unit.Quantity`
    The input value.

unit: :obj:`astropy.unit` or None
    The unit to set for ``value``. If ``unit = None`` then ``value``
    will become a dimensionless quantity.

non_negative: bool, optional
    Raise an error if the value is negative. This is used to verify
    values that can only be positive are correctly specified.
    Default: *False*

Return
------
var : :obj:`~astropy.unit.Quantity` or None
    The output quantity with units. If ``value=None`` returns *None*.
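A usage sketch (the dispersion measure is arbitrary; the method is internal, so it is reached through an Frb instance):

import astropy.units as u

frb = Frb(dm=557.0)                                       # arbitrary dispersion measure
width = frb._set_value_units(1.7, unit=u.ms, non_negative=True)   # bare float -> 1.7 ms Quantity
same = frb._set_value_units(1.7 * u.ms, unit=u.ms)        # already a Quantity: returned unchanged
print(width, same)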
https://github.com/abatten/fruitbat/blob/660ae44eb20319b0456f929786a2592e78cce075/fruitbat/_frb.py#L879-L933
import numpy as np import astropy.coordinates as coord from astropy.coordinates import SkyCoord from astropy.time import Time import astropy.units as u import h5py from e13tools import docstring_substitute, docstring_copy import pygedm from fruitbat import cosmologies, methods, table, utils, plot __all__ = ["Frb"] class Frb(object): def __init__(self, dm, name=None, raj=None, decj=None, gl=None, gb=None, dm_galaxy=0.0, dm_excess=None, z_host=None, dm_host_est=0.0, dm_host_loc=0.0, dm_index=None, scatt_index=None, snr=None, width=None, peak_flux=None, fluence=None, obs_bandwidth=None, obs_freq_central=None, utc=None, **kwargs): self.dm = dm self.name = name self.raj = raj self.decj = decj self.gl = gl self.gb = gb self.dm_galaxy = dm_galaxy if dm_excess is None: self.calc_dm_excess() else: self.dm_excess = dm_excess self.z_host = z_host self.dm_host_est = dm_host_est self.dm_host_loc = dm_host_loc self.dm_index = dm_index self.scatt_index = scatt_index self.snr = snr self.width = width self.peak_flux = peak_flux self.fluence = fluence self.obs_bandwidth = obs_bandwidth self.obs_freq_central = obs_freq_central self.utc = utc self.z = None self.dm_igm = None self.cosmology = None self.method = None self.dm_galaxy_model = None self.z_conf_int_lower = None self.z_conf_int_upper = None self.tau_sc = None if ((self.fluence is None) and (peak_flux is not None and width is not None)): self.calc_fluence() if (raj and decj) or (gl and gb): if 'frame' in kwargs: self.skycoords = self.calc_skycoords(frame=kwargs["frame"]) else: self.skycoords = self.calc_skycoords() self.raj = self.skycoords.transform_to('icrs').ra self.decj = self.skycoords.transform_to('icrs').dec self.gl = self.skycoords.transform_to('galactic').l self.gb = self.skycoords.transform_to('galactic').b else: self.skycoords = None def __repr__(self): frb_repr = [] var_dict = vars(self) for var in var_dict: if var[1:] == "skycoords": continue var_name = var[1:] if isinstance(var_dict[var], u.Quantity): var_val = var_dict[var].value else: var_val = var_dict[var] frb_repr.append("{}={}".format(var_name, var_val)) return "Frb({})".format(", ".join(map(str, frb_repr))) @docstring_substitute(meth=methods.available_methods(), cosmo=cosmologies.available_cosmologies()) def calc_redshift(self, method='Batten2021', cosmology="Planck18", subtract_host=False, lookup_table=None): utils.check_type("subtract_host", subtract_host, bool) if subtract_host: input_dm = self.dm_excess - self.dm_host_est print(input_dm) else: input_dm = self.dm_excess if lookup_table is not None: raise NotImplementedError("Not made yet!") if method not in methods.available_methods(): err_msg = ("Method '{}' is not a valid method. " "Valid methods are: {}".format(method, methods.available_methods())) raise ValueError(err_msg) if method in methods.methods_hydrodynamic(): zvals, pdf, dz = self.calc_redshift_pdf(method=method) z_median = utils.calc_median_from_pdf(zvals, pdf) self.z = z_median self.method = method if method == "Batten2020": self.cosmology = "EAGLE" elif method in methods.methods_analytic(): if cosmology in cosmologies.available_cosmologies(): table_name = table.get_table_path(method) self.z = table.get_z_from_table(input_dm, table_name, cosmology) self.cosmology = cosmology self.method = method else: err_msg = ("Cosmology '{}' is not a valid cosmology. 
Valid " "cosmologies are: {}".format(cosmology, cosmologies.available_cosmologies())) raise ValueError(err_msg) else: table_name = utils.get_path_to_file_from_here("{}.hdf5".format(method), subdirs=["data"]) self.z = table.get_z_from_table(input_dm, table_name) self.cosmology = cosmology self.method = method return self.z def calc_redshift_pdf(self, method="Batten2021", cosmology="Planck18", prior="uniform", subtract_host=False, lookup_table=None): if method == "Batten2021": filename = utils.get_path_to_file_from_here("Batten2021.hdf5", subdirs=["data"]) elif method in methods.methods_analytic(): filename = "{}.hdf5".format(method) filename = utils.get_path_to_file_from_here(filename, subdirs=["data"]) raise NotImplementedError("Getting PDFs from analytic moddels has not been implemented yet!") else: raise NotImplementedError("This has not been implemented yet!") with h5py.File(filename, "r") as data: DMzHist = data["DMz_hist"][:] redshift_bin_widths = data["Redshift_Bin_Widths"][:] redshifts = data["Redshifts_Bin_Edges"][1:] DMBins = data["DM_Bin_Edges"][:] max_bin_idx = np.where(self.dm_excess.value <= DMBins)[0][0] prev_bin_idx = max_bin_idx - 1 hist_higher_dm = DMzHist[max_bin_idx] hist_lower_dm = DMzHist[prev_bin_idx] pdf2 = utils.normalise_to_pdf(hist_higher_dm, bin_widths=redshift_bin_widths) pdf1 = utils.normalise_to_pdf(hist_lower_dm, bin_widths=redshift_bin_widths) DMlower, DMhigher = DMBins[prev_bin_idx], DMBins[max_bin_idx] lin_interp_pdf = utils.linear_interpolate_pdfs(self.dm_excess.value, (DMlower, DMhigher), (pdf1, pdf2)) prior = utils.redshift_prior(redshifts, prior=prior) z_pdf = lin_interp_pdf * prior z_bins = redshifts dz = redshift_bin_widths self.z_bins = z_bins self.z_pdf = z_pdf self.dz = dz return z_bins, z_pdf, dz @docstring_copy(plot.redshift_pdf) def plot_redshift_pdf(self, *args, **kwargs): return plot.redshift_pdf(self, *args, **kwargs) @docstring_substitute(meth=methods.available_methods(), cosmo=cosmologies.available_cosmologies()) def calc_redshift_conf_int(self, method="Batten2021", sigma=1, scatter_percentage=0, **calc_redshift_kwargs): redshift = self.calc_redshift(**calc_redshift_kwargs) if method in methods.methods_hydrodynamic(): if method == "Batten2021": zvals, pdf, dz = self.calc_redshift_pdf(method="Batten2021") conf_lower_lim, conf_upper_lim = utils.sigma_to_pdf_percentiles(sigma=sigma) conf_int_lower = utils.calc_z_from_pdf_percentile(zvals, pdf, percentile=conf_lower_lim) conf_int_upper = utils.calc_z_from_pdf_percentile(zvals, pdf, percentile=conf_upper_lim) self.z_conf_int_lower = conf_int_lower self.z_conf_int_upper = conf_int_upper return self.z, (self.z_conf_int_lower, self.z_conf_int_upper) def calc_skycoords(self, frame=None): if self.raj is not None and self.decj is not None: if frame is None: frame = "icrs" skycoords = SkyCoord(ra=self.raj, dec=self.decj, frame=frame, unit=(u.hourangle, u.deg)) elif self.gl is not None and self.gb is not None: if frame is None: frame = "galactic" skycoords = SkyCoord(self.gl, self.gb, frame=frame, unit=u.deg) else: raise ValueError("To calculate skycoords either (raj and decj)" "or (gl, gb) must be provided") return skycoords def calc_dm_excess(self): dm_excess = self.dm.value - self.dm_galaxy.value if dm_excess < 0: print("dm_excess < 0: This implies that the DM estimate " "from the Milky Way is higher than the observed DM. 
" "Setting dm_excess = 0") self.dm_excess = 0 else: self.dm_excess = dm_excess return dm_excess def calc_dm_galaxy(self, model='ymw16', include_halo=False, return_tau_sc=False): YMW16_options = ["ymw16", "YMW16"] NE2001_options = ["ne2001", "NE2001"] if model in YMW16_options: model = 'YMW16' elif model in NE2001_options: model = 'NE2001' else: raise ValueError("'{}' is not a valid galactic DM model".format(model)) coord_list = [self.skycoords, self.raj, self.decj, self.gl, self.gb] if all(val is None for val in coord_list): raise ValueError("""Can not calculate dm_galaxy since coordinates for FRB burst were not provided. Please provide (raj, decj) or (gl, gb) coordinates.""") elif (self.skycoords is None and (self.raj is not None and self.decj is not None) or (self.gl is not None and self.gb is not None)): self._skycoords = self.calc_skycoords() dm_galaxy, tau_sc = pygedm.dist_to_dm( gl=self._skycoords.galactic.l, gb=self._skycoords.galactic.b, dist=25000.0, method=model) if include_halo: self.galaxy_halo = pygedm.calculate_halo_dm( gl=self._skycoords.galactic.l, gb=self._skycoords.galactic.b, method='yt2020' ) else: self.galaxy_halo = 0 * u.pc * u.cm**(-3) self.dm_galaxy_model = model self.dm_galaxy = dm_galaxy.value + self.galaxy_halo.value self.tau_sc = tau_sc.value self.calc_dm_excess() if return_tau_sc: return self.dm_galaxy, self.tau_sc else: return self.dm_galaxy def calc_dm_igm(self): if self.z_host is None: err_msg = ("z_host is None. Provide a non zero value for the " "FRB host redshift") raise ValueError(err_msg) if self.dm_host_loc == 0.0: err_msg = ("dm_host_loc = 0. The dm_igm will be the same as " "dm_excess. Provide a non-zero value for dm_host_loc") raise ValueError(err_msg) dm_igm = self.dm_excess - (self.dm_host_loc / (1 + self.z_host)) self.dm_igm = dm_igm.value return self.dm_igm def calc_fluence(self): if (self.width is None) or (self.peak_flux is None): err_msg = ("calc_fluence requires both width and peak_flux " "to not be None") raise ValueError(err_msg) fluence = self.width * self.peak_flux self.fluence = fluence.value return fluence def calc_luminosity_distance(self): if self.z is None: raise ValueError( """ Can not calculate luminosity distance without a redshift. Use calc_redshift() to calculate the FRB redshift or provide a value for z_host. """) cosmo = cosmologies.cosmology_functions()[self.cosmology] return cosmo.luminosity_distance(self.z) def calc_comoving_distance(self): if self.z is not None: z_sample = self.z elif self.z_host is not None: z_sample = self.z_host else: raise ValueError( """ Can not calculate comoving distance without a redshift. Use calc_redshift() to calculate the FRB redshift or provide a value for z_host. """) cosmo = cosmologies.cosmology_functions()[self.cosmology] return cosmo.comoving_distance(z_sample) def calc_luminosity(self, use_bandwidth=False): D = self.calc_luminosity_distance() if self.peak_flux is None: err_msg = ("Can not calculate energy without peak_flux. Provide " "peak_flux before calculating luminosity.") raise ValueError(err_msg) else: S = self.peak_flux if use_bandwidth: if self.obs_bandwidth is None: err_msg = ("Can not calculate energy without observing " "bandwidth. Provide obs_bandwidth before " "calculating energy.") raise ValueError(err_msg) B = self.obs_bandwidth lum = 4 * np.pi * D**2 * S * B else: if self.obs_freq_central is None: err_msg = ("Can not calculate energy without observing " "frequency. 
Provide obs_freq_central before " "calculating energy.") raise ValueError(err_msg) nu = self.obs_freq_central lum = 4 * np.pi * D**2 * S * nu return lum.to('erg s**-1') def calc_energy(self, use_bandwidth=False): D = self.calc_luminosity_distance() if self.fluence is None: err_msg = ("Can not calculate energy without fluence. Provide " "fluence or peak_flux and width before calculating " "energy.") raise ValueError(err_msg) else: fluence = self.fluence if use_bandwidth: if self.obs_bandwidth is None: err_msg = ("Can not calculate energy without observing " "bandwidth. Provide obs_bandwidth before " "calculating energy.") raise ValueError(err_msg) else: bandwidth = self.obs_bandwidth energy = fluence * bandwidth * 4 * np.pi * D**2 * (1 + self.z)**-1 else: if self.obs_freq_central is None: err_msg = ("Can not calculate energy without observing " "frequency. Provide obs_freq_central before " "calculating energy.") raise ValueError(err_msg) else: nu = self.obs_freq_central energy = fluence * nu * 4 * np.pi * D**2 * (1 + self.z)**-1 return energy.to("erg")
BSD 3-Clause New or Revised License
nodejs/node-gyp
gyp/pylib/gyp/generator/ninja.py
NinjaWriter.WriteSpec
python
def WriteSpec(self, spec, config_name, generator_flags): self.config_name = config_name self.name = spec["target_name"] self.toolset = spec["toolset"] config = spec["configurations"][config_name] self.target = Target(spec["type"]) self.is_standalone_static_library = bool( spec.get("standalone_static_library", 0) ) self.target_rpath = generator_flags.get("target_rpath", r"\$$ORIGIN/lib/") self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) self.xcode_settings = self.msvs_settings = None if self.flavor == "mac": self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) mac_toolchain_dir = generator_flags.get("mac_toolchain_dir", None) if mac_toolchain_dir: self.xcode_settings.mac_toolchain_dir = mac_toolchain_dir if self.flavor == "win": self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec, generator_flags) arch = self.msvs_settings.GetArch(config_name) self.ninja.variable("arch", self.win_env[arch]) self.ninja.variable("cc", "$cl_" + arch) self.ninja.variable("cxx", "$cl_" + arch) self.ninja.variable("cc_host", "$cl_" + arch) self.ninja.variable("cxx_host", "$cl_" + arch) self.ninja.variable("asm", "$ml_" + arch) if self.flavor == "mac": self.archs = self.xcode_settings.GetActiveArchs(config_name) if len(self.archs) > 1: self.arch_subninjas = { arch: ninja_syntax.Writer( OpenOutput( os.path.join( self.toplevel_build, self._SubninjaNameForArch(arch) ), "w", ) ) for arch in self.archs } actions_depends = [] compile_depends = [] if "dependencies" in spec: for dep in spec["dependencies"]: if dep in self.target_outputs: target = self.target_outputs[dep] actions_depends.append(target.PreActionInput(self.flavor)) compile_depends.append(target.PreCompileInput()) if target.uses_cpp: self.target.uses_cpp = True actions_depends = [item for item in actions_depends if item] compile_depends = [item for item in compile_depends if item] actions_depends = self.WriteCollapsedDependencies( "actions_depends", actions_depends ) compile_depends = self.WriteCollapsedDependencies( "compile_depends", compile_depends ) self.target.preaction_stamp = actions_depends self.target.precompile_stamp = compile_depends extra_sources = [] mac_bundle_depends = [] self.target.actions_stamp = self.WriteActionsRulesCopies( spec, extra_sources, actions_depends, mac_bundle_depends ) compile_depends_stamp = self.target.actions_stamp or compile_depends link_deps = [] try: sources = extra_sources + spec.get("sources", []) except TypeError: print("extra_sources: ", str(extra_sources)) print('spec.get("sources"): ', str(spec.get("sources"))) raise if sources: if self.flavor == "mac" and len(self.archs) > 1: for arch in self.archs: self.ninja.subninja(self._SubninjaNameForArch(arch)) pch = None if self.flavor == "win": gyp.msvs_emulation.VerifyMissingSources( sources, self.abs_build_dir, generator_flags, self.GypPathToNinja ) pch = gyp.msvs_emulation.PrecompiledHeader( self.msvs_settings, config_name, self.GypPathToNinja, self.GypPathToUniqueOutput, self.obj_ext, ) else: pch = gyp.xcode_emulation.MacPrefixHeader( self.xcode_settings, self.GypPathToNinja, lambda path, lang: self.GypPathToUniqueOutput(path + "-" + lang), ) link_deps = self.WriteSources( self.ninja, config_name, config, sources, compile_depends_stamp, pch, spec, ) obj_outputs = [f for f in sources if f.endswith(self.obj_ext)] if obj_outputs: if self.flavor != "mac" or len(self.archs) == 1: link_deps += [self.GypPathToNinja(o) for o in obj_outputs] else: print( "Warning: Actions/rules writing object files don't work with " "multiarch targets, 
dropping. (target %s)" % spec["target_name"] ) elif self.flavor == "mac" and len(self.archs) > 1: link_deps = collections.defaultdict(list) compile_deps = self.target.actions_stamp or actions_depends if self.flavor == "win" and self.target.type == "static_library": self.target.component_objs = link_deps self.target.compile_deps = compile_deps output = None is_empty_bundle = not link_deps and not mac_bundle_depends if link_deps or self.target.actions_stamp or actions_depends: output = self.WriteTarget( spec, config_name, config, link_deps, compile_deps ) if self.is_mac_bundle: mac_bundle_depends.append(output) if self.is_mac_bundle: output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle) if not output: return None assert self.target.FinalOutput(), output return self.target
The main entry point for NinjaWriter: write the build rules for a spec. Returns a Target object, which represents the output paths for this spec. Returns None if there are no outputs (e.g. a settings-only 'none' type target).
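An invocation sketch (heavily simplified; in gyp this is driven by the generator's own machinery, and `spec`/`generator_flags` are large dicts built from the .gyp files):

# Hypothetical call; `writer` is a fully constructed NinjaWriter and `spec` a parsed target dict.
target = writer.WriteSpec(spec, "Default", generator_flags)
if target is not None:
    print(target.FinalOutput())   # final output path (binary, bundle or stamp) for the target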
https://github.com/nodejs/node-gyp/blob/5585792922a97f0629f143c560efd74470eae87f/gyp/pylib/gyp/generator/ninja.py#L378-L550
import collections import copy import hashlib import json import multiprocessing import os.path import re import signal import subprocess import sys import gyp import gyp.common import gyp.msvs_emulation import gyp.MSVSUtil as MSVSUtil import gyp.xcode_emulation from io import StringIO from gyp.common import GetEnvironFallback import gyp.ninja_syntax as ninja_syntax generator_default_variables = { "EXECUTABLE_PREFIX": "", "EXECUTABLE_SUFFIX": "", "STATIC_LIB_PREFIX": "lib", "STATIC_LIB_SUFFIX": ".a", "SHARED_LIB_PREFIX": "lib", "INTERMEDIATE_DIR": "$!INTERMEDIATE_DIR", "SHARED_INTERMEDIATE_DIR": "$!PRODUCT_DIR/gen", "PRODUCT_DIR": "$!PRODUCT_DIR", "CONFIGURATION_NAME": "$|CONFIGURATION_NAME", "RULE_INPUT_ROOT": "${root}", "RULE_INPUT_DIRNAME": "${dirname}", "RULE_INPUT_PATH": "${source}", "RULE_INPUT_EXT": "${ext}", "RULE_INPUT_NAME": "${name}", } generator_additional_non_configuration_keys = [] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] generator_filelist_paths = None generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() def StripPrefix(arg, prefix): if arg.startswith(prefix): return arg[len(prefix) :] return arg def QuoteShellArgument(arg, flavor): if re.match(r"^[a-zA-Z0-9_=.\\/-]+$", arg): return arg if flavor == "win": return gyp.msvs_emulation.QuoteForRspFile(arg) return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'" def Define(d, flavor): if flavor == "win": d = d.replace("#", "\\%03o" % ord("#")) return QuoteShellArgument(ninja_syntax.escape("-D" + d), flavor) def AddArch(output, arch): output, extension = os.path.splitext(output) return f"{output}.{arch}{extension}" class Target: def __init__(self, type): self.type = type self.preaction_stamp = None self.precompile_stamp = None self.actions_stamp = None self.binary = None self.bundle = None self.component_objs = None self.compile_deps = None self.import_lib = None self.uses_cpp = False def Linkable(self): return self.type in ("static_library", "shared_library") def UsesToc(self, flavor): if flavor == "win" or self.bundle: return False return self.type in ("shared_library", "loadable_module") def PreActionInput(self, flavor): if self.UsesToc(flavor): return self.FinalOutput() + ".TOC" return self.FinalOutput() or self.preaction_stamp def PreCompileInput(self): return self.actions_stamp or self.precompile_stamp def FinalOutput(self): return self.bundle or self.binary or self.actions_stamp class NinjaWriter: def __init__( self, hash_for_rules, target_outputs, base_dir, build_dir, output_file, toplevel_build, output_file_name, flavor, toplevel_dir=None, ): self.hash_for_rules = hash_for_rules self.target_outputs = target_outputs self.base_dir = base_dir self.build_dir = build_dir self.ninja = ninja_syntax.Writer(output_file) self.toplevel_build = toplevel_build self.output_file_name = output_file_name self.flavor = flavor self.abs_build_dir = None if toplevel_dir is not None: self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir, build_dir)) self.obj_ext = ".obj" if flavor == "win" else ".o" if flavor == "win": self.win_env = {} for arch in ("x86", "x64"): self.win_env[arch] = "environment." 
+ arch build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir) self.build_to_base = os.path.join(build_to_top, base_dir) base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir) self.base_to_build = os.path.join(base_to_top, build_dir) def ExpandSpecial(self, path, product_dir=None): PRODUCT_DIR = "$!PRODUCT_DIR" if PRODUCT_DIR in path: if product_dir: path = path.replace(PRODUCT_DIR, product_dir) else: path = path.replace(PRODUCT_DIR + "/", "") path = path.replace(PRODUCT_DIR + "\\", "") path = path.replace(PRODUCT_DIR, ".") INTERMEDIATE_DIR = "$!INTERMEDIATE_DIR" if INTERMEDIATE_DIR in path: int_dir = self.GypPathToUniqueOutput("gen") path = path.replace( INTERMEDIATE_DIR, os.path.join(product_dir or "", int_dir) ) CONFIGURATION_NAME = "$|CONFIGURATION_NAME" path = path.replace(CONFIGURATION_NAME, self.config_name) return path def ExpandRuleVariables(self, path, root, dirname, source, ext, name): if self.flavor == "win": path = self.msvs_settings.ConvertVSMacros(path, config=self.config_name) path = path.replace(generator_default_variables["RULE_INPUT_ROOT"], root) path = path.replace(generator_default_variables["RULE_INPUT_DIRNAME"], dirname) path = path.replace(generator_default_variables["RULE_INPUT_PATH"], source) path = path.replace(generator_default_variables["RULE_INPUT_EXT"], ext) path = path.replace(generator_default_variables["RULE_INPUT_NAME"], name) return path def GypPathToNinja(self, path, env=None): if env: if self.flavor == "mac": path = gyp.xcode_emulation.ExpandEnvVars(path, env) elif self.flavor == "win": path = gyp.msvs_emulation.ExpandMacros(path, env) if path.startswith("$!"): expanded = self.ExpandSpecial(path) if self.flavor == "win": expanded = os.path.normpath(expanded) return expanded if "$|" in path: path = self.ExpandSpecial(path) assert "$" not in path, path return os.path.normpath(os.path.join(self.build_to_base, path)) def GypPathToUniqueOutput(self, path, qualified=True): path = self.ExpandSpecial(path) assert not path.startswith("$"), path obj = "obj" if self.toolset != "target": obj += "." + self.toolset path_dir, path_basename = os.path.split(path) assert not os.path.isabs(path_dir), ( "'%s' can not be absolute path (see crbug.com/462153)." % path_dir ) if qualified: path_basename = self.name + "." + path_basename return os.path.normpath( os.path.join(obj, self.base_dir, path_dir, path_basename) ) def WriteCollapsedDependencies(self, name, targets, order_only=None): assert targets == [item for item in targets if item], targets if len(targets) == 0: assert not order_only return None if len(targets) > 1 or order_only: stamp = self.GypPathToUniqueOutput(name + ".stamp") targets = self.ninja.build(stamp, "stamp", targets, order_only=order_only) self.ninja.newline() return targets[0] def _SubninjaNameForArch(self, arch): output_file_base = os.path.splitext(self.output_file_name)[0] return f"{output_file_base}.{arch}.ninja"
MIT License
lun-4/jose
unused/coins.py
Coins.update_accounts
python
async def update_accounts(self, accounts: list):
    total = 0

    for account in accounts:
        if account['amount'].is_finite():
            account['amount'] = round(account['amount'], 3)

        try:
            account['taxpaid'] = round(account['taxpaid'], 4)
        except:
            pass

        self.cache[account['id']] = self.convert_account(account)
        account = self.unconvert_account(account)

        res = await self.jcoin_coll.update_one({'id': account['id']},
                                               {'$set': account})

        if res.modified_count > 1:
            log.warning('Updating more than supposed to (%d)',
                        res.modified_count)
        else:
            total += res.modified_count

    log.debug('[update_accounts] Updated %d documents', total)
Update accounts in the jcoin collection. Converts `decimal.Decimal` values to `str` in the ``amount`` field so that decimal precision is preserved when fetching and saving. Also updates the cache.
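A rough usage sketch of this method; the account dict, IDs and the `coins_cog` instance are illustrative placeholders, and a configured Coins cog with a MongoDB connection is assumed:

import asyncio
import decimal

accounts = [
    {'id': 12345, 'type': 'user',
     'amount': decimal.Decimal('10.5551'),      # rounded to 3 places on update
     'taxpaid': decimal.Decimal('0.12345'),     # rounded to 4 places on update
     'times_stolen': 0, 'success_steal': 0,
     'loaning_from': None, 'interest_tbank': ''},
]

async def flush(coins_cog):
    # writes each account back to the jcoin collection and refreshes the cache
    await coins_cog.update_accounts(accounts)

# asyncio.run(flush(coins_cog))  # requires a live Coins cog and MongoDB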
https://github.com/lun-4/jose/blob/fdafb121e0c5d7a731b52b3503f6b12e3538948a/unused/coins.py#L291-L322
import time import decimal import logging import collections import asyncio import pprint import math import discord from random import SystemRandom from discord.ext import commands from .common import Cog, CoinConverter log = logging.getLogger(__name__) random = SystemRandom() REWARD_COOLDOWN = 1800 TAX_CONSTANT = decimal.Decimal('1.0065') PROB_CONSTANT = decimal.Decimal('1.003384590736') COIN_BASE_PROBABILITY = decimal.Decimal('0.012') TAX_MULTIPLIER = decimal.Decimal('1.42') class TransferError(Exception): pass TRANSFER_OBJECTS = [ 'bananas', 'computers', 'dogs', 'memes', 'cats', 'coins', 'paintings', ] class Coins(Cog, requires=['config']): def __init__(self, bot): super().__init__(bot) self.jcoin_coll = self.config.jose_db['josecoin'] self.hidecoin_coll = self.config.jose_db['jcoin-hidecoin'] self.BASE_PROBABILITY = COIN_BASE_PROBABILITY self.INF = decimal.Decimal('inf') self.TransferError = TransferError self.bot.simple_exc.append(TransferError) self.reward_env = {} self.acct_cache = collections.defaultdict(list) self.gacct_locks = collections.defaultdict(asyncio.Lock) self.cache = {} self.transfer_lock = asyncio.Lock() self.delete_lock = asyncio.Lock() self.locked_accounts = [] self.gdp = None self.transfers_done = 0 def get_name(self, user_id, account=None): if isinstance(user_id, discord.Guild): return f'taxbank:{user_id.name}' elif isinstance(user_id, discord.User): return str(user_id) obj = self.bot.get_user(int(user_id)) if obj is None: obj = self.bot.get_guild(user_id) if obj is not None: obj = f'taxbank:{obj}' if obj is None: if account: if account['type'] == 'user': return f'Unfindable User {user_id}' elif account['type'] == 'guild': return f'Unfindable Guild {user_id}' else: return f'Unfindable Unknown {user_id}' else: return f'Unfindable ID {user_id}' return str(obj) def empty_account(self, account_id, account_type, amount): if account_type == 'user': return { 'type': 'user', 'amount': str(decimal.Decimal(amount)), 'id': account_id, 'taxpaid': str(decimal.Decimal(0)), 'times_stolen': 0, 'success_steal': 0, 'loaning_from': None, 'interest_tbank': '', } elif account_type == 'taxbank': return { 'type': 'taxbank', 'id': account_id, 'amount': str(decimal.Decimal(0)), 'loans': {}, } async def new_account(self, account_id: int, account_type: str='user', init_amount: int=0): if (await self.get_account(account_id, True)) is not None: return False account = self.empty_account(account_id, account_type, init_amount) try: r = await self.jcoin_coll.insert_one(account) log.debug('ACK insert: %s', r.acknowledged) self.cache[account_id] = account return True except: log.exception('Error creating a new account') return False async def sane(self): if not (await self.get_account(self.bot.user.id)): await self.new_account(self.bot.user.id, 'user', 'inf') async def ensure_taxbank(self, ctx): if ctx.guild is None: raise self.SayException("No guild was found to be a taxbank, " "don't do this command in a DM.") if await self.get_account(ctx.guild.id) is None: await self.new_account(ctx.guild.id, 'taxbank') def convert_account(self, account: dict) -> dict: if not account: return None new_account = dict(account) new_account['amount'] = decimal.Decimal(account['amount']) try: new_account['taxpaid'] = decimal.Decimal(account['taxpaid']) except KeyError: pass return new_account def unconvert_account(self, account: dict) -> dict: if not account: return None new_account = dict(account) new_account['amount'] = str(account['amount']) try: new_account['taxpaid'] = str(account['taxpaid']) except KeyError: 
pass return new_account async def get_account(self, account_id: int, override_cache: bool=False) -> dict: if account_id in self.cache and (not override_cache): return self.convert_account(self.cache[account_id]) account = await self.jcoin_coll.find_one({'id': account_id}) if not account: self.cache[account_id] = None return None c_account = self.convert_account(account) if not override_cache: self.cache[account_id] = c_account return c_account def cache_invalidate(self, user_id: int) -> 'NoneType': try: self.cache.pop(user_id) except KeyError: pass async def get_accounts_type(self, acc_type: str) -> list: cur = self.jcoin_coll.find({'type': acc_type}) accounts = [] async for account in cur: accounts.append(self.convert_account(account)) return accounts def lock_account(self, account_id: int): try: self.locked_accounts.index(account_id) except ValueError: self.locked_accounts.append(account_id) def unlock_account(self, account_id: int): try: self.locked_accounts.remove(account_id) except ValueError: pass def is_locked(self, account_id: int) -> bool: try: self.locked_accounts.index(account_id) return True except ValueError: return False
MIT License
numba/numba
numba/pycc/compiler.py
_ModuleCompiler._emit_python_wrapper
python
def _emit_python_wrapper(self, llvm_module):
    raise NotImplementedError
Emit generated Python wrapper and extension module code.
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/pycc/compiler.py#L123-L126
import logging import os import sys from llvmlite import ir from llvmlite.binding import Linkage import llvmlite.llvmpy.core as lc from numba.pycc import llvm_types as lt from numba.core.compiler import compile_extra, Flags from numba.core.compiler_lock import global_compiler_lock from numba.core.registry import cpu_target from numba.core.runtime import nrtdynmod from numba.core import cgutils logger = logging.getLogger(__name__) __all__ = ['Compiler'] NULL = lc.Constant.null(lt._void_star) ZERO = lc.Constant.int(lt._int32, 0) ONE = lc.Constant.int(lt._int32, 1) METH_VARARGS_AND_KEYWORDS = lc.Constant.int(lt._int32, 1|2) def get_header(): import numpy import textwrap return textwrap.dedent("""\ #include <stdint.h> #ifndef HAVE_LONGDOUBLE #define HAVE_LONGDOUBLE %d #endif typedef struct { float real; float imag; } complex64; typedef struct { double real; double imag; } complex128; #if HAVE_LONGDOUBLE typedef struct { long double real; long double imag; } complex256; #endif typedef float float32; typedef double float64; #if HAVE_LONGDOUBLE typedef long double float128; #endif """ % hasattr(numpy, 'complex256')) class ExportEntry(object): def __init__(self, symbol, signature, function): self.symbol = symbol self.signature = signature self.function = function def __repr__(self): return "ExportEntry(%r, %r)" % (self.symbol, self.signature) class _ModuleCompiler(object): method_def_ty = lc.Type.struct((lt._int8_star, lt._void_star, lt._int32, lt._int8_star)) method_def_ptr = lc.Type.pointer(method_def_ty) env_def_ty = lc.Type.struct((lt._void_star, lt._int32, lt._void_star)) env_def_ptr = lc.Type.pointer(env_def_ty) def __init__(self, export_entries, module_name, use_nrt=False, **aot_options): self.module_name = module_name self.export_python_wrap = False self.dll_exports = [] self.export_entries = export_entries self.external_init_function = None self.use_nrt = use_nrt self.typing_context = cpu_target.typing_context self.context = cpu_target.target_context.with_aot_codegen( self.module_name, **aot_options) def _mangle_method_symbol(self, func_name): return "._pycc_method_%s" % (func_name,)
BSD 2-Clause Simplified License
facultyai/faculty
faculty/datasets/__init__.py
rmdir
python
def rmdir(project_path, project_id=None, object_client=None):
    contents = ls(
        prefix=project_path,
        project_id=project_id,
        show_hidden=True,
        object_client=object_client,
    )

    rationalised_path = _rationalise_path(project_path)
    project_path_as_file = rationalised_path.rstrip("/")
    project_path_as_dir = project_path_as_file + "/"

    if contents == [project_path_as_dir]:
        rm(
            project_path_as_dir,
            project_id=project_id,
            object_client=object_client,
            recursive=True,
        )
    elif contents == [project_path_as_file]:
        raise DatasetsError("'{}' Not a directory".format(project_path))
    elif project_path_as_dir not in contents:
        raise DatasetsError(
            "'{}' No such file or directory".format(project_path)
        )
    else:
        raise DatasetsError("'{}' Directory is not empty".format(project_path))
Remove an empty directory from the project datasets.

Parameters
----------
project_path : str
    The path of the directory to remove.
project_id : str, optional
    The project to remove the directory from. You need to have access
    to this project for it to work. Defaults to the project set by
    FACULTY_PROJECT_ID in your environment.
object_client : faculty.clients.object.ObjectClient, optional
    Advanced - can be used to benefit from caching in chained
    interactions with datasets.
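A minimal usage sketch; the directory path and project ID below are placeholders, and the project ID can usually be omitted when FACULTY_PROJECT_ID is set in the environment:

from faculty import datasets

# succeeds only if the directory exists and is empty,
# otherwise a DatasetsError is raised
datasets.rmdir("/scratch/old-run/")
# datasets.rmdir("/scratch/old-run/", project_id="<your-project-id>")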
https://github.com/facultyai/faculty/blob/c8316e2bb567b3081989b6f51f9b5bd0ee93aa85/faculty/datasets/__init__.py#L418-L459
import fnmatch import os import posixpath import contextlib import tempfile import io from faculty.session import get_session from faculty.context import get_context from faculty.clients.object import ObjectClient from faculty.datasets import transfer from faculty.datasets.util import DatasetsError SherlockMLDatasetsError = DatasetsError def ls(prefix="/", project_id=None, show_hidden=False, object_client=None): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() list_response = object_client.list(project_id, prefix) paths = [obj.path for obj in list_response.objects] while list_response.next_page_token is not None: list_response = object_client.list( project_id, prefix, list_response.next_page_token ) paths += [obj.path for obj in list_response.objects] if show_hidden: return paths else: non_hidden_paths = [ path for path in paths if not any(element.startswith(".") for element in path.split("/")) ] return non_hidden_paths def glob( pattern, prefix="/", project_id=None, show_hidden=False, object_client=None ): contents = ls( prefix=prefix, project_id=project_id, show_hidden=show_hidden, object_client=object_client, ) return fnmatch.filter(contents, pattern) def _isdir(project_path, project_id=None, object_client=None): if not project_path.endswith("/"): project_path += "/" matches = ls( project_path, project_id=project_id, show_hidden=True, object_client=object_client, ) return len(matches) >= 1 def _isfile(project_path, project_id=None, object_client=None): if _isdir(project_path, project_id): return False matches = ls( project_path, project_id=project_id, show_hidden=True, object_client=object_client, ) rationalised_path = _rationalise_path(project_path) return any(match == rationalised_path for match in matches) def _create_parent_directories(project_path, project_id, object_client): parent_path = posixpath.dirname(project_path) object_client.create_directory(project_id, parent_path, parents=True) def _put_file(local_path, project_path, project_id, object_client): transfer.upload_file(object_client, project_id, project_path, local_path) def _put_directory(local_path, project_path, project_id, object_client): object_client.create_directory(project_id, project_path) for entry in os.listdir(local_path): _put_recursive( os.path.join(local_path, entry), posixpath.join(project_path, entry), project_id, object_client, ) def _put_recursive(local_path, project_path, project_id, object_client): if os.path.isdir(local_path): _put_directory(local_path, project_path, project_id, object_client) else: _put_file(local_path, project_path, project_id, object_client) def put(local_path, project_path, project_id=None, object_client=None): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() if hasattr(os, "fspath"): local_path = os.fspath(local_path) _create_parent_directories(project_path, project_id, object_client) _put_recursive(local_path, project_path, project_id, object_client) def _get_file(project_path, local_path, project_id, object_client): if local_path.endswith("/"): msg = ( "the source path {} is a normal file but the destination " "path {} indicates a directory - please provide a " "full destination path" ).format(repr(project_path), repr(local_path)) raise DatasetsError(msg) transfer.download_file(object_client, project_id, project_path, local_path) def _get_directory(project_path, local_path, project_id, object_client): containing_dir = 
os.path.dirname(local_path) if not containing_dir: containing_dir = "." if not os.path.isdir(containing_dir): msg = "No such directory: {}".format(repr(containing_dir)) raise IOError(msg) paths_to_get = ls( project_path, project_id=project_id, show_hidden=True, object_client=object_client, ) for object_path in paths_to_get: local_dest = os.path.join( local_path, _get_relative_path(project_path, object_path) ) if object_path.endswith("/"): if not os.path.exists(local_dest): os.makedirs(local_dest) else: dirname = os.path.dirname(local_dest) if not os.path.exists(dirname): os.makedirs(dirname) _get_file(object_path, local_dest, project_id, object_client) def get(project_path, local_path, project_id=None, object_client=None): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() if hasattr(os, "fspath"): local_path = os.fspath(local_path) if _isdir(project_path, project_id, object_client): _get_directory(project_path, local_path, project_id, object_client) else: _get_file(project_path, local_path, project_id, object_client) def mv(source_path, destination_path, project_id=None, object_client=None): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() if source_path == destination_path: return cp( source_path, destination_path, project_id=project_id, recursive=True, object_client=object_client, ) rm( source_path, project_id=project_id, recursive=True, object_client=object_client, ) def cp( source_path, destination_path, project_id=None, recursive=False, object_client=None, ): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() _create_parent_directories(destination_path, project_id, object_client) object_client.copy( project_id, source_path, destination_path, recursive=recursive ) def rm(project_path, project_id=None, recursive=False, object_client=None): project_id = project_id or get_context().project_id object_client = object_client or _default_session_object_client() object_client.delete(project_id, project_path, recursive=recursive)
Apache License 2.0
yukinoshita47/yuki-chan-the-auto-pentest
Module/metagoofil/hachoir_core/field/generic_field_set.py
GenericFieldSet.seekByte
python
def seekByte(self, address, name="padding[]", description=None, relative=True, null=False):
    return self.seekBit(address * 8, name, description, relative, null=null)
Same as seekBit(), but with the address given in bytes.
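A hedged sketch of how seekByte() is typically used inside a parser's createFields(); the field set class, the 0x40 offset and the field names are illustrative, not taken from the original repository:

from hachoir_core.field import FieldSet, UInt32

class MyChunk(FieldSet):
    def createFields(self):
        # equivalent to self.seekBit(0x40 * 8, "padding[]")
        padding = self.seekByte(0x40, "padding[]")
        if padding:                      # None is returned if already there
            yield padding
        yield UInt32(self, "size", "Payload size stored at offset 0x40")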
https://github.com/yukinoshita47/yuki-chan-the-auto-pentest/blob/bea1af4e1d544eadc166f728be2f543ea10af191/Module/metagoofil/hachoir_core/field/generic_field_set.py#L431-L435
from hachoir_core.field import (MissingField, BasicFieldSet, Field, ParserError, createRawField, createNullField, createPaddingField, FakeArray) from hachoir_core.dict import Dict, UniqKeyError from hachoir_core.error import HACHOIR_ERRORS from hachoir_core.tools import lowerBound import hachoir_core.config as config class GenericFieldSet(BasicFieldSet): _current_size = 0 def __init__(self, parent, name, stream, description=None, size=None): BasicFieldSet.__init__(self, parent, name, stream, description, size) self._fields = Dict() self._field_generator = self.createFields() self._array_cache = {} self.__is_feeding = False def array(self, key): try: return self._array_cache[key] except KeyError: array = FakeArray(self, key) self._array_cache[key] = array return self._array_cache[key] def reset(self): BasicFieldSet.reset(self) self._fields = Dict() self._field_generator = self.createFields() self._current_size = 0 self._array_cache = {} def __str__(self): return '<%s path=%s, current_size=%s, current length=%s>' % (self.__class__.__name__, self.path, self._current_size, len(self._fields)) def __len__(self): if self._field_generator is not None: self._feedAll() return len(self._fields) def _getCurrentLength(self): return len(self._fields) current_length = property(_getCurrentLength) def _getSize(self): if self._size is None: self._feedAll() return self._size size = property(_getSize, doc="Size in bits, may create all fields to get size") def _getCurrentSize(self): assert not(self.done) return self._current_size current_size = property(_getCurrentSize) eof = property(lambda self: self._checkSize(self._current_size + 1, True) < 0) def _checkSize(self, size, strict): field = self while field._size is None: if not field._parent: assert self.stream.size is None if not strict: return None if self.stream.sizeGe(size): return 0 break size += field._address field = field._parent return field._size - size autofix = property(lambda self: self.root.autofix) def _addField(self, field): if not issubclass(field.__class__, Field): raise ParserError("Field type (%s) is not a subclass of 'Field'!" % field.__class__.__name__) assert isinstance(field._name, str) if field._name.endswith("[]"): self.setUniqueFieldName(field) if config.debug: self.info("[+] DBG: _addField(%s)" % field.name) if field._address != self._current_size: self.warning("Fix address of %s to %s (was %s)" % (field.path, self._current_size, field._address)) field._address = self._current_size ask_stop = False self.__is_feeding = True try: field_size = field.size except HACHOIR_ERRORS, err: if field.is_field_set and field.current_length and field.eof: self.warning("Error when getting size of '%s': %s" % (field.name, err)) field._stopFeeding() ask_stop = True else: self.warning("Error when getting size of '%s': delete it" % field.name) self.__is_feeding = False raise self.__is_feeding = False dsize = self._checkSize(field._address + field.size, False) if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0): if self.autofix and self._current_size: self._fixFieldSize(field, field.size + dsize) else: raise ParserError("Field %s is too large!" 
% field.path) self._current_size += field.size try: self._fields.append(field._name, field) except UniqKeyError, err: self.warning("Duplicate field name " + unicode(err)) field._name += "[]" self.setUniqueFieldName(field) self._fields.append(field._name, field) if ask_stop: raise StopIteration() def _fixFieldSize(self, field, new_size): if new_size > 0: if field.is_field_set and 0 < field.size: field._truncate(new_size) return if self._size is None: self._size = self._current_size + new_size self.warning("[Autofix] Delete '%s' (too large)" % field.path) raise StopIteration() def _getField(self, name, const): field = Field._getField(self, name, const) if field is None: if name in self._fields: field = self._fields[name] elif self._field_generator is not None and not const: field = self._feedUntil(name) return field def getField(self, key, const=True): if isinstance(key, (int, long)): if key < 0: raise KeyError("Key must be positive!") if not const: self.readFirstFields(key+1) if len(self._fields.values) <= key: raise MissingField(self, key) return self._fields.values[key] return Field.getField(self, key, const) def _truncate(self, size): assert size > 0 if size < self._current_size: self._size = size while True: field = self._fields.values[-1] if field._address < size: break del self._fields[-1] self._current_size = field._address size -= field._address if size < field._size: if field.is_field_set: field._truncate(size) else: del self._fields[-1] field = createRawField(self, size, "raw[]") self._fields.append(field._name, field) self._current_size = self._size else: assert size < self._size or self._size is None self._size = size if self._size == self._current_size: self._field_generator = None def _deleteField(self, index): field = self._fields.values[index] size = field.size self._current_size -= size del self._fields[index] return field def _fixLastField(self): assert self._size is not None message = ["stop parser"] self._field_generator = None while self._size < self._current_size: field = self._deleteField(len(self._fields)-1) message.append("delete field %s" % field.path) assert self._current_size <= self._size size = self._size - self._current_size if size: field = createRawField(self, size, "raw[]") message.append("add padding") self._current_size += field.size self._fields.append(field._name, field) else: field = None message = ", ".join(message) self.warning("[Autofix] Fix parser error: " + message) assert self._current_size == self._size return field def _stopFeeding(self): new_field = None if self._size is None: if self._parent: self._size = self._current_size elif self._size != self._current_size: if self.autofix: new_field = self._fixLastField() else: raise ParserError("Invalid parser \"%s\" size!" 
% self.path) self._field_generator = None return new_field def _fixFeedError(self, exception): if self._size is None or not self.autofix: return False self.warning(unicode(exception)) return self._fixLastField() def _feedUntil(self, field_name): if self.__is_feeding or (self._field_generator and self._field_generator.gi_running): self.warning("Unable to get %s (and generator is already running)" % field_name) return None try: while True: field = self._field_generator.next() self._addField(field) if field.name == field_name: return field except HACHOIR_ERRORS, err: if self._fixFeedError(err) is False: raise except StopIteration: self._stopFeeding() return None def readMoreFields(self, number): if self._field_generator is None: return 0 oldlen = len(self._fields) try: for index in xrange(number): self._addField( self._field_generator.next() ) except HACHOIR_ERRORS, err: if self._fixFeedError(err) is False: raise except StopIteration: self._stopFeeding() return len(self._fields) - oldlen def _feedAll(self): if self._field_generator is None: return try: while True: field = self._field_generator.next() self._addField(field) except HACHOIR_ERRORS, err: if self._fixFeedError(err) is False: raise except StopIteration: self._stopFeeding() def __iter__(self): try: done = 0 while True: if done == len(self._fields): if self._field_generator is None: break self._addField( self._field_generator.next() ) for field in self._fields.values[done:]: yield field done += 1 except HACHOIR_ERRORS, err: field = self._fixFeedError(err) if isinstance(field, Field): yield field elif hasattr(field, '__iter__'): for f in field: yield f elif field is False: raise except StopIteration: field = self._stopFeeding() if isinstance(field, Field): yield field elif hasattr(field, '__iter__'): for f in field: yield f def _isDone(self): return (self._field_generator is None) done = property(_isDone, doc="Boolean to know if parsing is done or not") def seekBit(self, address, name="padding[]", description=None, relative=True, null=False): if relative: nbits = address - self._current_size else: nbits = address - (self.absolute_address + self._current_size) if nbits < 0: raise ParserError("Seek error, unable to go back!") if 0 < nbits: if null: return createNullField(self, nbits, name, description) else: return createPaddingField(self, nbits, name, description) else: return None
MIT License
northisup/querybuilder
querybuilder/filters.py
Filter.__init__
python
def __init__(
    self,
    id=None,
    field=None,
    label=None,
    description=None,
    type=None,
    optgroup=None,
    input=None,
    values=(),
    value_separator=None,
    default_value=None,
    input_event=None,
    size=None,
    rows=None,
    multiple=None,
    placeholder=None,
    vertical=None,
    validation=None,
    operators=(),
    plugin=None,
    plugin_config=None,
    data=None,
    valueSetter=None,
    valueGetter=None,
):
    self.id = id
    self.type = Type(type) if type else type
    self.field = field
    self.label = label
    self.description = description
    self.optgroup = optgroup
    self.input = Input(input) if input else input
    self.values = values
    if self.input in (Input.CHECKBOX, Input.RADIO) and not self.values:
        raise ValueError('values are required when using input %s' % self.input)
    self.value_separator = value_separator
    self.default_value = default_value
    self.input_event = input_event
    self.size = size
    self.rows = rows
    self.multiple = multiple
    self.placeholder = placeholder
    self.vertical = vertical
    self.validation = dict(validation or {})
    self.operators = [Operator(op) for op in operators]
    self.plugin = plugin
    self.plugin_config = plugin_config
    self.data = data
    self.valueSetter = valueSetter
    self.valueGetter = valueGetter
    self.func = None
    self._validation_functions = frozenset(
        getattr(self, func_name)
        for func_name in dir(self)
        if func_name.startswith('validate_') and callable(getattr(self, func_name))
    )
Args:
    id (str): Unique identifier of the filter. By default this is the
        name of the function it is decorating.
    field (str): Field used by the filter; multiple filters can use the
        same field.
    label (str): Label used to display the filter. It can be a simple
        string or a map for localization.
    description (str): Detailed description for display as help text.
    type (str or Type): Type of the field. Available types are in `Type`.
    optgroup (str): Group name to group this filter with.
    input (str or Input): Type of input used. Available inputs are in
        `Input`.
    values ([Values]): Required for `radio` and `checkbox` inputs.
        Generally needed for select inputs.
    value_separator (str): Used to split and join the value when a text
        input is used with an operator allowing multiple values
        (between, for example).
    default_value: The default value.
    validation ([Validation]): Object of options for rule validation.
        See the `Validation` class.
    operators ([Operator]): Array of operator types to use for this
        filter. If empty the filter will use all applicable operators.
    data (dict): Additional data not used by QueryBuilder but that will
        be added to the output rules object. Use this to store any
        functional data you need.

Args used only by the front end:
    input_event: Space-separated list of DOM events which the builder
        should listen to detect value changes.
    plugin: Name of a jQuery plugin to apply on the input.
    plugin_config: Object of parameters to pass to the plugin.
    valueSetter: Function used to set the input(s) value. If provided the
        default function is not run. It takes 2 parameters: rule, value.
    valueGetter: Function used to get the input(s) value. If provided the
        default function is not run. It takes 1 parameter: rule.

Only for text and textarea inputs:
    size: Horizontal size of the input.
    rows: Vertical size of the input.
    placeholder: Placeholder to display inside the input.

Only for select inputs:
    multiple: Accept multiple values.

Only for radio and checkbox inputs:
    vertical: Display inputs vertically instead of horizontally.
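A hedged construction sketch. The 'integer' and 'checkbox' strings are assumed to match the Type/Input enum values in querybuilder.constants (they mirror jQuery QueryBuilder's names), and the filter contents are made up for illustration:

from querybuilder.filters import Filter

size_filter = Filter(
    id='shirt_size',
    label='Shirt size',
    type='integer',              # assumed Type enum value
    input='checkbox',            # assumed Input enum value
    values={1: 'S', 2: 'M', 3: 'L'},   # required for checkbox/radio inputs
)

# Per __init__, omitting `values` with a checkbox or radio input
# raises ValueError.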
https://github.com/northisup/querybuilder/blob/67b0539345e280669985b90e26b4df3809e01d74/querybuilder/filters.py#L115-L231
from __future__ import absolute_import import re from datetime import ( date, datetime, time, ) from decimal import ( Context, Decimal, ) import six from cached_property import cached_property from querybuilder.constants import ( Input, Operator, Type, ) from querybuilder.core import ToDictMixin class Filters(object): def run_filter_for_rule(self, rule): filter = Filter._filter_registry[rule.id] filter_operand = filter.func(self) if not filter.validate(filter_operand): return False, filter_operand rule_operand = filter.python_value(rule.value) operator_handler = filter.handler_for_operator(rule.operator) if isinstance(rule_operand, (list, tuple)): return operator_handler(filter, filter_operand, *rule_operand), filter_operand else: return operator_handler(filter, filter_operand, rule_operand), filter_operand class FilterMeta(type): def __new__(metacls, name, bases, attrs): cls = super(FilterMeta, metacls).__new__(metacls, name, bases, attrs) for name, attr in attrs.items(): if hasattr(attr, 'operator'): cls._operator_handlers[attr.operator] = attr return cls class Filter(six.with_metaclass(FilterMeta, ToDictMixin)): _filter_registry = {} _operator_handlers = {} _validation_functions = frozenset() DICT_KEYS = ('id', 'type', 'field', 'label', 'description', 'optgroup', 'input', 'values', 'value_separator', 'default_value', 'input_event', 'size', 'rows', 'multiple', 'placeholder', 'vertical', 'validation', 'operators', 'plugin', 'plugin_config', 'data', 'valueSetter', 'valueGetter') TO_PYTHON = None
MIT License
tensorflow/data-validation
tensorflow_data_validation/statistics/generators/partitioned_stats_generator.py
PartitionedStatisticsAnalyzer.__init__
python
def __init__(self, min_partitions_stat_presence: int):
    self._min_partitions_stat_presence = min_partitions_stat_presence
Initializes the analyzer.
https://github.com/tensorflow/data-validation/blob/9855619b40a1c6dab2be3509fa252eaea5120596/tensorflow_data_validation/statistics/generators/partitioned_stats_generator.py#L129-L134
import collections import functools from typing import Dict, Iterable, Text, Tuple import apache_beam as beam import numpy as np import pyarrow as pa from tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.statistics.generators import stats_generator from tensorflow_data_validation.utils import stats_util from tfx_bsl.arrow import table_util from tensorflow_metadata.proto.v0 import statistics_pb2 def _assign_to_partition(sliced_record_batch: types.SlicedRecordBatch, num_partitions: int ) -> Tuple[Tuple[types.SliceKey, int], pa.RecordBatch]: slice_key, record_batch = sliced_record_batch return (slice_key, np.random.randint(num_partitions)), record_batch def _get_partitioned_statistics_summary( statistics: Dict[types.FeaturePath, Dict[Text, np.ndarray]] ) -> Dict[types.FeaturePath, Dict[Text, float]]: summary = collections.defaultdict(collections.defaultdict) for feature_path, feature_statistics in statistics.items(): summary_for_feature = summary[feature_path] for stat_name, stat_values in feature_statistics.items(): summary_for_feature['min_' + stat_name] = np.min(stat_values) summary_for_feature['max_' + stat_name] = np.max(stat_values) summary_for_feature['mean_' + stat_name] = np.mean(stat_values) summary_for_feature['median_' + stat_name] = np.median(stat_values) summary_for_feature['std_dev_' + stat_name] = np.std(stat_values) summary_for_feature['num_partitions_' + stat_name] = stat_values.size return summary def get_valid_statistics( statistics: Dict[types.FeaturePath, Dict[Text, np.ndarray]], min_partitions_stat_presence: int ) -> Dict[types.FeaturePath, Dict[Text, np.ndarray]]: valid_statistics = collections.defaultdict(collections.defaultdict) for feature_path, feature_statistics in statistics.items(): for stat_name, stat_values in feature_statistics.items(): if len(stat_values) >= min_partitions_stat_presence: valid_statistics[feature_path][stat_name] = np.array(stat_values) return valid_statistics class PartitionedStatsFn(object): def compute(self, examples: types.ExampleBatch ) -> statistics_pb2.DatasetFeatureStatistics: raise NotImplementedError() class _PartitionedStatisticsAnalyzerAccumulator(object): def __init__(self): self.statistics = collections.defaultdict( functools.partial(collections.defaultdict, list)) class PartitionedStatisticsAnalyzer(beam.CombineFn):
Apache License 2.0
m3dev/pptx-template
.eggs/python_pptx-0.6.6-py3.6.egg/pptx/parts/image.py
Image.dpi
python
def dpi(self):
    def int_dpi(dpi):
        try:
            int_dpi = int(round(float(dpi)))
            if int_dpi < 1 or int_dpi > 2048:
                int_dpi = 72
        except (TypeError, ValueError):
            int_dpi = 72
        return int_dpi

    def normalize_pil_dpi(pil_dpi):
        if isinstance(pil_dpi, tuple):
            return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1]))
        return (72, 72)

    return normalize_pil_dpi(self._pil_props[2])
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch resolution of this image. A default value of (72, 72) is used if the dpi is not specified in the image file.
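A brief usage sketch; 'photo.jpg' is a placeholder path, and any missing or out-of-range (outside 1-2048) dpi values fall back to (72, 72):

from pptx.parts.image import Image

image = Image.from_file('photo.jpg')
horz_dpi, vert_dpi = image.dpi
print(horz_dpi, vert_dpi)   # e.g. (72, 72) when the file carries no dpi info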
https://github.com/m3dev/pptx-template/blob/bccd95728fc27963dabdd53bd3a2ee92233d5176/.eggs/python_pptx-0.6.6-py3.6.egg/pptx/parts/image.py#L193-L224
from __future__ import ( absolute_import, division, print_function, unicode_literals ) import hashlib import os try: from PIL import Image as PIL_Image except ImportError: import Image as PIL_Image from ..compat import BytesIO, is_string from ..opc.package import Part from ..opc.spec import image_content_types from ..util import lazyproperty class ImagePart(Part): def __init__(self, partname, content_type, blob, package, filename=None): super(ImagePart, self).__init__( partname, content_type, blob, package ) self._filename = filename @classmethod def load(cls, partname, content_type, blob, package): return cls(partname, content_type, blob, package) @classmethod def new(cls, package, image): partname = package.next_image_partname(image.ext) return cls( partname, image.content_type, image.blob, package, image.filename ) @property def desc(self): if self._filename is None: return 'image.%s' % self.ext return self._filename @property def ext(self): return self.partname.ext @property def image(self): return Image(self.blob, self.desc) def scale(self, scaled_cx, scaled_cy): image_cx, image_cy = self._native_size if scaled_cx is None and scaled_cy is None: scaled_cx = image_cx scaled_cy = image_cy elif scaled_cx is None: scaling_factor = float(scaled_cy) / float(image_cy) scaled_cx = int(round(image_cx * scaling_factor)) elif scaled_cy is None: scaling_factor = float(scaled_cx) / float(image_cx) scaled_cy = int(round(image_cy * scaling_factor)) return scaled_cx, scaled_cy @lazyproperty def sha1(self): return hashlib.sha1(self._blob).hexdigest() @property def _dpi(self): image = Image.from_blob(self.blob) return image.dpi @property def _native_size(self): EMU_PER_INCH = 914400 horz_dpi, vert_dpi = self._dpi width_px, height_px = self._px_size width = EMU_PER_INCH * width_px / horz_dpi height = EMU_PER_INCH * height_px / vert_dpi return width, height @property def _px_size(self): image = Image.from_blob(self.blob) return image.size class Image(object): def __init__(self, blob, filename): super(Image, self).__init__() self._blob = blob self._filename = filename @classmethod def from_blob(cls, blob, filename=None): return cls(blob, filename) @classmethod def from_file(cls, image_file): if is_string(image_file): with open(image_file, 'rb') as f: blob = f.read() filename = os.path.basename(image_file) else: blob = image_file.read() filename = None return cls.from_blob(blob, filename) @property def blob(self): return self._blob @lazyproperty def content_type(self): return image_content_types[self.ext] @lazyproperty
Apache License 2.0
wolframresearch/wolframclientforpython
wolframclient/evaluation/cloud/base.py
OAuthSessionBase.signed_request
python
def signed_request(self, uri, headers={}, data=None, method="POST"):
    raise NotImplementedError
Sign a given request and issue it.
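A hedged sketch of how a concrete subclass might implement this abstract method, assuming self._client has been set up as an oauthlib Client during authentication; the use of requests for sending is an illustrative choice, not the library's actual implementation:

import requests

from wolframclient.evaluation.cloud.base import OAuthSessionBase

class RequestsOAuthSession(OAuthSessionBase):
    def signed_request(self, uri, headers={}, data=None, method="POST"):
        all_headers = {**self.DEFAULT_CONTENT_TYPE, **headers}
        # oauthlib's Client.sign returns the signed (uri, headers, body)
        signed_uri, signed_headers, signed_body = self._client.sign(
            uri, http_method=method, body=data, headers=all_headers)
        return requests.request(method, signed_uri,
                                headers=signed_headers, data=signed_body)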
https://github.com/wolframresearch/wolframclientforpython/blob/27cffef560eea8d16c02fe4086f42363604284b6/wolframclient/evaluation/cloud/base.py#L73-L75
from __future__ import absolute_import, print_function, unicode_literals import json from io import IOBase from wolframclient.utils import six from wolframclient.utils.api import oauth, urllib __all__ = [ "SecuredAuthenticationKey", "UserIDPassword", "OAuthSessionBase", "OAuthAsyncSessionBase", ] class SecuredAuthenticationKey(object): is_xauth = False def __init__(self, consumer_key, consumer_secret): self.consumer_key = consumer_key self.consumer_secret = consumer_secret class UserIDPassword(object): is_xauth = True def __init__(self, user, password): self.user = user self.password = password class OAuthSessionBase(object): DEFAULT_CONTENT_TYPE = { "Content-Type": "application/x-www-form-urlencoded", "User-Agent": "WolframClientForPython/1.0", } def __init__( self, server, consumer_key, consumer_secret, signature_method=None, client_class=oauth.Client, ): self.consumer_key = consumer_key self.consumer_secret = consumer_secret self.signature_method = signature_method or oauth.SIGNATURE_HMAC self.client_class = client_class self._client = None self._oauth_token = None self._oauth_token_secret = None self.server = server def authenticate(self): raise NotImplementedError
MIT License
skshetry/webdav4
src/webdav4/fsspec.py
WebdavFile.read
python
def read(self, length: int = -1) -> Union[str, bytes, None]:
    chunk = self.reader.read(length)
    if chunk:
        self.loc += len(chunk)
    return chunk
Read chunk of bytes.
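A rough usage sketch reading a remote file in chunks through fsspec's open(); the server URL, credentials and remote path are placeholders:

from webdav4.fsspec import WebdavFileSystem

fs = WebdavFileSystem("https://dav.example.org", auth=("user", "password"))
with fs.open("data/big.bin", mode="rb") as f:
    while True:
        chunk = f.read(1 << 20)   # 1 MiB per call; advances f.loc
        if not chunk:
            break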
https://github.com/skshetry/webdav4/blob/ba1d581f3768ec569b727135fa81d36762ecbe5a/src/webdav4/fsspec.py#L433-L438
import errno import io import os import tempfile from contextlib import contextmanager from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterator, List, NamedTuple, NoReturn, Optional, TextIO, Tuple, Type, Union, cast, ) from fsspec import Callback from fsspec.spec import AbstractBufferedFile, AbstractFileSystem from .client import ( Client, IsACollectionError, IsAResourceError, ResourceAlreadyExists, ResourceConflict, ResourceNotFound, ) from .fs_utils import peek_filelike_length from .stream import read_into if TYPE_CHECKING: from array import ArrayType from datetime import datetime from mmap import mmap from os import PathLike from typing import AnyStr from .callback import CallbackFn from .types import AuthTypes, URLTypes mapping = {"content_length": "size", "path": "name", "type": "type"} def translate_info(item: Union[str, Dict[str, Any]]) -> Dict[str, Any]: assert not isinstance(item, str) return {mapping.get(key, key): value for key, value in item.items()} @contextmanager def translate_exceptions() -> Iterator[None]: try: yield except ResourceNotFound as exc: raise FileNotFoundError( errno.ENOENT, "No such file or directory", exc.path ) from exc except IsACollectionError as exc: raise IsADirectoryError( errno.EISDIR, "Is a directory", exc.path ) from exc except IsAResourceError as exc: raise NotADirectoryError( errno.ENOTDIR, "Not a directory", exc.path ) from exc class WebdavFileSystem(AbstractFileSystem): protocol = ("webdav", "dav") def __init__( self, base_url: "URLTypes", auth: "AuthTypes" = None, client: "Client" = None, **client_opts: Any, ) -> None: super().__init__() client_opts.setdefault("chunk_size", self.blocksize) self.client = client or Client(base_url, auth=auth, **client_opts) @classmethod def _strip_protocol(cls, path: str) -> str: stripped = super()._strip_protocol(path) return cast(str, stripped) @translate_exceptions() def ls( self, path: str, detail: bool = True, **kwargs: Any ) -> List[Union[str, Dict[str, Any]]]: path = self._strip_protocol(path).strip() data = self.client.ls( path, detail=detail, allow_listing_resource=False ) if not detail: return data return [translate_info(item) for item in data] @translate_exceptions() def info(self, path: str, **kwargs: Any) -> Dict[str, Any]: path = self._strip_protocol(path) return translate_info(self.client.info(path)) @translate_exceptions() def rm_file(self, path: str) -> None: path = self._strip_protocol(path) return self.client.remove(path) _rm = rm_file @translate_exceptions() def cp_file(self, path1: str, path2: str, **kwargs: Any) -> None: path1 = self._strip_protocol(path1) path2 = self._strip_protocol(path2) return self.client.copy(path1, path2) def rmdir(self, path: str) -> None: path = self._strip_protocol(path) if self.ls(path): raise OSError(errno.ENOTEMPTY, "Directory not empty", path) return self.client.remove(path) def rm( self, path: str, recursive: bool = False, maxdepth: int = None ) -> None: path = self._strip_protocol(path) if recursive and not maxdepth and self.isdir(path): return self.rm_file(path) super().rm(path, recursive=recursive, maxdepth=maxdepth) return None def copy( self, path1: str, path2: str, recursive: bool = False, on_error: str = None, **kwargs: Any, ) -> None: path1 = self._strip_protocol(path1) path2 = self._strip_protocol(path2) if recursive and not kwargs.get("maxdepth") and self.isdir(path1): return self.cp_file(path1, path2) if not recursive and self.isdir(path1): return self.makedirs(path2) super().copy( path1, path2, recursive=recursive, 
on_error=on_error, **kwargs ) return None def mv( self, path1: str, path2: str, recursive: bool = False, maxdepth: bool = None, **kwargs: Any, ) -> None: path1 = self._strip_protocol(path1) path2 = self._strip_protocol(path2) if recursive and not maxdepth and self.isdir(path1): return self.client.move(path1, path2) if not recursive and self.isdir(path1): return self.makedirs(path2) super().mv( path1, path2, recursive=recursive, maxdepth=maxdepth, **kwargs ) return None def _mkdir(self, path: str, exist_ok: bool = False) -> None: try: return self.client.mkdir(path) except ResourceAlreadyExists as exc: details = self.info(path) if details and details["type"] == "directory" and exist_ok: return None raise FileExistsError(errno.EEXIST, "File exists", path) from exc except ResourceConflict as exc: parent = self._parent(path) details = self.info(parent) if details["type"] == "directory": raise raise NotADirectoryError( errno.ENOTDIR, "Not a directory", parent ) from exc def mkdir( self, path: str, create_parents: bool = True, **kwargs: Any ) -> None: path = self._strip_protocol(path) if create_parents: return self.makedirs(path, exist_ok=True) return self._mkdir(path) def makedirs(self, path: str, exist_ok: bool = False) -> None: path = self._strip_protocol(path) parent = self._parent(path) if not ({"", self.root_marker} & {path, parent}) and not self.exists( parent ): self.makedirs(parent, exist_ok=exist_ok) return self._mkdir(path, exist_ok=exist_ok) @translate_exceptions() def created(self, path: str) -> Optional["datetime"]: path = self._strip_protocol(path) return self.client.created(path) @translate_exceptions() def modified(self, path: str) -> Optional["datetime"]: path = self._strip_protocol(path) return self.client.modified(path) @translate_exceptions() def _open( self, path: str, mode: str = "rb", block_size: int = None, autocommit: bool = True, cache_options: Dict[str, str] = None, **kwargs: Any, ) -> Union["WebdavFile", "UploadFile"]: size = kwargs.pop("size", None) assert "a" not in mode if "x" in mode and self.exists(path): raise FileExistsError(errno.EEXIST, "File exists", path) if set(mode) & {"w", "x"}: return UploadFile( self, path=path, mode=mode, block_size=block_size ) return WebdavFile( self, path, block_size=block_size, autocommit=autocommit, mode=mode, size=size, cache_options=cache_options, **kwargs, ) @translate_exceptions() def checksum(self, path: str) -> Optional[str]: path = self._strip_protocol(path) return self.client.etag(path) @translate_exceptions() def size(self, path: str) -> Optional[int]: path = self._strip_protocol(path) return self.client.content_length(path) def sign(self, path: str, expiration: int = 100, **kwargs: Any) -> None: raise NotImplementedError def pipe_file(self, path: str, value: bytes, **kwargs: Any) -> None: buff = io.BytesIO(value) kwargs.setdefault("overwrite", True) return self.upload_fileobj(buff, path, **kwargs) def upload_fileobj( self, fobj: BinaryIO, rpath: str, callback: "Callback" = None, overwrite: bool = True, size: int = None, **kwargs: Any, ) -> None: rpath = self._strip_protocol(rpath) self.mkdirs(os.path.dirname(rpath), exist_ok=True) if size is None: size = peek_filelike_length(fobj) callback = cast("Callback", Callback.as_callback(callback)) if size is not None: callback.set_size(size) progress_callback = cast("CallbackFn", callback.relative_update) return self.client.upload_fileobj( fobj, rpath, overwrite=overwrite, callback=progress_callback, size=size, **kwargs, ) put_fileobj = upload_fileobj def put_file( self, lpath: 
"PathLike[AnyStr]", rpath: str, callback: "Callback" = None, **kwargs: Any, ) -> None: if os.path.isdir(lpath): rpath = self._strip_protocol(rpath) return self.makedirs(rpath, exist_ok=True) with open(lpath, mode="rb") as fobj: kwargs.setdefault("overwrite", True) kwargs.setdefault("size", None) return self.upload_fileobj( fobj, rpath, callback=callback, **kwargs, ) class WebdavFile(AbstractBufferedFile): size: int def __init__( self, fs: "WebdavFileSystem", path: str, mode: str = "rb", block_size: int = None, autocommit: bool = True, cache_type: str = "readahead", cache_options: Dict[str, str] = None, **kwargs: Any, ) -> None: size = kwargs.get("size") self.details = {"name": path, "size": size, "type": "file"} super().__init__( fs, path, mode=mode, block_size=block_size, autocommit=autocommit, cache_type=cache_type, cache_options=cache_options, **kwargs, ) encoding = kwargs.get("encoding") self.fobj = fs.client.open( self.path, mode=self.mode, encoding=encoding, chunk_size=self.blocksize, ) self.reader: Union[TextIO, BinaryIO] = self.fobj.__enter__() if not self.size: if getattr(self.reader, "size", None): self.size = self.reader.size else: self.size = self.fs.size(self.path) self.closed: bool = False
MIT License
potash/drain
drain/exploration.py
_print_unhashable
python
def _print_unhashable(df, columns=None):
    for c in df.columns if columns is None else columns:
        if df.dtypes[c] == object:
            try:
                df[c].apply(hash)
            except TypeError:
                df[c] = df[c].dropna().apply(pformat).ix[df.index]
    return df
Replace unhashable values in a DataFrame with their string repr.

Args:
    df: DataFrame
    columns: columns to replace, if necessary. Default None replaces
        all columns.
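A small illustrative sketch (the DataFrame contents are made up); note the helper relies on the legacy .ix indexer, so it targets older pandas versions:

import pandas as pd

from drain.exploration import _print_unhashable

df = pd.DataFrame({'params': [[1, 2], {'a': 1}], 'score': [0.9, 0.8]})
df = _print_unhashable(df)          # 'params' values become strings like '[1, 2]'
df.set_index('params', inplace=True)  # now safe, since the values are hashable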
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/exploration.py#L272-L286
from tempfile import NamedTemporaryFile from pprint import pformat from itertools import product from sklearn import tree import pandas as pd from collections import Counter from six import StringIO from drain import util, step def explore(steps, reload=False): return StepFrame(index=step.load(steps, reload=reload)) def expand(self, prefix=False, index=True, diff=True, existence=True): dicts = [step._collect_kwargs(s, drop_duplicate_names=True) for s in self.index] dicts = [{k: util.dict_expand(v) for k, v in s.items()} for s in dicts] if diff: diff_dicts = [{} for d in dicts] names = util.union([set(d.keys()) for d in dicts]) for name in names: if existence: ndicts = [d[name] for d in dicts if name in d.keys()] else: ndicts = [d[name] if name in d.keys() else {} for d in dicts] ndiffs = util.dict_diff(ndicts) if sum(map(len, ndiffs)) == 0: if existence and len(ndicts) < len(self): for m, d in zip(diff_dicts, dicts): m[name] = {tuple(): name in d.keys()} else: diff_iter = iter(ndiffs) for m, d in zip(diff_dicts, dicts): if name in d.keys() or not existence: m[name] = diff_iter.next() dicts = diff_dicts merged_dicts = [] for dd in dicts: merged_dicts.append(util.dict_merge(*({tuple([name] + list(util.make_tuple(k))): v for k, v in d.items()} for name, d in dd.items()))) keys = [list((k[1:] for k in d.keys())) for d in merged_dicts] if not prefix: key_count = [Counter(kk) for kk in keys] prefix_keys = util.union({k for k in c if c[k] > 1} for c in key_count) else: prefix_keys = util.union((set(kk) for kk in keys)) merged_dicts = [{str.join('_', map(str, k if k[1:] in prefix_keys else k[1:])): v for k, v in d.items()} for d in merged_dicts] expanded = pd.DataFrame(merged_dicts, index=self.index) if index: columns = list(expanded.columns) try: if len(columns) > 0: expanded.set_index(columns, inplace=True) else: expanded.index = [None]*len(expanded) except TypeError: _print_unhashable(expanded, columns) expanded.set_index(columns, inplace=True) df = self.__class__.__bases__[0](self, copy=True) df.index = expanded.index else: df = pd.concat((expanded, self), axis=1) df = StepFrame(expanded) return df def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs): search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1] functions = util.make_list(fn) search = list(product(functions, util.dict_product(kwargs))) results = [] for fn, kw in search: if not pairwise: r = self.index.to_series().apply(lambda step: fn(step, **kw)) else: r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw) name = [] if len(functions) == 1 else [fn.__name__] name += util.dict_subset(kw, search_keys).values() if isinstance(r, pd.DataFrame): columns = pd.MultiIndex.from_tuples( [tuple(name + util.make_list(c)) for c in r.columns]) r.columns = columns else: r.name = tuple(name) results.append(r) if len(results) > 1: result = pd.concat(results, axis=1) column_names = [] if len(functions) == 1 else [None] column_names += search_keys column_names += [None]*(len(result.columns.names)-len(column_names)) result.columns.names = column_names return StepFrame(result) else: result = results[0] if isinstance(result, pd.DataFrame): return StepFrame(result) else: result.name = functions[0].__name__ return StepSeries(result) def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs): steps = self.index r = pd.DataFrame(index=steps, columns=steps) for i, s1 in enumerate(steps): j = range(i+1 if symmetric else len(steps)) if 
not diagonal: j.remove(i) other = set(steps[j]) if block is not None: df = self.reset_index() df = df.merge(df, on=block) other &= set(df[df.index_x == s1].index_y) for s2 in other: r.ix[s1, s2] = function(s1, s2, **kwargs) return r def _assert_step_collection(steps): for s in steps: if not isinstance(s, step.Step): raise ValueError("StepFrame index must consist of drain.step.Step objects") if len(set(steps)) != len(steps): raise ValueError("StepFrame steps must be unique") class StepFrame(pd.DataFrame): expand = expand dapply = dapply def __init__(self, *args, **kwargs): pd.DataFrame.__init__(self, *args, **kwargs) _assert_step_collection(self.index.values) @property def _constructor(self): return StepFrame @property def _contructor_sliced(self): return pd.Series def __str__(self): return self.expand().__str__() def to_html(self, *args, **kwargs): return self.expand().to_html(*args, **kwargs) def reset_index(self, *args, **kwargs): return pd.DataFrame(self).reset_index(*args, **kwargs) class StepSeries(pd.Series): expand = expand dapply = dapply def __init__(self, *args, **kwargs): pd.Series.__init__(self, *args, **kwargs) _assert_step_collection(self.index.values) @property def _constructor(self): return StepSeries @property def _contructor_expanddim(self): return StepFrame def __str__(self): return self.expand().__str__() def to_html(self, *args, **kwargs): return self.expand().to_html(*args, **kwargs) def reset_index(self, *args, **kwargs): return pd.Series(self).reset_index(*args, **kwargs)
MIT License
synbiodex/pysbol2
sbol2/partshop.py
PartShop.downloadAttachment
python
def downloadAttachment(self, attachment_uri, filepath='.'):
    url = self._uri2url(attachment_uri)
    url = posixpath.join(url, 'download')
    filepath = os.path.expanduser(filepath)
    headers = {
        'Accept': 'text/plain',
        'X-authorization': self.key
    }
    response = requests.get(url, headers=headers)
    if response.ok:
        filename = response.headers['Content-Disposition']
        filename = filename[22:-1]
        if os.path.isdir(filepath):
            filepath = posixpath.join(filepath, filename)
        with open(filepath, 'wb') as filehandle:
            filehandle.write(response.content)
        return
    if response.status_code == http.HTTPStatus.UNAUTHORIZED:
        msg = 'You must login with valid credentials before downloading a file'
        raise SBOLError(SBOLErrorCode.SBOL_ERROR_HTTP_UNAUTHORIZED, msg)
    if response.status_code == http.HTTPStatus.NOT_FOUND:
        msg = 'Unable to download. Attachment {} not found.'.format(attachment_uri)
        raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, msg)
    msg = 'HTTP Error code {} trying to download file.'
    msg = msg.format(response.status_code)
    raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, msg)
Download a file attachment from SynBioHub.

:param attachment_uri: The URI of the file to download
:param filepath: The local path or filename to which the file will be written

Returns None if successful.

Raises EnvironmentError if the filepath is invalid.
Raises SBOLError with code SBOL_ERROR_HTTP_UNAUTHORIZED if there is an
HTTP Unauthorized response.
Raises SBOLError with code SBOL_ERROR_BAD_HTTP_REQUEST on any other HTTP
error. The actual status code is embedded in the string message.
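A rough usage sketch; the SynBioHub instance, account, attachment URI and target directory are all placeholders:

from sbol2 import PartShop

part_shop = PartShop('https://synbiohub.org')
part_shop.login('user@example.org', 'my-password')   # required before downloading
part_shop.downloadAttachment(
    'https://synbiohub.org/user/me/my_collection/attachment_001/1',
    filepath='./attachments')   # an existing directory keeps the server filename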
https://github.com/synbiodex/pysbol2/blob/127b92d60ecf6f9b6cb8fbf9657bb578bc983090/sbol2/partshop.py#L347-L394
import getpass import http import logging import os import posixpath from typing import List, Optional, Union import urllib.parse import requests import urllib3.exceptions from .config import Config, parseClassName from .config import ConfigOptions from .config import parseURLDomain from .constants import * from .sbolerror import SBOLError from .sbolerror import SBOLErrorCode from .identified import Identified from . import SearchQuery class PartShop: def __init__(self, url, spoofed_url=''): self.resource = self._validate_url(url, 'resource') self.user = '' self.key = '' self.spoofed_resource = self._validate_url(spoofed_url, 'spoofed') def _validate_url(self, url, url_name): if url_name == "spoofed" and url == '': return url if not isinstance(url, str): msg = ('PartShop initialization failed. The {} URL ' + 'is not of type string').format(url_name) raise SBOLError(SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT, msg) url_pieces = urllib.parse.urlparse(url) if not all([url_pieces.scheme in ['http', 'https'], url_pieces.netloc]): msg = ('PartShop initialization failed. The {} URL ' + 'was not valid').format(url_name) raise SBOLError(SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT, msg) if len(url) > 0 and url[-1] == '/': msg = ('PartShop initialization failed. The {} URL ' + 'should not contain a terminal forward slash') msg = msg.format(url_name) raise SBOLError(SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT, msg) return url @property def logger(self): logger = logging.getLogger('sbol2') if not logger.hasHandlers(): logging.basicConfig() return logger def count(self): raise NotImplementedError('Not yet implemented') def spoof(self, spoofed_url): self.spoofed_resource = self._validate_url(spoofed_url, 'spoofed') def sparqlQuery(self, query): endpoint = parseURLDomain(self.resource) + '/sparql' if self.spoofed_resource == '': resource = self.resource else: resource = self.spoofed_resource p = query.find('WHERE') if p != -1: from_clause = ' FROM <' + parseURLDomain(resource) + '/user/' + self.user + '> ' query = query[:p].rstrip() + from_clause + query[p:].lstrip() headers = {'X-authorization': self.key, 'Accept': 'application/json'} params = {'query': query} if Config.getOption(ConfigOptions.VERBOSE.value) is True: self.logger.debug('Issuing SPARQL: ' + query) response = requests.get(endpoint, headers=headers, params=params) if not response: raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, response) return response def pull(self, uris, doc, recursive=True): endpoints = [] if type(uris) is str: endpoints.append(uris) elif type(uris) is list: endpoints = uris else: raise TypeError('URIs must be str or list. Found: ' + str(type(uris))) for uri in endpoints: try: query = self._uri2url(uri) except SBOLError as err: if err.error_code() == SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT: query = self.resource + '/' + uri else: raise query += '/sbol' if not recursive: query += 'nr' if Config.getOption(ConfigOptions.VERBOSE.value): self.logger.debug('Issuing GET request ' + query) response = requests.get(query, headers={'X-authorization': self.key, 'Accept': 'text/plain'}) if response.status_code == 404: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Part not found. 
Unable to pull: ' + query) elif response.status_code == 401: raise SBOLError(SBOLErrorCode.SBOL_ERROR_HTTP_UNAUTHORIZED, 'Please log in with valid credentials') elif not response: raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, response) doc.appendString(response.content, overwrite=True) doc.resource_namespaces.add(self.resource) def submit(self, doc, collection='', overwrite=0): if collection == '': if len(doc.displayId) == 0 or len(doc.name) == 0 or len(doc.description) == 0: raise SBOLError(SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT, 'Cannot submit Document. The Document must be ' 'assigned a displayId, name, and ' + 'description for upload.') else: if len(self.spoofed_resource) > 0 and self.resource in collection: collection = collection.replace(self.resource, self.spoofed_resource) if Config.getOption(ConfigOptions.VERBOSE.value) is True: self.logger.info('Submitting Document to an existing collection: %s', collection) files = {} if len(doc.displayId) > 0: files['id'] = (None, doc.displayId) if len(doc.version) > 0: files['version'] = (None, doc.version) if doc.name and len(doc.name) > 0: files['name'] = (None, doc.name) if doc.description and len(doc.description) > 0: files['description'] = (None, doc.description) citations = '' for citation in doc.citations: citations += citation + ',' citations = citations[0:-1] files['citations'] = (None, citations) keywords = '' for kw in doc.keywords: keywords += kw + ',' keywords = keywords[0:-1] files['keywords'] = (None, keywords) files['overwrite_merge'] = (None, str(overwrite)) files['user'] = (None, self.key) files['file'] = ('file', doc.writeString(), 'text/xml') if collection != '': files['rootCollections'] = (None, collection) response = requests.post(self.resource + '/submit', files=files, headers={'Accept': 'text/plain', 'X-authorization': self.key}) if response: return response elif response.status_code == 401: raise urllib3.exceptions.HTTPError('You must login with valid credentials ' 'before submitting') else: raise urllib3.exceptions.HTTPError('HTTP post request failed with: ' + str(response.status_code) + ' - ' + str(response.content)) def _uri2url(self, uri): if self.resource in uri: return uri if parseURLDomain(self.resource) in uri: return uri if self.spoofed_resource and self.spoofed_resource in uri: return uri.replace(self.spoofed_resource, self.resource) msg = ('{} does not exist in the resource namespace') msg = msg.format(uri) raise SBOLError(SBOLErrorCode.SBOL_ERROR_INVALID_ARGUMENT, msg) def remove(self, uri): query = self._uri2url(uri) url = '{}/remove'.format(query) headers = { 'X-authorization': self.key, 'Accept': 'application/json' } response = requests.get(url, headers=headers) if response.ok: return True if response.status_code == requests.codes.unauthorized: msg = 'You must login with valid credentials before removing' raise SBOLError(SBOLErrorCode.SBOL_ERROR_HTTP_UNAUTHORIZED, msg) msg = 'Unknown error: ' + response raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, msg) def login(self, user_id, password=''): self.user = user_id if password is None or password == '': password = getpass.getpass() response = requests.post( parseURLDomain(self.resource) + '/remoteLogin', data={'email': user_id, 'password': password}, headers={'Content-Type': 'application/x-www-form-urlencoded'} ) if not response: msg = 'Login failed due to an HTTP error: {}' msg = msg.format(response) raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, msg) self.key = response.content.decode('utf-8') return response def 
getKey(self): return self.key def getURL(self): return self.resource def getUser(self): return self.user def getSpoofedURL(self): return self.spoofed_resource def attachFile(self, top_level_uri, filepath): filepath = os.path.expanduser(filepath) headers = { 'Accept': 'text/plain', 'X-authorization': self.key } url = posixpath.join(top_level_uri, 'attach') with open(filepath, 'rb') as fp: files = {'file': fp} response = requests.post(url, headers=headers, files=files) if response.ok: if Config.getOption(ConfigOptions.VERBOSE.value) is True: print(response.text) return if response.status_code == http.HTTPStatus.UNAUTHORIZED: msg = 'You must login with valid credentials before attaching a file' raise SBOLError(SBOLErrorCode.SBOL_ERROR_HTTP_UNAUTHORIZED, msg) msg = 'HTTP Error code {} trying to attach file.' msg = msg.format(response.status_code) raise SBOLError(SBOLErrorCode.SBOL_ERROR_BAD_HTTP_REQUEST, msg)
Apache License 2.0
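The PartShop client shown in the context above is typically driven against a SynBioHub instance. A minimal usage sketch follows; the repository URL, account e-mail, password, and part URI are placeholder assumptions, not values from the source.

import sbol2

# URL must not end with a trailing slash (enforced by _validate_url above)
part_shop = sbol2.PartShop('https://synbiohub.org')
part_shop.login('user@example.org', 'password')   # stores the X-authorization key

doc = sbol2.Document()
# pull() accepts a single URI or a list of URIs and appends the retrieved SBOL to doc
part_shop.pull('https://synbiohub.org/public/igem/BBa_B0034/1', doc)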
awslabs/autogluon
core/src/autogluon/core/searcher/bayesopt/datatypes/config_ext.py
ExtendedConfiguration.get
python
def get(self, config: CS.Configuration, resource: int) -> CS.Configuration: values = copy.deepcopy(config.get_dictionary()) values[self.resource_attr_name] = resource return CS.Configuration(self.hp_ranges_ext.config_space, values=values)
Create extended config with resource added. :param config: Configuration without the resource attribute :param resource: Resource value to attach :return: Extended config containing the resource attribute
https://github.com/awslabs/autogluon/blob/e26e7b23f17fac9f5fb761096a6a49fe94de496b/core/src/autogluon/core/searcher/bayesopt/datatypes/config_ext.py#L44-L54
from typing import Tuple, Union import ConfigSpace as CS import ConfigSpace.hyperparameters as CSH import copy from .hp_ranges_cs import HyperparameterRanges_CS RESOURCE_ATTR_PREFIX = 'RESOURCE_ATTR_' class ExtendedConfiguration(object): def __init__( self, hp_ranges: HyperparameterRanges_CS, resource_attr_key: str, resource_attr_range: Tuple[int, int]): assert resource_attr_range[0] >= 1 assert resource_attr_range[1] >= resource_attr_range[0] self.hp_ranges = hp_ranges self.resource_attr_key = resource_attr_key self.resource_attr_range = resource_attr_range config_space_ext = copy.deepcopy(hp_ranges.config_space) self.resource_attr_name = RESOURCE_ATTR_PREFIX + resource_attr_key config_space_ext.add_hyperparameter(CSH.UniformIntegerHyperparameter( name=self.resource_attr_name, lower=1, upper=resource_attr_range[1])) self.hp_ranges_ext = HyperparameterRanges_CS( config_space_ext, name_last_pos=self.resource_attr_name)
Apache License 2.0
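A sketch of how the extended configuration is used. The import paths are inferred from the file location above, and the hyperparameter name and resource values are made up for illustration.

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
# hypothetical import paths derived from the file path above; internal modules may move
from autogluon.core.searcher.bayesopt.datatypes.hp_ranges_cs import HyperparameterRanges_CS
from autogluon.core.searcher.bayesopt.datatypes.config_ext import ExtendedConfiguration

# plain search space without the resource attribute
config_space = CS.ConfigurationSpace()
config_space.add_hyperparameter(
    CSH.UniformFloatHyperparameter('learning_rate', lower=1e-4, upper=1e-1, log=True))
hp_ranges = HyperparameterRanges_CS(config_space)

config_ext = ExtendedConfiguration(
    hp_ranges, resource_attr_key='epoch', resource_attr_range=(1, 27))

config = config_space.sample_configuration()
# get() copies the values and sets RESOURCE_ATTR_epoch = 9 in the extended config space
extended = config_ext.get(config, resource=9)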
lizhaokun/autosub-with-baidu-deepspeech2
model_utils/model.py
DeepSpeech2Model.init_ext_scorer
python
def init_ext_scorer(self, beam_alpha, beam_beta, language_model_path, vocab_list): if language_model_path != '': self.logger.info("begin to initialize the external scorer " "for decoding") self._ext_scorer = Scorer(beam_alpha, beam_beta, language_model_path, vocab_list) lm_char_based = self._ext_scorer.is_character_based() lm_max_order = self._ext_scorer.get_max_order() lm_dict_size = self._ext_scorer.get_dict_size() self.logger.info("language model: " "is_character_based = %d," % lm_char_based + " max_order = %d," % lm_max_order + " dict_size = %d" % lm_dict_size) self.logger.info("end initializing scorer") else: self._ext_scorer = None self.logger.info("no language model provided, " "decoding by pure beam search without scorer.")
Initialize the external scorer. :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param language_model_path: Filepath for language model. If it is empty, the external scorer will be set to None, and the decoding method will be pure beam search without scorer. :type language_model_path: basestring|None :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list
https://github.com/lizhaokun/autosub-with-baidu-deepspeech2/blob/20acbcbe1e3b8b83f107af4e28eebbbebba356f2/model_utils/model.py#L226-L258
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import time import logging import gzip import copy import inspect from distutils.dir_util import mkpath import paddle.v2 as paddle from decoders.swig_wrapper import Scorer from decoders.swig_wrapper import ctc_greedy_decoder from decoders.swig_wrapper import ctc_beam_search_decoder_batch from model_utils.network import deep_speech_v2_network logging.basicConfig( format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s') class DeepSpeech2Model(object): def __init__(self, vocab_size, num_conv_layers, num_rnn_layers, rnn_layer_size, use_gru, pretrained_model_path, share_rnn_weights): self._create_network(vocab_size, num_conv_layers, num_rnn_layers, rnn_layer_size, use_gru, share_rnn_weights) self._create_parameters(pretrained_model_path) self._inferer = None self._loss_inferer = None self._ext_scorer = None self._num_conv_layers = num_conv_layers self.logger = logging.getLogger("") self.logger.setLevel(level=logging.INFO) def train(self, train_batch_reader, dev_batch_reader, feeding_dict, learning_rate, gradient_clipping, num_passes, output_model_dir, is_local=True, num_iterations_print=100, test_off=False): if not os.path.exists(output_model_dir): mkpath(output_model_dir) adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict) adapted_train_batch_reader = self._adapt_data(train_batch_reader) adapted_dev_batch_reader = self._adapt_data(dev_batch_reader) optimizer = paddle.optimizer.Adam( learning_rate=learning_rate, gradient_clipping_threshold=gradient_clipping) trainer = paddle.trainer.SGD( cost=self._loss, parameters=self._parameters, update_equation=optimizer, is_local=is_local) def event_handler(event): global start_time, cost_sum, cost_counter if isinstance(event, paddle.event.EndIteration): cost_sum += event.cost cost_counter += 1 if (event.batch_id + 1) % num_iterations_print == 0: output_model_path = os.path.join(output_model_dir, "params.latest.tar.gz") with gzip.open(output_model_path, 'w') as f: trainer.save_parameter_to_tar(f) print("\nPass: %d, Batch: %d, TrainCost: %f" % (event.pass_id, event.batch_id + 1, cost_sum / cost_counter)) cost_sum, cost_counter = 0.0, 0 else: sys.stdout.write('.') sys.stdout.flush() if isinstance(event, paddle.event.BeginPass): start_time = time.time() cost_sum, cost_counter = 0.0, 0 if isinstance(event, paddle.event.EndPass): if test_off: print("\n------- Time: %d sec, Pass: %d" % (time.time() - start_time, event.pass_id)) else: result = trainer.test( reader=adapted_dev_batch_reader, feeding=adapted_feeding_dict) print( "\n------- Time: %d sec, Pass: %d, " "ValidationCost: %s" % (time.time() - start_time, event.pass_id, result.cost)) output_model_path = os.path.join( output_model_dir, "params.pass-%d.tar.gz" % event.pass_id) with gzip.open(output_model_path, 'w') as f: trainer.save_parameter_to_tar(f) trainer.train( reader=adapted_train_batch_reader, event_handler=event_handler, num_passes=num_passes, feeding=adapted_feeding_dict) def infer_loss_batch(self, infer_data): if self._loss_inferer == None: self._loss_inferer = paddle.inference.Inference( output_layer=self._loss, parameters=self._parameters) return self._loss_inferer.infer(input=infer_data) def infer_batch_probs(self, infer_data, feeding_dict): if self._inferer == None: self._inferer = paddle.inference.Inference( output_layer=self._log_probs, parameters=self._parameters) adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict) 
adapted_infer_data = self._adapt_data(infer_data) infer_results = self._inferer.infer( input=adapted_infer_data, feeding=adapted_feeding_dict) start_pos = [0] * (len(adapted_infer_data) + 1) for i in xrange(len(adapted_infer_data)): start_pos[i + 1] = start_pos[i] + adapted_infer_data[i][3][0] probs_split = [ infer_results[start_pos[i]:start_pos[i + 1]] for i in xrange(0, len(adapted_infer_data)) ] return probs_split def decode_batch_greedy(self, probs_split, vocab_list): results = [] for i, probs in enumerate(probs_split): output_transcription = ctc_greedy_decoder( probs_seq=probs, vocabulary=vocab_list) results.append(output_transcription) return results
Apache License 2.0
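A hedged sketch of attaching a KenLM scorer before decoding. The layer sizes, checkpoint path, language-model path, and vocabulary are placeholders; an empty language_model_path would instead leave self._ext_scorer as None and fall back to plain beam search.

vocab_list = [u' ', u'a', u'b', u'c']   # toy vocabulary for illustration

model = DeepSpeech2Model(
    vocab_size=len(vocab_list),
    num_conv_layers=2,
    num_rnn_layers=3,
    rnn_layer_size=2048,
    use_gru=False,
    pretrained_model_path='checkpoints/params.latest.tar.gz',
    share_rnn_weights=True)

# builds a Scorer(alpha, beta, lm_path, vocab) and logs its order/dict statistics
model.init_ext_scorer(beam_alpha=2.5, beam_beta=0.3,
                      language_model_path='models/lm/common_crawl.klm',
                      vocab_list=vocab_list)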
aliyun/aliyun-log-python-sdk
aliyun/log/getlogsrequest.py
GetLogsRequest.get_line
python
def get_line(self): return self.line
Get the maximum number of log lines to return :return: int, maximum number of log lines to return
https://github.com/aliyun/aliyun-log-python-sdk/blob/49b7b92798729d962268252dbbae9d7c098e60f8/aliyun/log/getlogsrequest.py#L132-L137
from .logrequest import LogRequest class GetLogsRequest(LogRequest): def __init__(self, project=None, logstore=None, fromTime=None, toTime=None, topic=None, query=None, line=100, offset=0, reverse=False, power_sql=False): LogRequest.__init__(self, project) self.logstore = logstore self.fromTime = fromTime self.toTime = toTime self.topic = topic self.query = query self.line = line self.offset = offset self.reverse = reverse self.power_sql = power_sql def get_logstore(self): return self.logstore if self.logstore else '' def set_logstore(self, logstore): self.logstore = logstore def get_topic(self): return self.topic if self.topic else '' def set_topic(self, topic): self.topic = topic def get_from(self): return self.fromTime def set_from(self, fromTime): self.fromTime = fromTime def get_to(self): return self.toTime def set_to(self, toTime): self.toTime = toTime def get_query(self): return self.query def set_query(self, query): self.query = query
MIT License
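A small sketch of constructing the request and reading the line limit back. The project and logstore names and the Unix time stamps are placeholders; the import path mirrors the file location above.

from aliyun.log.getlogsrequest import GetLogsRequest

request = GetLogsRequest(project='my-project', logstore='my-logstore',
                         fromTime=1609459200, toTime=1609545600,
                         query='*', line=200, offset=0, reverse=False)

print(request.get_line())       # 200, the maximum number of log lines to return
print(request.get_logstore())   # 'my-logstore'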
airtestproject/airtest
airtest/core/android/adb.py
ADB.start_cmd
python
def start_cmd(self, cmds, device=True): if device: if not self.serialno: raise RuntimeError("please set serialno first") cmd_options = self.cmd_options + ['-s', self.serialno] else: cmd_options = self.cmd_options cmds = cmd_options + split_cmd(cmds) LOGGING.debug(" ".join(cmds)) if not PY3: cmds = [c.encode(get_std_encoding(sys.stdin)) for c in cmds] proc = subprocess.Popen( cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=SUBPROCESS_FLAG ) return proc
Start a subprocess with adb command(s) Args: cmds: command(s) to be run device: if True, the device serial number must be specified by `-s serialno` argument Raises: RuntimeError: if `device` is True and serialno is not specified Returns: a subprocess
https://github.com/airtestproject/airtest/blob/c29d0462fe29db5c04cda31de1c05bcae5991061/airtest/core/android/adb.py#L124-L159
import os import re import sys import time import random import platform import warnings import subprocess import threading from copy import copy from six import PY3, text_type, binary_type, raise_from from six.moves import reduce from airtest.core.android.constant import (DEFAULT_ADB_PATH, IP_PATTERN, SDK_VERISON_ANDROID7) from airtest.core.error import (AdbError, AdbShellError, AirtestError, DeviceConnectionError) from airtest.utils.compat import decode_path, raisefrom, proc_communicate_timeout, SUBPROCESS_FLAG from airtest.utils.logger import get_logger from airtest.utils.nbsp import NonBlockingStreamReader from airtest.utils.retry import retries from airtest.utils.snippet import get_std_encoding, reg_cleanup, split_cmd, make_file_executable LOGGING = get_logger(__name__) class ADB(object): _instances = [] status_device = "device" status_offline = "offline" SHELL_ENCODING = "utf-8" def __init__(self, serialno=None, adb_path=None, server_addr=None, display_id=None, input_event=None): self.serialno = serialno self.adb_path = adb_path or self.builtin_adb_path() self.display_id = display_id self.input_event = input_event self._set_cmd_options(server_addr) self.connect() self._sdk_version = None self._line_breaker = None self._display_info = {} self._display_info_lock = threading.Lock() self._forward_local_using = [] self.__class__._instances.append(self) @staticmethod def builtin_adb_path(): system = platform.system() machine = platform.machine() adb_path = DEFAULT_ADB_PATH.get('{}-{}'.format(system, machine)) if not adb_path: adb_path = DEFAULT_ADB_PATH.get(system) if not adb_path: raise RuntimeError("No adb executable supports this platform({}-{}).".format(system, machine)) if "ANDROID_HOME" in os.environ: del os.environ["ANDROID_HOME"] if system != "Windows": make_file_executable(adb_path) return adb_path def _set_cmd_options(self, server_addr=None): self.host = server_addr[0] if server_addr else "127.0.0.1" self.port = server_addr[1] if server_addr else 5037 self.cmd_options = [self.adb_path] if self.host not in ("localhost", "127.0.0.1"): self.cmd_options += ['-H', self.host] if self.port != 5037: self.cmd_options += ['-P', str(self.port)] def start_server(self): return self.cmd("start-server", device=False) def kill_server(self): return self.cmd("kill-server", device=False) def version(self): return self.cmd("version", device=False).strip()
Apache License 2.0
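A sketch of issuing commands through start_cmd(); the serial number is a placeholder and an attached device or emulator is assumed.

from airtest.core.android.adb import ADB

adb = ADB(serialno='emulator-5554')

# device command: expands to "adb -s emulator-5554 shell getprop ro.product.model"
proc = adb.start_cmd('shell getprop ro.product.model')
stdout, stderr = proc.communicate()
print(stdout.decode('utf-8').strip())

# server-level command: device=False skips the -s <serialno> option
adb.start_cmd('version', device=False).communicate()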
mcs07/pubchempy
pubchempy.py
Compound.from_cid
python
def from_cid(cls, cid, **kwargs): record = json.loads(request(cid, **kwargs).read().decode())['PC_Compounds'][0] return cls(record)
Retrieve the Compound record for the specified CID. Usage:: c = Compound.from_cid(6819) :param int cid: The PubChem Compound Identifier (CID).
https://github.com/mcs07/pubchempy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L717-L727
from __future__ import print_function from __future__ import unicode_literals from __future__ import division import functools import json import logging import os import sys import time import warnings import binascii try: from urllib.error import HTTPError from urllib.parse import quote, urlencode from urllib.request import urlopen except ImportError: from urllib import urlencode from urllib2 import quote, urlopen, HTTPError try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest __author__ = 'Matt Swain' __email__ = 'm.swain@me.com' __version__ = '1.0.4' __license__ = 'MIT' API_BASE = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug' log = logging.getLogger('pubchempy') log.addHandler(logging.NullHandler()) if sys.version_info[0] == 3: text_types = str, bytes else: text_types = basestring, class CompoundIdType(object): DEPOSITED = 0 STANDARDIZED = 1 COMPONENT = 2 NEUTRALIZED = 3 MIXTURE = 4 TAUTOMER = 5 IONIZED = 6 UNKNOWN = 255 class BondType(object): SINGLE = 1 DOUBLE = 2 TRIPLE = 3 QUADRUPLE = 4 DATIVE = 5 COMPLEX = 6 IONIC = 7 UNKNOWN = 255 class CoordinateType(object): TWO_D = 1 THREE_D = 2 SUBMITTED = 3 EXPERIMENTAL = 4 COMPUTED = 5 STANDARDIZED = 6 AUGMENTED = 7 ALIGNED = 8 COMPACT = 9 UNITS_ANGSTROMS = 10 UNITS_NANOMETERS = 11 UNITS_PIXEL = 12 UNITS_POINTS = 13 UNITS_STDBONDS = 14 UNITS_UNKNOWN = 255 class ProjectCategory(object): MLSCN = 1 MPLCN = 2 MLSCN_AP = 3 MPLCN_AP = 4 JOURNAL_ARTICLE = 5 ASSAY_VENDOR = 6 LITERATURE_EXTRACTED = 7 LITERATURE_AUTHOR = 8 LITERATURE_PUBLISHER = 9 RNAIGI = 10 OTHER = 255 ELEMENTS = { 1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne', 11: 'Na', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar', 19: 'K', 20: 'Ca', 21: 'Sc', 22: 'Ti', 23: 'V', 24: 'Cr', 25: 'Mn', 26: 'Fe', 27: 'Co', 28: 'Ni', 29: 'Cu', 30: 'Zn', 31: 'Ga', 32: 'Ge', 33: 'As', 34: 'Se', 35: 'Br', 36: 'Kr', 37: 'Rb', 38: 'Sr', 39: 'Y', 40: 'Zr', 41: 'Nb', 42: 'Mo', 43: 'Tc', 44: 'Ru', 45: 'Rh', 46: 'Pd', 47: 'Ag', 48: 'Cd', 49: 'In', 50: 'Sn', 51: 'Sb', 52: 'Te', 53: 'I', 54: 'Xe', 55: 'Cs', 56: 'Ba', 57: 'La', 58: 'Ce', 59: 'Pr', 60: 'Nd', 61: 'Pm', 62: 'Sm', 63: 'Eu', 64: 'Gd', 65: 'Tb', 66: 'Dy', 67: 'Ho', 68: 'Er', 69: 'Tm', 70: 'Yb', 71: 'Lu', 72: 'Hf', 73: 'Ta', 74: 'W', 75: 'Re', 76: 'Os', 77: 'Ir', 78: 'Pt', 79: 'Au', 80: 'Hg', 81: 'Tl', 82: 'Pb', 83: 'Bi', 84: 'Po', 85: 'At', 86: 'Rn', 87: 'Fr', 88: 'Ra', 89: 'Ac', 90: 'Th', 91: 'Pa', 92: 'U', 93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk', 98: 'Cf', 99: 'Es', 100: 'Fm', 101: 'Md', 102: 'No', 103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', 107: 'Bh', 108: 'Hs', 109: 'Mt', 110: 'Ds', 111: 'Rg', 112: 'Cp', 113: 'ut', 114: 'uq', 115: 'up', 116: 'uh', 117: 'us', 118: 'uo', } def request(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None, **kwargs): if not identifier: raise ValueError('identifier/cid cannot be None') if isinstance(identifier, int): identifier = str(identifier) if not isinstance(identifier, text_types): identifier = ','.join(str(x) for x in identifier) kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) urlid, postdata = None, None if namespace == 'sourceid': identifier = identifier.replace('/', '.') if namespace in ['listkey', 'formula', 'sourceid'] or searchtype == 'xref' or (searchtype and namespace == 'cid') or domain == 'sources': urlid = quote(identifier.encode('utf8')) else: postdata = urlencode([(namespace, identifier)]).encode('utf8') comps = 
filter(None, [API_BASE, domain, searchtype, namespace, urlid, operation, output]) apiurl = '/'.join(comps) if kwargs: apiurl += '?%s' % urlencode(kwargs) try: log.debug('Request URL: %s', apiurl) log.debug('Request data: %s', postdata) response = urlopen(apiurl, postdata) return response except HTTPError as e: raise PubChemHTTPError(e) def get(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None, **kwargs): if (searchtype and searchtype != 'xref') or namespace in ['formula']: response = request(identifier, namespace, domain, None, 'JSON', searchtype, **kwargs).read() status = json.loads(response.decode()) if 'Waiting' in status and 'ListKey' in status['Waiting']: identifier = status['Waiting']['ListKey'] namespace = 'listkey' while 'Waiting' in status and 'ListKey' in status['Waiting']: time.sleep(2) response = request(identifier, namespace, domain, operation, 'JSON', **kwargs).read() status = json.loads(response.decode()) if not output == 'JSON': response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read() else: response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read() return response def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs): try: return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode()) except NotFoundError as e: log.info(e) return None def get_sdf(identifier, namespace='cid', domain='compound',operation=None, searchtype=None, **kwargs): try: return get(identifier, namespace, domain, operation, 'SDF', searchtype, **kwargs).decode() except NotFoundError as e: log.info(e) return None def get_compounds(identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs): results = get_json(identifier, namespace, searchtype=searchtype, **kwargs) compounds = [Compound(r) for r in results['PC_Compounds']] if results else [] if as_dataframe: return compounds_to_frame(compounds) return compounds def get_substances(identifier, namespace='sid', as_dataframe=False, **kwargs): results = get_json(identifier, namespace, 'substance', **kwargs) substances = [Substance(r) for r in results['PC_Substances']] if results else [] if as_dataframe: return substances_to_frame(substances) return substances def get_assays(identifier, namespace='aid', **kwargs): results = get_json(identifier, namespace, 'assay', 'description', **kwargs) return [Assay(r) for r in results['PC_AssayContainer']] if results else [] PROPERTY_MAP = { 'molecular_formula': 'MolecularFormula', 'molecular_weight': 'MolecularWeight', 'canonical_smiles': 'CanonicalSMILES', 'isomeric_smiles': 'IsomericSMILES', 'inchi': 'InChI', 'inchikey': 'InChIKey', 'iupac_name': 'IUPACName', 'xlogp': 'XLogP', 'exact_mass': 'ExactMass', 'monoisotopic_mass': 'MonoisotopicMass', 'tpsa': 'TPSA', 'complexity': 'Complexity', 'charge': 'Charge', 'h_bond_donor_count': 'HBondDonorCount', 'h_bond_acceptor_count': 'HBondAcceptorCount', 'rotatable_bond_count': 'RotatableBondCount', 'heavy_atom_count': 'HeavyAtomCount', 'isotope_atom_count': 'IsotopeAtomCount', 'atom_stereo_count': 'AtomStereoCount', 'defined_atom_stereo_count': 'DefinedAtomStereoCount', 'undefined_atom_stereo_count': 'UndefinedAtomStereoCount', 'bond_stereo_count': 'BondStereoCount', 'defined_bond_stereo_count': 'DefinedBondStereoCount', 'undefined_bond_stereo_count': 'UndefinedBondStereoCount', 'covalent_unit_count': 'CovalentUnitCount', 'volume_3d': 'Volume3D', 
'conformer_rmsd_3d': 'ConformerModelRMSD3D', 'conformer_model_rmsd_3d': 'ConformerModelRMSD3D', 'x_steric_quadrupole_3d': 'XStericQuadrupole3D', 'y_steric_quadrupole_3d': 'YStericQuadrupole3D', 'z_steric_quadrupole_3d': 'ZStericQuadrupole3D', 'feature_count_3d': 'FeatureCount3D', 'feature_acceptor_count_3d': 'FeatureAcceptorCount3D', 'feature_donor_count_3d': 'FeatureDonorCount3D', 'feature_anion_count_3d': 'FeatureAnionCount3D', 'feature_cation_count_3d': 'FeatureCationCount3D', 'feature_ring_count_3d': 'FeatureRingCount3D', 'feature_hydrophobe_count_3d': 'FeatureHydrophobeCount3D', 'effective_rotor_count_3d': 'EffectiveRotorCount3D', 'conformer_count_3d': 'ConformerCount3D', } def get_properties(properties, identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs): if isinstance(properties, text_types): properties = properties.split(',') properties = ','.join([PROPERTY_MAP.get(p, p) for p in properties]) properties = 'property/%s' % properties results = get_json(identifier, namespace, 'compound', properties, searchtype=searchtype, **kwargs) results = results['PropertyTable']['Properties'] if results else [] if as_dataframe: import pandas as pd return pd.DataFrame.from_records(results, index='CID') return results def get_synonyms(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs): results = get_json(identifier, namespace, domain, 'synonyms', searchtype=searchtype, **kwargs) return results['InformationList']['Information'] if results else [] def get_cids(identifier, namespace='name', domain='compound', searchtype=None, **kwargs): results = get_json(identifier, namespace, domain, 'cids', searchtype=searchtype, **kwargs) if not results: return [] elif 'IdentifierList' in results: return results['IdentifierList']['CID'] elif 'InformationList' in results: return results['InformationList']['Information'] def get_sids(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs): results = get_json(identifier, namespace, domain, 'sids', searchtype=searchtype, **kwargs) if not results: return [] elif 'IdentifierList' in results: return results['IdentifierList']['SID'] elif 'InformationList' in results: return results['InformationList']['Information'] def get_aids(identifier, namespace='cid', domain='compound', searchtype=None, **kwargs): results = get_json(identifier, namespace, domain, 'aids', searchtype=searchtype, **kwargs) if not results: return [] elif 'IdentifierList' in results: return results['IdentifierList']['AID'] elif 'InformationList' in results: return results['InformationList']['Information'] def get_all_sources(domain='substance'): results = json.loads(get(domain, None, 'sources').decode()) return results['InformationList']['SourceName'] def download(outformat, path, identifier, namespace='cid', domain='compound', operation=None, searchtype=None, overwrite=False, **kwargs): response = get(identifier, namespace, domain, operation, outformat, searchtype, **kwargs) if not overwrite and os.path.isfile(path): raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." 
% path) with open(path, 'wb') as f: f.write(response) def memoized_property(fget): attr_name = '_{0}'.format(fget.__name__) @functools.wraps(fget) def fget_memoized(self): if not hasattr(self, attr_name): setattr(self, attr_name, fget(self)) return getattr(self, attr_name) return property(fget_memoized) def deprecated(message=None): def deco(func): @functools.wraps(func) def wrapped(*args, **kwargs): warnings.warn( message or 'Call to deprecated function {}'.format(func.__name__), category=PubChemPyDeprecationWarning, stacklevel=2 ) return func(*args, **kwargs) return wrapped return deco class Atom(object): def __init__(self, aid, number, x=None, y=None, z=None, charge=0): self.aid = aid self.number = number self.x = x self.y = y self.z = z self.charge = charge def __repr__(self): return 'Atom(%s, %s)' % (self.aid, self.element) def __eq__(self, other): return (isinstance(other, type(self)) and self.aid == other.aid and self.element == other.element and self.x == other.x and self.y == other.y and self.z == other.z and self.charge == other.charge) @deprecated('Dictionary style access to Atom attributes is deprecated') def __getitem__(self, prop): if prop in {'element', 'x', 'y', 'z', 'charge'}: return getattr(self, prop) raise KeyError(prop) @deprecated('Dictionary style access to Atom attributes is deprecated') def __setitem__(self, prop, val): setattr(self, prop, val) @deprecated('Dictionary style access to Atom attributes is deprecated') def __contains__(self, prop): if prop in {'element', 'x', 'y', 'z', 'charge'}: return getattr(self, prop) is not None return False @property def element(self): return ELEMENTS.get(self.number, None) def to_dict(self): data = {'aid': self.aid, 'number': self.number, 'element': self.element} for coord in {'x', 'y', 'z'}: if getattr(self, coord) is not None: data[coord] = getattr(self, coord) if self.charge is not 0: data['charge'] = self.charge return data def set_coordinates(self, x, y, z=None): self.x = x self.y = y self.z = z @property def coordinate_type(self): return '2d' if self.z is None else '3d' class Bond(object): def __init__(self, aid1, aid2, order=BondType.SINGLE, style=None): self.aid1 = aid1 self.aid2 = aid2 self.order = order self.style = style def __repr__(self): return 'Bond(%s, %s, %s)' % (self.aid1, self.aid2, self.order) def __eq__(self, other): return (isinstance(other, type(self)) and self.aid1 == other.aid1 and self.aid2 == other.aid2 and self.order == other.order and self.style == other.style) @deprecated('Dictionary style access to Bond attributes is deprecated') def __getitem__(self, prop): if prop in {'order', 'style'}: return getattr(self, prop) raise KeyError(prop) @deprecated('Dictionary style access to Bond attributes is deprecated') def __setitem__(self, prop, val): setattr(self, prop, val) @deprecated('Dictionary style access to Atom attributes is deprecated') def __contains__(self, prop): if prop in {'order', 'style'}: return getattr(self, prop) is not None return False @deprecated('Dictionary style access to Atom attributes is deprecated') def __delitem__(self, prop): if not hasattr(self.__wrapped, prop): raise KeyError(prop) delattr(self.__wrapped, prop) def to_dict(self): data = {'aid1': self.aid1, 'aid2': self.aid2, 'order': self.order} if self.style is not None: data['style'] = self.style return data class Compound(object): def __init__(self, record): self._record = None self._atoms = {} self._bonds = {} self.record = record @property def record(self): return self._record @record.setter def record(self, record): 
self._record = record log.debug('Created %s' % self) self._setup_atoms() self._setup_bonds() def _setup_atoms(self): self._atoms = {} aids = self.record['atoms']['aid'] elements = self.record['atoms']['element'] if not len(aids) == len(elements): raise ResponseParseError('Error parsing atom elements') for aid, element in zip(aids, elements): self._atoms[aid] = Atom(aid=aid, number=element) if 'coords' in self.record: coord_ids = self.record['coords'][0]['aid'] xs = self.record['coords'][0]['conformers'][0]['x'] ys = self.record['coords'][0]['conformers'][0]['y'] zs = self.record['coords'][0]['conformers'][0].get('z', []) if not len(coord_ids) == len(xs) == len(ys) == len(self._atoms) or (zs and not len(zs) == len(coord_ids)): raise ResponseParseError('Error parsing atom coordinates') for aid, x, y, z in zip_longest(coord_ids, xs, ys, zs): self._atoms[aid].set_coordinates(x, y, z) if 'charge' in self.record['atoms']: for charge in self.record['atoms']['charge']: self._atoms[charge['aid']].charge = charge['value'] def _setup_bonds(self): self._bonds = {} if 'bonds' not in self.record: return aid1s = self.record['bonds']['aid1'] aid2s = self.record['bonds']['aid2'] orders = self.record['bonds']['order'] if not len(aid1s) == len(aid2s) == len(orders): raise ResponseParseError('Error parsing bonds') for aid1, aid2, order in zip(aid1s, aid2s, orders): self._bonds[frozenset((aid1, aid2))] = Bond(aid1=aid1, aid2=aid2, order=order) if 'coords' in self.record and 'style' in self.record['coords'][0]['conformers'][0]: aid1s = self.record['coords'][0]['conformers'][0]['style']['aid1'] aid2s = self.record['coords'][0]['conformers'][0]['style']['aid2'] styles = self.record['coords'][0]['conformers'][0]['style']['annotation'] for aid1, aid2, style in zip(aid1s, aid2s, styles): self._bonds[frozenset((aid1, aid2))].style = style @classmethod
MIT License
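A usage sketch for the classmethod above. CID 2244 (aspirin) is an illustrative identifier; the attributes read at the end are standard Compound properties of pubchempy.

import pubchempy as pcp

aspirin = pcp.Compound.from_cid(2244)    # single PUG REST request for the full record
print(aspirin.molecular_formula)         # 'C9H8O4'
print(aspirin.isomeric_smiles)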
veritas9872/fastmri-kspace
models/complex/complex_layers.py
ComplexSigmoid.__init__
python
def __init__(self): super().__init__()
Performs the sigmoid on the magnitude of complex numbers.
https://github.com/veritas9872/fastmri-kspace/blob/4c484b3183e9f06838b5ee108af283611c2e1e77/models/complex/complex_layers.py#L123-L127
import torch from torch import nn, Tensor from torch.nn.init import _calculate_fan_in_and_fan_out import torch.nn.functional as F import numpy as np class ComplexInitializer: def __init__(self, method='kaiming'): assert method in ('kaiming', 'xavier'), 'Invalid initialization method.' self.method = method def get_weight_inits(self, weight_shape): fan_in, fan_out = _calculate_fan_in_and_fan_out(torch.zeros(size=weight_shape)) if self.method == 'xavier': mode = 1 / np.sqrt(fan_in + fan_out) elif self.method == 'kaiming': mode = 1 / np.sqrt(fan_in) else: raise NotImplementedError('Invalid initialization method.') magnitude = np.random.rayleigh(scale=mode, size=weight_shape) phase = np.random.uniform(low=-np.pi, high=np.pi, size=weight_shape) weight_real = torch.from_numpy((magnitude * np.cos(phase)).astype(np.float32)) weight_imag = torch.from_numpy((magnitude * np.sin(phase)).astype(np.float32)) return weight_real, weight_imag class ComplexLinear(nn.Module): def __init__(self, in_features, out_features, bias=True): super().__init__() self.linear_real = nn.Linear(in_features=in_features, out_features=out_features, bias=bias) self.linear_imag = nn.Linear(in_features=in_features, out_features=out_features, bias=bias) init = ComplexInitializer(method='kaiming') weight_real, weight_imag = init.get_weight_inits(weight_shape=self.linear_real.weight.shape) new_weights = {'linear_real.weight': weight_real, 'linear_imag.weight': weight_imag} self.load_state_dict(state_dict=new_weights, strict=False) def forward(self, tensor: Tensor): assert tensor.dim() == 3, 'Expected (N,2,F) format.' assert tensor.size(1) == 2, 'Expected real/imag to be represented in the second dimension, dim=1.' r = tensor.narrow(dim=1, start=0, length=1).squeeze(dim=1) i = tensor.narrow(dim=1, start=1, length=1).squeeze(dim=1) real = self.linear_real(r) - self.linear_imag(i) imag = self.linear_real(i) + self.linear_imag(i) return torch.stack([real, imag], dim=1) class ComplexConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'): super().__init__() kwargs = dict(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode) self.conv_real = nn.Conv2d(**kwargs) self.conv_imag = nn.Conv2d(**kwargs) init = ComplexInitializer(method='kaiming') weight_real, weight_imag = init.get_weight_inits(weight_shape=self.conv_real.weight.shape) new_weights = {'conv_real.weight': weight_real, 'conv_imag.weight': weight_imag} self.load_state_dict(state_dict=new_weights, strict=False) def forward(self, tensor: Tensor) -> Tensor: assert tensor.dim() == 5, 'Expected (N,2,C,H,W) format.' assert tensor.size(1) == 2, 'Expected real/imag to be represented in the second dimension, dim=1.' r = tensor.narrow(dim=1, start=0, length=1).squeeze(dim=1) i = tensor.narrow(dim=1, start=1, length=1).squeeze(dim=1) real = self.conv_real(r) - self.conv_imag(i) imag = self.conv_real(i) + self.conv_imag(r) return torch.stack([real, imag], dim=1) class ComplexSpatialDropout2d(nn.Module): def __init__(self, p=0.): super().__init__() self.drop = nn.Dropout3d(p=p) def forward(self, tensor: Tensor) -> Tensor: assert tensor.dim() == 5, 'Expected (N,2,C,H,W) format.' assert tensor.size(1) == 2, 'Expected real/imag to be represented in the second dimension, dim=1.' 
output = tensor.transpose(1, 2) output = self.drop(output) output = output.transpose(2, 1) return output class ComplexSigmoid(nn.Module):
MIT License
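The layers in this module all operate on tensors whose second dimension holds the real and imaginary parts. A minimal sketch with ComplexConv2d from the same context illustrates the (N, 2, C, H, W) layout; the import path is inferred from the file location above.

import torch
from models.complex.complex_layers import ComplexConv2d

conv = ComplexConv2d(in_channels=1, out_channels=8, kernel_size=3, padding=1)

x = torch.randn(4, 2, 1, 64, 64)   # dim 1 holds (real, imag)
y = conv(x)                        # real = r*Wr - i*Wi, imag = i*Wr + r*Wi
print(y.shape)                     # torch.Size([4, 2, 8, 64, 64])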
smartelect/smartelect
rollgen/tests/utils_for_tests.py
normalize_alef
python
def normalize_alef(s): return s.replace(ALEF_ISOLATED, ALEF_CANONICAL)
Given a string, replace the isolated-form Alef '\ufe8d' with the canonical Alef '\u0627'.
https://github.com/smartelect/smartelect/blob/d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8/rollgen/tests/utils_for_tests.py#L279-L291
import io import collections import random import os import re import sys import time from lxml import etree from pdfminer.pdfpage import PDFPage from pdfminer.pdfinterp import PDFResourceManager from pdfminer.pdfinterp import PDFPageInterpreter from pdfminer.converter import XMLConverter from pdfminer.layout import LAParams import rollgen.pdf_canvas from ..utils import is_iterable EXPECTED_FONTS = ('Amiri-Regular', 'Amiri-Bold',) ALEF_CANONICAL = '\u0627' ALEF_ISOLATED = '\ufe8d' NBSP = '\u00a0' KASRA = '\u0650' SHADDA = '\u0651' FATHA = '\u064e' DAMMA = '\u064f' DIACRITIC_SWAP_REGEX = '(.[{}])'.format(''.join((KASRA, SHADDA, FATHA, DAMMA))) DIACRITIC_SWAP_REGEX = re.compile(DIACRITIC_SWAP_REGEX, re.IGNORECASE) seed = int(time.time()) random.seed(seed) sys.stderr.write("seed is {}\n".format(seed)) def parse_bbox(bbox): return list(map(float, bbox.split(','))) def unwrap_lines(lines, index): return lines[:index] + [lines[index + 1] + ' ' + lines[index]] + lines[index + 2:] def clean_font_name(font_name): return font_name.split('+')[1] if ('+' in font_name) else font_name def clean_textlines(textlines): lines = [] for text_elements in textlines: line = ''.join([text_element.text for text_element in text_elements]) line = normalize_alef(line) line = swap_diacritics(line) lines.append(line) return lines def extract_pdf_page(filename, page_number_or_numbers): if is_iterable(page_number_or_numbers): page_numbers = page_number_or_numbers else: page_numbers = [page_number_or_numbers] f_out = io.BytesIO() laparams = LAParams() rsrcmgr = PDFResourceManager() device = XMLConverter(rsrcmgr, f_out, codec='utf-8', laparams=laparams) with open(filename, 'rb') as f_in: interpreter = PDFPageInterpreter(rsrcmgr, device) for page in PDFPage.get_pages(f_in, page_numbers): interpreter.process_page(page) device.close() xml = f_out.getvalue() f_out.close() return xml def extract_textlines(xml): root = etree.fromstring(xml) text_elements = root.xpath('.//text') lines_by_y_value = collections.defaultdict(list) for text_element in text_elements: bbox = text_element.get('bbox') if bbox: x0, y0, x1, y1 = parse_bbox(text_element.get('bbox')) lines_by_y_value[y1].append((text_element, (x0, x1))) y_values = sorted(lines_by_y_value.keys(), reverse=True) lines = [] for y_value in y_values: line = lines_by_y_value[y_value] line = sorted(line, key=lambda item: item[1]) line = [item[0] for item in line] lines.append(line) return lines def extract_line_lengths(xml): lines = extract_textlines(xml) line_lengths = [] for line in lines: character_bboxes = [parse_bbox(text_element.get('bbox')) for text_element in line] line_lengths.append(character_bboxes[-1][2] - character_bboxes[0][0]) return line_lengths def _get_random_words(filename, n_words): words = open(filename, 'rb').read().decode('utf-8') words = [word.strip() for word in words.split('\n') if word.strip()] while len(words) < n_words: words = words + words return random.sample(words, n_words) def get_random_arabic_person_names(n_names): return _get_random_words(os.path.join('.', 'tests', '_random_arabic_person_names.txt'), n_names) def get_random_arabic_place_names(n_names): return _get_random_words(os.path.join('.', 'tests', '_random_arabic_place_names.txt'), n_names) def generate_arabic_place_name(min_length=0): make_name = lambda n_words: ' '.join(get_random_arabic_place_names(random.randint(1, n_words))) n_words = 3 name = make_name(n_words) while len(name) < min_length: n_words += 1 name = make_name(n_words) return name
Apache License 2.0
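A short check of the helper: the isolated presentation form of Alef (U+FE8D) is replaced by the canonical Alef (U+0627) so that text extracted from the PDF compares equal to the expected strings. The import path is inferred from the file location above.

from rollgen.tests.utils_for_tests import normalize_alef

s = '\ufe8d\u0644\u0644\u064a\u0628\u064a\u0629'   # begins with the isolated-form Alef
print(normalize_alef(s) == '\u0627\u0644\u0644\u064a\u0628\u064a\u0629')   # True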
unofficial-memsource/memsource-cli-client
memsource_cli/models/project_trans_memory_dto_v2.py
ProjectTransMemoryDtoV2.trans_memory
python
def trans_memory(self): return self._trans_memory
Gets the trans_memory of this ProjectTransMemoryDtoV2. # noqa: E501 :return: The trans_memory of this ProjectTransMemoryDtoV2. # noqa: E501 :rtype: TransMemoryDtoV2
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/project_trans_memory_dto_v2.py#L168-L175
import pprint import re import six from memsource_cli.models.trans_memory_dto_v2 import TransMemoryDtoV2 from memsource_cli.models.workflow_step_reference import WorkflowStepReference class ProjectTransMemoryDtoV2(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'target_locale': 'str', 'workflow_step': 'WorkflowStepReference', 'read_mode': 'bool', 'write_mode': 'bool', 'trans_memory': 'TransMemoryDtoV2', 'penalty': 'float', 'apply_penalty_to101_only': 'bool' } attribute_map = { 'target_locale': 'targetLocale', 'workflow_step': 'workflowStep', 'read_mode': 'readMode', 'write_mode': 'writeMode', 'trans_memory': 'transMemory', 'penalty': 'penalty', 'apply_penalty_to101_only': 'applyPenaltyTo101Only' } def __init__(self, target_locale=None, workflow_step=None, read_mode=None, write_mode=None, trans_memory=None, penalty=None, apply_penalty_to101_only=None): self._target_locale = None self._workflow_step = None self._read_mode = None self._write_mode = None self._trans_memory = None self._penalty = None self._apply_penalty_to101_only = None self.discriminator = None if target_locale is not None: self.target_locale = target_locale if workflow_step is not None: self.workflow_step = workflow_step if read_mode is not None: self.read_mode = read_mode if write_mode is not None: self.write_mode = write_mode if trans_memory is not None: self.trans_memory = trans_memory if penalty is not None: self.penalty = penalty if apply_penalty_to101_only is not None: self.apply_penalty_to101_only = apply_penalty_to101_only @property def target_locale(self): return self._target_locale @target_locale.setter def target_locale(self, target_locale): self._target_locale = target_locale @property def workflow_step(self): return self._workflow_step @workflow_step.setter def workflow_step(self, workflow_step): self._workflow_step = workflow_step @property def read_mode(self): return self._read_mode @read_mode.setter def read_mode(self, read_mode): self._read_mode = read_mode @property def write_mode(self): return self._write_mode @write_mode.setter def write_mode(self, write_mode): self._write_mode = write_mode @property
Apache License 2.0
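A hedged sketch of the generated swagger model; constructing TransMemoryDtoV2 with no arguments and the locale value are assumptions made only for illustration.

from memsource_cli.models.project_trans_memory_dto_v2 import ProjectTransMemoryDtoV2
from memsource_cli.models.trans_memory_dto_v2 import TransMemoryDtoV2

tm = TransMemoryDtoV2()   # assumed to accept no arguments, as swagger models usually do
project_tm = ProjectTransMemoryDtoV2(target_locale='de', read_mode=True,
                                     write_mode=False, trans_memory=tm, penalty=1.0)

print(project_tm.trans_memory is tm)   # True, the property returns the stored object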
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/light/xiaomi_miio.py
XiaomiPhilipsEyecareLamp.async_reminder_on
python
async def async_reminder_on(self): await self._try_command( "Turning on the reminder failed.", self._light.reminder_on)
Enable the eye fatigue notification.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/light/xiaomi_miio.py#L615-L619
import asyncio from functools import partial import logging from math import ceil from datetime import timedelta import datetime import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.light import ( PLATFORM_SCHEMA, ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, ATTR_COLOR_TEMP, SUPPORT_COLOR_TEMP, Light, ATTR_ENTITY_ID, DOMAIN, ) from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_TOKEN, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.util import dt _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'Xiaomi Philips Light' DATA_KEY = 'light.xiaomi_miio' CONF_MODEL = 'model' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_MODEL): vol.In( ['philips.light.sread1', 'philips.light.ceiling', 'philips.light.zyceiling', 'philips.light.bulb', 'philips.light.candle', 'philips.light.candle2']), }) REQUIREMENTS = ['python-miio==0.3.9', 'construct==2.9.41'] CCT_MIN = 1 CCT_MAX = 100 DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4 DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1 SUCCESS = ['ok'] ATTR_MODEL = 'model' ATTR_SCENE = 'scene' ATTR_DELAYED_TURN_OFF = 'delayed_turn_off' ATTR_TIME_PERIOD = 'time_period' ATTR_NIGHT_LIGHT_MODE = 'night_light_mode' ATTR_AUTOMATIC_COLOR_TEMPERATURE = 'automatic_color_temperature' ATTR_REMINDER = 'reminder' ATTR_EYECARE_MODE = 'eyecare_mode' SERVICE_SET_SCENE = 'xiaomi_miio_set_scene' SERVICE_SET_DELAYED_TURN_OFF = 'xiaomi_miio_set_delayed_turn_off' SERVICE_REMINDER_ON = 'xiaomi_miio_reminder_on' SERVICE_REMINDER_OFF = 'xiaomi_miio_reminder_off' SERVICE_NIGHT_LIGHT_MODE_ON = 'xiaomi_miio_night_light_mode_on' SERVICE_NIGHT_LIGHT_MODE_OFF = 'xiaomi_miio_night_light_mode_off' SERVICE_EYECARE_MODE_ON = 'xiaomi_miio_eyecare_mode_on' SERVICE_EYECARE_MODE_OFF = 'xiaomi_miio_eyecare_mode_off' XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, }) SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend({ vol.Required(ATTR_SCENE): vol.All(vol.Coerce(int), vol.Clamp(min=1, max=4)) }) SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend({ vol.Required(ATTR_TIME_PERIOD): vol.All(cv.time_period, cv.positive_timedelta) }) SERVICE_TO_METHOD = { SERVICE_SET_DELAYED_TURN_OFF: { 'method': 'async_set_delayed_turn_off', 'schema': SERVICE_SCHEMA_SET_DELAYED_TURN_OFF}, SERVICE_SET_SCENE: { 'method': 'async_set_scene', 'schema': SERVICE_SCHEMA_SET_SCENE}, SERVICE_REMINDER_ON: {'method': 'async_reminder_on'}, SERVICE_REMINDER_OFF: {'method': 'async_reminder_off'}, SERVICE_NIGHT_LIGHT_MODE_ON: {'method': 'async_night_light_mode_on'}, SERVICE_NIGHT_LIGHT_MODE_OFF: {'method': 'async_night_light_mode_off'}, SERVICE_EYECARE_MODE_ON: {'method': 'async_eyecare_mode_on'}, SERVICE_EYECARE_MODE_OFF: {'method': 'async_eyecare_mode_off'}, } async def async_setup_platform(hass, config, async_add_devices, discovery_info=None): from miio import Device, DeviceException if DATA_KEY not in hass.data: hass.data[DATA_KEY] = {} host = config.get(CONF_HOST) name = config.get(CONF_NAME) token = config.get(CONF_TOKEN) model = config.get(CONF_MODEL) _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5]) devices = [] unique_id = None if model is None: try: miio_device = Device(host, token) device_info = miio_device.info() model = device_info.model unique_id = "{}-{}".format(model, 
device_info.mac_address) _LOGGER.info("%s %s %s detected", model, device_info.firmware_version, device_info.hardware_version) except DeviceException: raise PlatformNotReady if model == 'philips.light.sread1': from miio import PhilipsEyecare light = PhilipsEyecare(host, token) primary_device = XiaomiPhilipsEyecareLamp( name, light, model, unique_id) devices.append(primary_device) hass.data[DATA_KEY][host] = primary_device secondary_device = XiaomiPhilipsEyecareLampAmbientLight( name, light, model, unique_id) devices.append(secondary_device) elif model in ['philips.light.ceiling', 'philips.light.zyceiling']: from miio import Ceil light = Ceil(host, token) device = XiaomiPhilipsCeilingLamp(name, light, model, unique_id) devices.append(device) hass.data[DATA_KEY][host] = device elif model in ['philips.light.bulb', 'philips.light.candle', 'philips.light.candle2']: from miio import PhilipsBulb light = PhilipsBulb(host, token) device = XiaomiPhilipsBulb(name, light, model, unique_id) devices.append(device) hass.data[DATA_KEY][host] = device else: _LOGGER.error( 'Unsupported device found! Please create an issue at ' 'https://github.com/syssi/philipslight/issues ' 'and provide the following data: %s', model) return False async_add_devices(devices, update_before_add=True) async def async_service_handler(service): method = SERVICE_TO_METHOD.get(service.service) params = {key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID} entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: target_devices = [dev for dev in hass.data[DATA_KEY].values() if dev.entity_id in entity_ids] else: target_devices = hass.data[DATA_KEY].values() update_tasks = [] for target_device in target_devices: if not hasattr(target_device, method['method']): continue await getattr(target_device, method['method'])(**params) update_tasks.append(target_device.async_update_ha_state(True)) if update_tasks: await asyncio.wait(update_tasks, loop=hass.loop) for xiaomi_miio_service in SERVICE_TO_METHOD: schema = SERVICE_TO_METHOD[xiaomi_miio_service].get( 'schema', XIAOMI_MIIO_SERVICE_SCHEMA) hass.services.async_register( DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema) class XiaomiPhilipsAbstractLight(Light): def __init__(self, name, light, model, unique_id): self._name = name self._light = light self._model = model self._unique_id = unique_id self._brightness = None self._available = False self._state = None self._state_attrs = { ATTR_MODEL: self._model, } @property def should_poll(self): return True @property def unique_id(self): return self._unique_id @property def name(self): return self._name @property def available(self): return self._available @property def device_state_attributes(self): return self._state_attrs @property def is_on(self): return self._state @property def brightness(self): return self._brightness @property def supported_features(self): return SUPPORT_BRIGHTNESS async def _try_command(self, mask_error, func, *args, **kwargs): from miio import DeviceException try: result = await self.hass.async_add_job( partial(func, *args, **kwargs)) _LOGGER.debug("Response received from light: %s", result) return result == SUCCESS except DeviceException as exc: _LOGGER.error(mask_error, exc) self._available = False return False async def async_turn_on(self, **kwargs): if ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] percent_brightness = ceil(100 * brightness / 255.0) _LOGGER.debug( "Setting brightness: %s %s%%", brightness, percent_brightness) result = await 
self._try_command( "Setting brightness failed: %s", self._light.set_brightness, percent_brightness) if result: self._brightness = brightness else: await self._try_command( "Turning the light on failed.", self._light.on) async def async_turn_off(self, **kwargs): await self._try_command( "Turning the light off failed.", self._light.off) async def async_update(self): from miio import DeviceException try: state = await self.hass.async_add_job(self._light.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.is_on self._brightness = ceil((255 / 100.0) * state.brightness) except DeviceException as ex: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight): def __init__(self, name, light, model, unique_id): super().__init__(name, light, model, unique_id) self._state_attrs.update({ ATTR_SCENE: None, ATTR_DELAYED_TURN_OFF: None, }) async def async_update(self): from miio import DeviceException try: state = await self.hass.async_add_job(self._light.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.is_on self._brightness = ceil((255 / 100.0) * state.brightness) delayed_turn_off = self.delayed_turn_off_timestamp( state.delay_off_countdown, dt.utcnow(), self._state_attrs[ATTR_DELAYED_TURN_OFF]) self._state_attrs.update({ ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off, }) except DeviceException as ex: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) async def async_set_scene(self, scene: int = 1): await self._try_command( "Setting a fixed scene failed.", self._light.set_scene, scene) async def async_set_delayed_turn_off(self, time_period: timedelta): await self._try_command( "Setting the turn off delay failed.", self._light.delay_off, time_period.total_seconds()) @staticmethod def delayed_turn_off_timestamp(countdown: int, current: datetime, previous: datetime): if countdown is not None and countdown > 0: new = current.replace(microsecond=0) + timedelta(seconds=countdown) if previous is None: return new lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS) upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS) diff = previous - new if lower < diff < upper: return previous return new return None class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight): def __init__(self, name, light, model, unique_id): super().__init__(name, light, model, unique_id) self._color_temp = None @property def color_temp(self): return self._color_temp @property def min_mireds(self): return 175 @property def max_mireds(self): return 333 @property def supported_features(self): return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP async def async_turn_on(self, **kwargs): if ATTR_COLOR_TEMP in kwargs: color_temp = kwargs[ATTR_COLOR_TEMP] percent_color_temp = self.translate( color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX) if ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] percent_brightness = ceil(100 * brightness / 255.0) if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs: _LOGGER.debug( "Setting brightness and color temperature: " "%s %s%%, %s mireds, %s%% cct", brightness, percent_brightness, color_temp, percent_color_temp) result = await self._try_command( "Setting brightness and color temperature failed: " "%s bri, %s cct", self._light.set_brightness_and_color_temperature, percent_brightness, percent_color_temp) if result: self._color_temp = 
color_temp self._brightness = brightness elif ATTR_COLOR_TEMP in kwargs: _LOGGER.debug( "Setting color temperature: " "%s mireds, %s%% cct", color_temp, percent_color_temp) result = await self._try_command( "Setting color temperature failed: %s cct", self._light.set_color_temperature, percent_color_temp) if result: self._color_temp = color_temp elif ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] percent_brightness = ceil(100 * brightness / 255.0) _LOGGER.debug( "Setting brightness: %s %s%%", brightness, percent_brightness) result = await self._try_command( "Setting brightness failed: %s", self._light.set_brightness, percent_brightness) if result: self._brightness = brightness else: await self._try_command( "Turning the light on failed.", self._light.on) async def async_update(self): from miio import DeviceException try: state = await self.hass.async_add_job(self._light.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.is_on self._brightness = ceil((255 / 100.0) * state.brightness) self._color_temp = self.translate( state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds) delayed_turn_off = self.delayed_turn_off_timestamp( state.delay_off_countdown, dt.utcnow(), self._state_attrs[ATTR_DELAYED_TURN_OFF]) self._state_attrs.update({ ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off, }) except DeviceException as ex: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) @staticmethod def translate(value, left_min, left_max, right_min, right_max): left_span = left_max - left_min right_span = right_max - right_min value_scaled = float(value - left_min) / float(left_span) return int(right_min + (value_scaled * right_span)) class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb): def __init__(self, name, light, model, unique_id): super().__init__(name, light, model, unique_id) self._state_attrs.update({ ATTR_NIGHT_LIGHT_MODE: None, ATTR_AUTOMATIC_COLOR_TEMPERATURE: None, }) @property def min_mireds(self): return 175 @property def max_mireds(self): return 370 async def async_update(self): from miio import DeviceException try: state = await self.hass.async_add_job(self._light.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.is_on self._brightness = ceil((255 / 100.0) * state.brightness) self._color_temp = self.translate( state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds) delayed_turn_off = self.delayed_turn_off_timestamp( state.delay_off_countdown, dt.utcnow(), self._state_attrs[ATTR_DELAYED_TURN_OFF]) self._state_attrs.update({ ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off, ATTR_NIGHT_LIGHT_MODE: state.smart_night_light, ATTR_AUTOMATIC_COLOR_TEMPERATURE: state.automatic_color_temperature, }) except DeviceException as ex: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight): def __init__(self, name, light, model, unique_id): super().__init__(name, light, model, unique_id) self._state_attrs.update({ ATTR_REMINDER: None, ATTR_NIGHT_LIGHT_MODE: None, ATTR_EYECARE_MODE: None, }) async def async_update(self): from miio import DeviceException try: state = await self.hass.async_add_job(self._light.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.is_on self._brightness = ceil((255 / 100.0) * state.brightness) delayed_turn_off = self.delayed_turn_off_timestamp( 
state.delay_off_countdown, dt.utcnow(), self._state_attrs[ATTR_DELAYED_TURN_OFF]) self._state_attrs.update({ ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off, ATTR_REMINDER: state.reminder, ATTR_NIGHT_LIGHT_MODE: state.smart_night_light, ATTR_EYECARE_MODE: state.eyecare, }) except DeviceException as ex: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) async def async_set_delayed_turn_off(self, time_period: timedelta): await self._try_command( "Setting the turn off delay failed.", self._light.delay_off, round(time_period.total_seconds() / 60))
MIT License
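Within Home Assistant the call above is exposed as the light.xiaomi_miio_reminder_on service. Outside of it, the same device call can be made directly through python-miio, which is what the handler wraps; the IP address and token below are placeholders.

from miio import PhilipsEyecare

eyecare = PhilipsEyecare('192.168.0.25', '0123456789abcdef0123456789abcdef')
eyecare.reminder_on()   # same call awaited by async_reminder_on()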
flatironinstitute/inferelator
inferelator/utils/data.py
InferelatorData.add
python
def add(self, val): self._data[...] = self._data + val
Add a value to the matrix in-place :param val: Value to add :type val: numeric
https://github.com/flatironinstitute/inferelator/blob/dd532f428132cfac98c9c7c161632dab2a4e9ea9/inferelator/utils/data.py#L729-L735
from __future__ import print_function, unicode_literals, division import copy as cp import gc import math import pandas as pd import numpy as np import scipy.sparse as sparse import scipy.stats import pandas.api.types as pat from sklearn.preprocessing import StandardScaler import scipy.io from anndata import AnnData from inferelator.utils import Debug from inferelator.utils import Validator as check def dot_product(a, b, dense=True, cast=True): if sparse.isspmatrix(a) and sparse.isspmatrix(b): return a.dot(b).A if dense else a.dot(b) elif sparse.isspmatrix(a): return a.dot(sparse.csr_matrix(b)).A if dense else a.dot(sparse.csr_matrix(b)) elif sparse.isspmatrix(b): return np.dot(a, b.A) else: return np.dot(a, b) class DotProduct: _dot_func = dot_product @classmethod def set_mkl(cls, mkl=True): if mkl is None: pass if mkl: try: from sparse_dot_mkl import get_version_string, dot_product_mkl as dp msg = "Matrix multiplication will use sparse_dot_mkl package with MKL: {m}" vstring = get_version_string() Debug.vprint(msg.format(m=vstring if vstring is not None else "Install mkl-service for details"), level=2) cls._dot_func = dp except ImportError as err: Debug.vprint("Unable to load MKL with sparse_dot_mkl:\n" + str(err), level=0) cls._dot_func = dot_product else: Debug.vprint("Matrix multiplication will use Numpy; this is not advised for sparse data", level=2) cls._dot_func = dot_product @classmethod def dot(cls, *args, **kwargs): return cls._dot_func(*args, **kwargs) def df_from_tsv(file_like, has_index=True): return pd.read_csv(file_like, sep="\t", header=0, index_col=0 if has_index else False) def df_set_diag(df, val, copy=True): isect = df.index.intersection(df.columns) if copy: df = df.copy() for i in range(len(isect)): df.loc[isect[i], isect[i]] = val if copy: return df else: return len(isect) def array_set_diag(arr, val, row_labels, col_labels): if arr.ndim != 2: raise ValueError("Array must be 2D") isect = set(row_labels).intersection(col_labels) for i in isect: arr[row_labels == i, col_labels == i] = val return len(isect) def make_array_2d(arr): if arr.ndim == 1: arr.shape = (arr.shape[0], 1) def melt_and_reindex_dataframe(data_frame, value_name, idx_name="target", col_name="regulator"): data_frame = data_frame.copy() data_frame[idx_name] = data_frame.index data_frame = data_frame.melt(id_vars=idx_name, var_name=col_name, value_name=value_name) data_frame.index = pd.MultiIndex.from_frame(data_frame.loc[:, [idx_name, col_name]]) del data_frame[idx_name] del data_frame[col_name] return data_frame def scale_vector(vec, ddof=1): if sparse.isspmatrix(vec): vec = vec.A if np.var(vec) == 0: return np.zeros(vec.shape, dtype=float) else: return scipy.stats.zscore(vec, axis=None, ddof=ddof) def apply_window_vector(vec, window, func): steps = math.ceil(len(vec) / window) return np.array([func(vec[i * window:min((i + 1) * window, len(vec))]) for i in range(steps)]) class InferelatorData(object): name = None _adata = None @property def _is_integer(self): return pat.is_integer_dtype(self._adata.X.dtype) @property def expression_data(self): return self._adata.X @expression_data.setter def expression_data(self, new_data): self._adata.X = new_data @property def values(self): return self._adata.X @property def _data(self): if self.is_sparse: return self._adata.X.data else: return self._adata.X @_data.setter def _data(self, new_data): if self.is_sparse: self._adata.X.data = new_data else: self._adata.X = new_data @property def _data_mem_usage(self): if self.is_sparse: return self._adata.X.data.nbytes + 
self._adata.X.indices.nbytes + self._adata.X.indptr.nbytes else: return self._adata.X.nbytes @property def meta_data(self): return self._adata.obs @meta_data.setter def meta_data(self, new_meta_data): if isinstance(new_meta_data, InferelatorData): new_meta_data = new_meta_data.meta_data new_meta_data = new_meta_data.copy() new_meta_data.index = new_meta_data.index.astype(str) if self._adata.obs_names.nunique() != self.num_obs: self._adata.obs_names_make_unique() if new_meta_data.index.nunique() != new_meta_data.shape[0]: new_meta_data = new_meta_data.loc[~new_meta_data.duplicated(), :] try: new_meta_data = new_meta_data.reindex(self.sample_names) except ValueError: if new_meta_data.shape[0] != self.num_obs: msg = "Metadata size {sh1} does not match data ({sh2})".format(sh1=new_meta_data.shape, sh2=self.num_obs) raise ValueError(msg) new_meta_data.index = self.sample_names if len(self._adata.obs.columns) > 0: keep_columns = self._adata.obs.columns.difference(new_meta_data.columns) self._adata.obs = pd.concat((new_meta_data, self._adata.obs.loc[:, keep_columns]), axis=1) else: self._adata.obs = new_meta_data @property def gene_data(self): return self._adata.var @gene_data.setter def gene_data(self, new_gene_data): if isinstance(new_gene_data, InferelatorData): new_gene_data = new_gene_data.gene_data new_gene_data = new_gene_data.copy() new_gene_data.index = new_gene_data.index.astype(str) self._adata.uns["trim_gene_list"] = new_gene_data.index.intersection(self._adata.var.index) new_gene_data = new_gene_data.reindex(self._adata.var_names) if len(self._adata.var.columns) > 0: keep_columns = self._adata.var.columns.difference(new_gene_data.columns) self._adata.var = pd.concat((new_gene_data, self._adata.var.loc[:, keep_columns]), axis=1) else: self._adata.var = new_gene_data @property def gene_names(self): return self._adata.var_names @property def gene_counts(self): return self._adata.X.sum(axis=0).A.flatten() if self.is_sparse else self._adata.X.sum(axis=0) @property def gene_means(self): return self._adata.X.mean(axis=0).A.flatten() if self.is_sparse else self._adata.X.mean(axis=0) @property def gene_stdev(self): if self.is_sparse: return np.sqrt(StandardScaler(copy=False, with_mean=False).fit(self._adata.X).var_) else: return self._adata.X.std(axis=0, ddof=1) @property def sample_names(self): return self._adata.obs_names @property def sample_counts(self): return self._adata.X.sum(axis=1).A.flatten() if self.is_sparse else self._adata.X.sum(axis=1) @property def sample_means(self): return self._adata.X.mean(axis=1).A.flatten() if self.is_sparse else self._adata.X.mean(axis=1) @property def sample_stdev(self): if self.is_sparse: return np.sqrt(StandardScaler(copy=False, with_mean=False).fit(self._adata.X.T).var_) else: return self._adata.X.std(axis=1, ddof=1) @property def non_finite(self): if min(self._data.shape) == 0: return 0, None elif self.is_sparse: nnf = np.sum(apply_window_vector(self._adata.X.data, 1000000, lambda x: np.sum(~np.isfinite(x)))) return nnf, ["GENES_NOT_ID_SPARSE_MATRIX"] if nnf > 0 else None else: non_finite = np.apply_along_axis(lambda x: np.sum(~np.isfinite(x)) > 0, 0, self._data) nnf = np.sum(non_finite) return nnf, self.gene_names[non_finite] if nnf > 0 else None @property def is_sparse(self): return sparse.issparse(self._adata.X) @property def shape(self): return self._adata.shape @property def num_obs(self): return self._adata.shape[0] @property def num_genes(self): return self._adata.shape[1] @property def uns(self): return self._adata.uns def __str__(self): 
msg = "InferelatorData [{dt} {sh}, Metadata {me}] Memory: {mem:.2f} MB" return msg.format(sh=self.shape, dt=self._data.dtype, me=self.meta_data.shape, mem=(self._data_mem_usage / 1e6)) def __init__(self, expression_data=None, transpose_expression=False, meta_data=None, gene_data=None, gene_data_idx_column=None, gene_names=None, sample_names=None, dtype=None, name=None): if expression_data is not None and isinstance(expression_data, pd.DataFrame): object_cols = expression_data.dtypes == object if sum(object_cols) > 0: object_data = expression_data.loc[:, object_cols] meta_data = object_data if meta_data is None else pd.concat((meta_data, object_data), axis=1) expression_data.drop(expression_data.columns[object_cols], inplace=True, axis=1) if dtype is None and all(map(lambda x: pat.is_integer_dtype(x), expression_data.dtypes)): dtype = 'int32' elif dtype is None: dtype = 'float64' self._make_idx_str(expression_data) if transpose_expression: self._adata = AnnData(X=expression_data.T, dtype=dtype) else: self._adata = AnnData(X=expression_data, dtype=dtype) elif expression_data is not None and isinstance(expression_data, AnnData): self._adata = expression_data elif expression_data is not None: self._adata = AnnData(X=expression_data.T if transpose_expression else expression_data, dtype=expression_data.dtype) else: self._adata = AnnData() if gene_names is not None and len(gene_names) > 0: self._adata.var_names = gene_names if sample_names is not None and len(sample_names) > 0: self._adata.obs_names = sample_names if meta_data is not None: self._make_idx_str(meta_data) self.meta_data = meta_data if gene_data is not None: if gene_data_idx_column is not None and gene_data_idx_column in gene_data: gene_data.index = gene_data[gene_data_idx_column] elif gene_data_idx_column is not None: msg = "No gene_data column {c} in {a}".format(c=gene_data_idx_column, a=" ".join(gene_data.columns)) raise ValueError(msg) self._make_idx_str(gene_data) self.gene_data = gene_data self._cached = {} self.name = name def convert_to_float(self): if pat.is_float_dtype(self._data.dtype): return None elif self._data.dtype == np.int32: dtype = np.float32 elif self._data.dtype == np.int64: dtype = np.float64 else: raise ValueError("Data is not float, int32, or int64") float_view = self._data.view(dtype) float_view[:] = self._data self._data = float_view def trim_genes(self, remove_constant_genes=True, trim_gene_list=None): keep_column_bool = np.ones((len(self._adata.var_names),), dtype=bool) if trim_gene_list is not None: keep_column_bool &= self._adata.var_names.isin(trim_gene_list) if "trim_gene_list" in self._adata.uns: keep_column_bool &= self._adata.var_names.isin(self._adata.uns["trim_gene_list"]) list_trim = len(self._adata.var_names) - np.sum(keep_column_bool) comp = 0 if self._is_integer else np.finfo(self.values.dtype).eps * 10 if remove_constant_genes: nz_var = self.values.max(axis=0) - self.values.min(axis=0) nz_var = nz_var.A.flatten() if self.is_sparse else nz_var if np.any(np.isnan(nz_var)): raise ValueError("NaN values are present in the expression matrix; unable to remove var=0 genes") nz_var = comp < nz_var keep_column_bool &= nz_var var_zero_trim = np.sum(nz_var) else: var_zero_trim = 0 if np.sum(keep_column_bool) == 0: err_msg = "No genes remain after trimming. 
({lst} removed to match list, {v} removed for var=0)" raise ValueError(err_msg.format(lst=list_trim, v=var_zero_trim)) if np.sum(keep_column_bool) == self._adata.shape[1]: pass else: Debug.vprint("Trimming {name} matrix {sh} to {n} columns".format(name=self.name, sh=self._adata.X.shape, n=np.sum(keep_column_bool)), level=1) self._adata = AnnData(self._adata.X[:, keep_column_bool], obs=self._adata.obs.copy(), var=self._adata.var.loc[keep_column_bool, :].copy(), dtype=self._adata.X.dtype) gc.collect() def get_gene_data(self, gene_list, copy=False, force_dense=False, to_df=False, zscore=False, flatten=False): x = self._adata[:, gene_list] labels = x.var_names if (force_dense or to_df or zscore) and self.is_sparse: x = x.X.A copy = False else: x = x.X if zscore: z_x = np.subtract(x, self.obs_means.reshape(-1, 1)) z_x = np.divide(z_x, self.obs_stdev.reshape(-1, 1)) del x x = z_x copy = False if flatten and x.ndim == 2: new_x = x.flatten() elif (flatten and x.ndim == 1) or copy: new_x = x.copy() else: new_x = x return pd.DataFrame(new_x, columns=labels, index=self.sample_names) if to_df else new_x def get_sample_data(self, sample_index, copy=False, force_dense=False, to_df=False, zscore=False): x = self._adata[sample_index, :] labels = x.obs_names if (force_dense or to_df or zscore) and self.is_sparse: x = x.X.A else: x = x.X if zscore: x = np.subtract(x, self.obs_means[sample_index].reshape(-1, 1)) x = np.divide(x, self.obs_stdev[sample_index].reshape(-1, 1)) elif copy: x = x.X.copy() return pd.DataFrame(x, columns=self.gene_names, index=labels) if to_df else x def get_bootstrap(self, sample_bootstrap_index): return InferelatorData(expression_data=self._adata.X[sample_bootstrap_index, :].copy(), gene_names=self.gene_names) def get_random_samples(self, num_obs, with_replacement=False, random_seed=None, random_gen=None, inplace=False, fix_names=True): check.argument_integer(num_obs, low=1) check.argument_integer(random_seed, allow_none=True) if (num_obs > self.num_obs) and not with_replacement: _msg = "Unable to sample {x} from {y} observations without replacement".format(x=num_obs, y=self.num_obs) raise ValueError(_msg) if random_gen is None: random_gen = np.random.default_rng() if random_seed is None else np.random.default_rng(random_seed) if with_replacement: keeper_ilocs = random_gen.integers(self.num_obs, size=(num_obs,)) else: keeper_ilocs = random_gen.choice(np.arange(self.num_obs), size=(num_obs,), replace=False) if inplace: self._adata = self._adata[keeper_ilocs, :].copy() return_obj = self else: return_obj = InferelatorData(self._adata[keeper_ilocs, :].copy()) return_obj._adata.obs_names_make_unique() if with_replacement and fix_names else None return return_obj def subset_copy(self, row_index=None, column_index=None): if row_index is not None and column_index is not None: data_view = self._adata[row_index, column_index] elif row_index is not None: data_view = self._adata[row_index, :] elif column_index is not None: data_view = self._adata[: column_index] else: data_view = self._adata return InferelatorData(data_view.copy()) def dot(self, other, other_is_right_side=True, force_dense=False): if other_is_right_side: return DotProduct.dot(self._adata.X, other, cast=True, dense=force_dense) else: return DotProduct.dot(other, self._adata.X, cast=True, dense=force_dense) def to_csv(self, file_name, sep="\t"): if self.is_sparse: scipy.io.mmwrite(file_name, self.values) else: self._adata.to_df().to_csv(file_name, sep=sep) def to_h5ad(self, file_name, compression="gzip"): 
self._adata.write(file_name, compression=compression) def transform(self, func, add_pseudocount=False, memory_efficient=True, chunksize=1000): if add_pseudocount and self.is_sparse: self._adata.X.data += 1 elif add_pseudocount: self._adata.X += 1 if self.is_sparse: self._adata.X.data = func(self._adata.X.data) elif self._adata.X.ndim == 1 or self._is_integer: self._adata.X = func(self._adata.X) elif not memory_efficient and type(func(self._data.flat[0])) == self._adata.X.dtype: self._adata.X[...] = func(self._adata.X) elif memory_efficient and type(func(self._data.flat[0])) == self._adata.X.dtype: for i in range(math.ceil(self._adata.shape[0] / chunksize)): start, stop = i * chunksize, min(i + 1 * chunksize, self._adata.shape[0]) self._adata.X[start:stop, :] = func(self._adata.X[start:stop, :]) else: self._adata.X = func(self._adata.X)
BSD 2-Clause Simplified License
nielstron/quantulum3
quantulum3/parser.py
parse_unit
python
def parse_unit(item, unit, slash, lang="en_US"):
    return _get_parser(lang).parse_unit(item, unit, slash)
Parse surface and power from unit text.
https://github.com/nielstron/quantulum3/blob/c99d738f05e2e8ade348c883747f65c13495f504/quantulum3/parser.py#L257-L261
import logging import re from collections import defaultdict from fractions import Fraction from typing import List from . import classes as cls from . import disambiguate as dis from . import language, load from . import regex as reg _LOGGER = logging.getLogger(__name__) def _get_parser(lang="en_US"): return language.get("parser", lang) def extract_spellout_values(text, lang="en_US"): return _get_parser(lang).extract_spellout_values(text) def substitute_values(text, values): shift, final_text, shifts = 0, text, defaultdict(int) for value in values: first = value["old_span"][0] + shift second = value["old_span"][1] + shift final_text = final_text[0:first] + value["new_surface"] + final_text[second:] shift += len(value["new_surface"]) - len(value["old_surface"]) for char in range(first + 1, len(final_text)): shifts[char] = shift _LOGGER.debug('Text after numeric conversion: "%s"', final_text) return final_text, shifts def get_values(item, lang="en_US"): def callback(pattern): return " %s" % (reg.unicode_fractions()[pattern.group(0)]) fracs = r"|".join(reg.unicode_fractions()) value = item.group("value") value = re.sub( r"(?<=\d)[%s](?=\d{3})" % reg.grouping_operators_regex(lang), "", value ) value = re.sub( r"(?<=\d)(%s)(e|E|10)\^?" % reg.multiplication_operators_regex(lang), "e", value ) value, factors = resolve_exponents(value) _LOGGER.debug("After exponent resolution: {}".format(value)) value = re.sub(fracs, callback, value, re.IGNORECASE) range_separator = re.findall( r"\d+ ?((?:-\ )?(?:%s)) ?\d" % "|".join(reg.ranges(lang)), value ) uncer_separator = re.findall( r"\d+ ?(%s) ?\d" % "|".join(reg.uncertainties(lang)), value ) fract_separator = re.findall(r"\d+/\d+", value) value = re.sub(" +", " ", value) uncertainty = None if range_separator: values = value.split(range_separator[0]) values = [ float(re.sub(r"-$", "", v)) * factors[i] for i, v in enumerate(values) ] if values[1] < values[0]: raise ValueError( "Invalid range, with second item being smaller than the first " "item" ) mean = sum(values) / len(values) uncertainty = mean - min(values) values = [mean] elif uncer_separator: values = [float(i) for i in value.split(uncer_separator[0])] uncertainty = values[1] * factors[1] values = [values[0] * factors[0]] elif fract_separator: values = value.split() try: if len(values) > 1: values = [float(values[0]) * factors[0] + float(Fraction(values[1]))] else: values = [float(Fraction(values[0]))] except ZeroDivisionError as e: raise ValueError("{} is not a number".format(values[0]), e) else: values = [float(re.sub(r"-$", "", value)) * factors[0]] _LOGGER.debug("\tUncertainty: %s", uncertainty) _LOGGER.debug("\tValues: %s", values) return uncertainty, values def resolve_exponents(value, lang="en_US"): factors = [] matches = re.finditer( reg.number_pattern_groups(lang), value, re.IGNORECASE | re.VERBOSE ) for item in matches: if item.group("base") and item.group("exponent"): base = item.group("base") exp = item.group("exponent") if base in ["e", "E"]: factors.append(1) continue if re.match(r"\d+\^?", base): if not ( "^" in base or re.match(r"[%s]" % reg.unicode_superscript_regex(), exp) ): factors.append(1) continue for superscript, substitute in reg.unicode_superscript().items(): exp.replace(superscript, substitute) exp = float(exp) base = float(base.replace("^", "")) factor = base ** exp stripped = str(value).replace(item.group("scale"), "") value = stripped factors.append(factor) _LOGGER.debug( "Replaced {} by factor {}".format(item.group("scale"), factor) ) else: factors.append(1) continue 
return value, factors def build_unit_name(dimensions, lang="en_US"): name = _get_parser(lang).name_from_dimensions(dimensions) _LOGGER.debug("\tUnit inferred name: %s", name) return name def get_unit_from_dimensions(dimensions, text, lang="en_US"): key = load.get_key_from_dimensions(dimensions) try: unit = load.units(lang).derived[key] except KeyError: _LOGGER.debug(u"\tCould not find unit for: %s", key) unit = cls.Unit( name=build_unit_name(dimensions, lang), dimensions=dimensions, entity=get_entity_from_dimensions(dimensions, text, lang), ) unit.original_dimensions = dimensions return unit def name_from_dimensions(dimensions, lang="en_US"): return _get_parser(lang).name_from_dimensions(dimensions) def infer_name(unit): name = name_from_dimensions(unit.dimensions) if unit.dimensions else None return name def get_entity_from_dimensions(dimensions, text, lang="en_US"): new_derived = [ {"base": load.units(lang).names[i["base"]].entity.name, "power": i["power"]} for i in dimensions ] final_derived = sorted(new_derived, key=lambda x: x["base"]) key = load.get_key_from_dimensions(final_derived) ent = dis.disambiguate_entity(key, text, lang) if ent is None: _LOGGER.debug("\tCould not find entity for: %s", key) ent = cls.Entity(name="unknown", dimensions=new_derived) return ent
MIT License
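A minimal sketch of the per-language dispatch pattern that parse_unit in the record above relies on, shown in isolation rather than by calling quantulum3 itself (item and unit are internal parser objects, so a direct call would not be illustrative). The registry and parser class below are hypothetical stand-ins, not part of the library.

    class EnUsUnitParser:
        # Hypothetical stand-in for a language-specific parser module.
        def parse_unit(self, item, unit, slash):
            return "%s%s" % (unit, " per" if slash else "")

    _PARSERS = {"en_US": EnUsUnitParser()}

    def _get_parser(lang="en_US"):
        # Same shape as the dispatch in the record: look up the parser for lang.
        return _PARSERS[lang]

    def parse_unit(item, unit, slash, lang="en_US"):
        # The top-level helper only forwards to the language-specific implementation.
        return _get_parser(lang).parse_unit(item, unit, slash)

    print(parse_unit(item=None, unit="metre", slash=False))  # -> 'metre'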
radakb/pynamd
pynamd/msmle/msmle.py
MSMLE.compute_unsampled_weights
python
def compute_unsampled_weights(self, u_jn):
    u_nj_k = self._validate_and_convert_2d_energies(u_jn)
    f_k = self.compute_unsampled_free_energies(u_jn, False)[0]
    logQ_nj_k = f_k[newaxis, :] - u_nj_k
    logQ_nj_m = self._f_m[newaxis, :] - self._u_nj_m
    logNorm_n = logsumexp(logQ_nj_m, 1, self.PIsdiag[newaxis, :])
    return exp(logQ_nj_k - logNorm_n[:, newaxis])
Compute the sample weights for unsampled states. This requires the
observed reduced potential energies on the complete sample set.

NB Use of this function can be obviated by simply including the
unsampled states in the initial calculation.

Arguments
---------
u_jn : array-like
    2d array-like of reduced potential energies. The dimensions must be
    L x N, where L is the number of states to compute free energies for
    and N is the total sample size.

Returns
-------
W_nj_k : ndarray
    Sample weights for the N samples in each of L states
https://github.com/radakb/pynamd/blob/e31b8529e7052725916ebfa1ce7dd089d72117ec/pynamd/msmle/msmle.py#L488-L513
from __future__ import division, print_function from numpy import asarray, atleast_2d, zeros, ones, tile, hstack, vstack from numpy import int32, float64 from numpy import log, exp, sqrt, abs, min, any, all, where, nonzero from numpy import identity, diagflat, diagonal, newaxis from numpy.random import randint from numpy.linalg import solve, inv from scipy.optimize import minimize try: from scipy.misc import logsumexp except ImportError: from scipy.special import logsumexp class MSMLE(object): def __init__(self, reduced_potentials, nsamples): self.nsamples = asarray(nsamples, int32) if self.nsamples.ndim != 1: raise ValueError('\'nsamples\' must be a one dimensional' ' array-like object of integers.') if any(self.nsamples < 0): raise ValueError('\'nsamples\' must all be either zero or' ' positive') self.total_samples = self.nsamples.sum() self.PIs = diagflat(self.nsamples_nonzero / self.total_samples) _u_ijn = asarray(reduced_potentials, float64) n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates nmax = self.nsamples.max() msg2 = ("'reduced_energies' expected as %d x %d array-like object" 'of floats'%(mpk, n)) msg3 = ("'reduced_energies' expected as %d x %d x %d array-like object" 'of floats'%(m, mpk, nmax)) if _u_ijn.ndim == 2 and _u_ijn.shape != (mpk, n): raise ValueError(msg2) elif _u_ijn.ndim == 3 and _u_ijn.shape != (m, mpk, nmax): raise ValueError(msg3) self._mask_resample = None self._u_nj_m = zeros((n, m)) self._u_nj_k = zeros((n, mpk-m)) mask0, maskn0 = self.mask_zero, self.mask_nonzero if _u_ijn.ndim == 2: for i, ni in enumerate(self.nsamples_nonzero): j = self.nsamples_nonzero[:i].sum() self._u_nj_m[j:(j+ni), :] = _u_ijn[maskn0, j:(j+ni)].T self._u_nj_k[j:(j+ni), :] = _u_ijn[mask0, j:(j+ni)].T elif _u_ijn.ndim == 3: for i, ni in enumerate(self.nsamples_nonzero): j = self.nsamples_nonzero[:i].sum() self._u_nj_m[j:(j+ni), :] = _u_ijn[i, maskn0, :ni].T self._u_nj_k[j:(j+ni), :] = _u_ijn[i, mask0, :ni].T shift = self._u_nj_m.min() self._u_nj_m -= shift self._u_nj_k -= shift del _u_ijn def resample(self): self.revert_sample() self._mask_resample = zeros(self.total_samples, int32) self._u_nj_m_orig = self._u_nj_m.copy() if hasattr(self, '_f'): self._f_orig = self._f.copy() for i, ni in enumerate(self.nsamples_nonzero): j = self.nsamples_nonzero[:i].sum() mask = randint(j, j+ni, ni) self._mask_resample[j:(j+ni)] = mask self._u_nj_m[j:(j+ni), :] = self._u_nj_m_orig[mask, :] def revert_sample(self): if self._mask_resample is not None: self._u_nj_m = self._u_nj_m_orig del self._u_nj_m_orig if hasattr(self, '_f_orig'): self._f = self._f_orig del self._f_orig else: del self._f del self._mask_resample self._mask_resample = None @property def nstates(self): return self.nsamples.size @property def mask_nonzero(self): return nonzero(self.nsamples)[0] @property def nsamples_nonzero(self): return self.nsamples[self.mask_nonzero] @property def nstates_sampled(self): return self.mask_nonzero.size @property def mask_zero(self): return where(self.nsamples == 0)[0] @property def nstates_unsampled(self): return self.mask_zero.size @property def PIsdiag(self): return diagonal(self.PIs) @property def _u_nj(self): if self._mask_resample is None: _u_nj_k = self._u_nj_k else: _u_nj_k = self._u_nj_k[self._mask_resample, :] return hstack((self._u_nj_m, _u_nj_k)) @property def u_nj(self): _u_nj = self._u_nj u_nj = zeros((self.total_samples, self.nstates)) u_nj[:, self.mask_nonzero] = _u_nj[:, :self.nstates_sampled] u_nj[:, self.mask_zero] = _u_nj[:, self.nstates_sampled:] return u_nj @property 
def f(self): f = zeros(self.nstates) f[self.mask_nonzero] = self._f[:self.nstates_sampled] f[self.mask_zero] = self._f[self.nstates_sampled:] return f @property def _f_m(self): return self._f[:self.nstates_sampled] @property def _f_k(self): return self._f[self.nstates_sampled:] @property def _W_nj(self): m = self.nstates_sampled logQ_nj = self._f[newaxis, :] - self._u_nj logNorm_n = logsumexp(logQ_nj[:, :m], 1, self.PIsdiag[newaxis, :]) _W_nj = exp(logQ_nj - logNorm_n[:, newaxis]) return _W_nj @property def W_nj(self): m = self.nstates_sampled _W_nj = self._W_nj W_nj = zeros(_W_nj.shape) W_nj[:, self.mask_nonzero] = _W_nj[:, :m] W_nj[:, self.mask_zero] = _W_nj[:, m:] return W_nj def _validate_and_convert_2d(self, A_jn): _A_jn = asarray(A_jn) n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates nmax = self.nsamples.max() if _A_jn.ndim == 1: assert _A_jn.size == n A_n = A_jn elif _A_jn.ndim == 2: assert _A_jn.shape == (m, nmax) if all(self.nsamples == self.nsamples[0]): A_n = A_jn.ravel() else: A_n = zeros(self.total_samples) for i, ni in enumerate(self.nsamples_nonzero): j = self.nsamples_nonzero[:i].sum() A_n[j:(j+ni)] += A_jn[i, :ni] else: raise ValueError('Bad input shape') if self._mask_resample is None: return A_n else: return A_n[self._mask_resample] def _validate_and_convert_2d_energies(self, u_jn): u_nj_k = atleast_2d(u_jn).T assert u_nj_k.shape[0] == self.total_samples if self._mask_resample is None: return u_nj_k else: return u_nj_k[self._mask_resample, :] def compute_expectations(self, A_jn, doerror=True): A_n = self._validate_and_convert_2d(A_jn) W_nj = self.W_nj A_j = (W_nj*A_n[:, newaxis]).mean(axis=0) varA_j = zeros(self.nstates) if not doerror: return A_j, varA_j """ There are a number of errors in Ref. 1. First, the definitions of W and WA are incorrect, the extra factors of exp(f) should indeed be included. Doing so obviates the need for the C matrix defined therein. This itself is used incorrectly in that paper since the dimensions are inconsistent during matrix multiplication. NB This is all borne out by R code released with Ref. 1, which uses the same equations below, but with completely different notation (A1 --> G, B1 --> H). The matrix D in Ref. 1 is NOT the same as in the code, where it seems to be the first term in the B matrix from the paper. Shorthand indices - notation similiar to Ref. 1. 
""" n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates mpk2 = 2*mpk mask0, maskn0 = self.mask_zero, self.mask_nonzero _W_nj = self._W_nj _WA_nj = _W_nj*A_n[:, newaxis] _A_j = _WA_nj.mean(axis=0) WWA_nj = hstack((_W_nj, _WA_nj)) O = WWA_nj.T.dot(WWA_nj) / n Os = O[:, :m] D = hstack((Os.dot(self.PIs), zeros((mpk2, mpk2-m)))) B1 = (D - identity(mpk2))[1:, 1:] A1 = (O - D[:, :m].dot(Os.T))[1:, 1:] V = solve(B1, A1).dot(inv(B1.T)) / n U = zeros((mpk2, mpk2)) U[1:, 1:] = V Ch = hstack((diagflat(-_A_j), identity(mpk))) V_full = Ch.dot(U).dot(Ch.T) varA_j[maskn0] = diagonal(V_full)[:m] varA_j[mask0] = diagonal(V_full)[m:] return A_j, varA_j @property def fvar(self): n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates k = mpk - m mask0, maskn0 = self.mask_zero, self.mask_nonzero _W_nj = self._W_nj O = _W_nj.T.dot(_W_nj) / n Os = O[:, :m] B1 = (hstack((Os.dot(self.PIs), zeros((mpk, k)))) - identity(mpk))[1:, 1:] A1 = (O - Os.dot(self.PIs).dot(Os.T))[1:, 1:] V = solve(B1, A1).dot(inv(B1.T)) / n V_full = zeros((mpk, mpk)) V_full[1:, 1:] = V var = zeros(mpk) var[maskn0] += diagonal(V_full)[:m] var[mask0] += diagonal(V_full)[m:] var += var[0] var[0] = 0.0 return var def compute_unsampled_free_energies(self, u_jn, doerror=True): u_nj_k = self._validate_and_convert_2d_energies(u_jn) logQ_nj_m = self._f_m[newaxis, :] - self._u_nj_m nsamples = self.nsamples_nonzero[newaxis, :] logw_n = -(u_nj_k + logsumexp(logQ_nj_m, 1, nsamples)[:, newaxis]) f_k = -logsumexp(logw_n, axis=0) varf_k = zeros(f_k.size) if not doerror: return f_k, varf_k n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates mask0, maskn0 = self.mask_zero, self.mask_nonzero newk = u_nj_k.shape[1] mpk += newk k = mpk - m W_nj_k = self.compute_unsampled_weights(u_jn) _W_nj = hstack((self._W_nj, W_nj_k)) O = _W_nj.T.dot(_W_nj) / n Os = O[:, :m] B1 = (hstack((Os.dot(self.PIs), zeros((mpk, k)))) - identity(mpk))[1:, 1:] A1 = (O - Os.dot(self.PIs).dot(Os.T))[1:, 1:] V = solve(B1, A1).dot(inv(B1.T)) / n V_full = zeros((mpk, mpk)) V_full[1:, 1:] = V var = zeros(mpk) var[maskn0] += diagonal(V_full)[:m] var[mask0] += diagonal(V_full)[m:mpk-newk] varf_k += diagonal(V_full)[mpk-newk:] + var[0] return f_k, varf_k
MIT License
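A standalone numpy illustration of the weight formula that compute_unsampled_weights implements: exponentiate f_k - u_nk and normalise by the mixture of sampled states, all in log space. The arrays are synthetic and the free energies are placeholders; this does not call pynamd, which would first need the sampled-state free energies to be solved.

    import numpy as np
    from scipy.special import logsumexp

    n, m, k = 50, 3, 2                      # samples, sampled states, unsampled states
    u_nj_m = np.random.rand(n, m)           # reduced energies in the m sampled states
    u_nj_k = np.random.rand(n, k)           # reduced energies in the k unsampled states
    f_m = np.zeros(m)                       # placeholder free energies (assumed already solved)
    f_k = np.zeros(k)                       # placeholder free energies of the unsampled states
    pi_m = np.full(m, 1.0 / m)              # sample fraction per sampled state (PIsdiag)

    logQ_nj_k = f_k[np.newaxis, :] - u_nj_k
    logQ_nj_m = f_m[np.newaxis, :] - u_nj_m
    logNorm_n = logsumexp(logQ_nj_m, axis=1, b=pi_m[np.newaxis, :])
    W_nj_k = np.exp(logQ_nj_k - logNorm_n[:, np.newaxis])
    print(W_nj_k.shape)                     # (50, 2), one weight per sample and state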
siddhi-io/pysiddhi
PySiddhi/sp/ObjectMapping/FieldMapping.py
FieldMapping.__init__
python
def __init__(self, decode_function, encode_function=str, default_value=NotSet(), addDefaultField=False):
    self.encode_function = encode_function
    self.decode_function = decode_function
    self.default_value = default_value
    self.addDefaultField = addDefaultField
Creates a field mapping between JSON Object field and API Object field

:param decode_function: converts JSON field value to APIObject field value
:param encode_function: converts APIObject field value to JSON Object field value
:param default_value: default value of APIObject field
:param addDefaultField: set True to include the field in JSON Object even if the value is default
https://github.com/siddhi-io/pysiddhi/blob/d58af41d243307f8f5bb27077467fa69866117fe/PySiddhi/sp/ObjectMapping/FieldMapping.py#L39-L50
from PySiddhi.sp.ObjectMapping.APIObject import NotSet
from PySiddhi.sp.__Util import encodeField, decodeField


def strOrInt(v):
    v = str(v)
    if str.isnumeric(v):
        return int(v)
    else:
        return v


class FieldMapping(object):
Apache License 2.0
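A hypothetical construction of a FieldMapping for an integer-valued JSON field. The import path follows the function_path above and strOrInt comes from the context shown; the field itself (retry_count) is a made-up example.

    from PySiddhi.sp.ObjectMapping.FieldMapping import FieldMapping, strOrInt

    # Decode JSON strings such as "5" to ints, encode back with str, default to 0,
    # and emit the field even when it still holds the default value.
    retry_count = FieldMapping(strOrInt, encode_function=str, default_value=0,
                               addDefaultField=True)
    print(retry_count.decode_function("5"))   # -> 5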
pfnet-research/chainer-deepfill
src/inpaint_ops.py
resize_mask_like
python
def resize_mask_like(mask, x):
    if mask.shape[2] < x.shape[2]:
        mask_resize = F.unpooling_2d(
            mask, ksize=4, outsize=x.shape[2:])
    else:
        rate = mask.shape[2] // x.shape[2]
        mask_resize = mask[:, :, ::rate, ::rate]
    return mask_resize
Resize mask like shape of x.

Args:
    mask: Original mask.
    x: To shape of x.
https://github.com/pfnet-research/chainer-deepfill/blob/46ee774bba13e2e0459e1ad1331d0546a6a61c61/src/inpaint_ops.py#L153-L165
import random import math import chainer from chainer import cuda from chainer import links as L from chainer import functions as F from chainer.link_hooks.spectral_normalization import SpectralNormalization import cv2 import numpy as np class GenConv(chainer.Chain): def __init__(self, ch0, ch1, ksize, stride=1, rate=1, padding="SAME", activation="elu", gated=False): super(GenConv, self).__init__() self.activation = eval("F." + activation) if activation else None self.gated = gated initializer = chainer.initializers.GlorotUniform() if padding == "SAME": pad = ((ksize - 1) * rate + 1 - stride + 1) // 2 else: assert False with self.init_scope(): if gated: self.conv = L.Convolution2D(ch0, ch1 * 2, ksize, stride, pad, dilate=rate, initialW=initializer) else: self.conv = L.Convolution2D(ch0, ch1, ksize, stride, pad, dilate=rate, initialW=initializer) def __call__(self, x): h = self.conv(x) if self.gated: h, mask = F.split_axis(h, 2, axis=1) if self.activation: h = self.activation(h) if self.gated: h = h * F.sigmoid(mask) return h class GenDeconv(chainer.Chain): def __init__(self, ch0, ch1, padding="SAME", gated=False): super(GenDeconv, self).__init__() with self.init_scope(): self.genconv = GenConv(ch0, ch1, 3, 1, padding=padding, gated=gated) def __call__(self, x): h_size, w_size = x.shape[2:] h = F.unpooling_2d(x, ksize=2, outsize=(h_size * 2, w_size * 2)) h = self.genconv(h) return h class DisConv(chainer.Chain): def __init__(self, ch0, ch1, ksize=5, stride=2, sn=False): super(DisConv, self).__init__() initializer = chainer.initializers.GlorotUniform(math.sqrt(2)) pad = (ksize - stride + 1) // 2 with self.init_scope(): if sn: self.conv = L.Convolution2D(ch0, ch1, ksize, stride, pad, initialW=initializer).add_hook( SpectralNormalization()) else: self.conv = L.Convolution2D(ch0, ch1, ksize, stride, pad, initialW=initializer) def __call__(self, x): h = F.leaky_relu(self.conv(x)) return h def random_bbox(config): img_shape = config.IMG_SHAPES img_height = img_shape[0] img_width = img_shape[1] maxt = img_height - config.VERTICAL_MARGIN - config.HEIGHT maxl = img_width - config.HORIZONTAL_MARGIN - config.WIDTH t = int(random.uniform(config.VERTICAL_MARGIN, maxt)) l = int(random.uniform(config.HORIZONTAL_MARGIN, maxl)) h = config.HEIGHT w = config.WIDTH return t, l, h, w def bbox2mask(bbox, batchsize, config, xp): def npmask(bbox, height, width, delta_h, delta_w): mask = np.zeros((batchsize, 1, height, width), np.float32) h = np.random.randint(delta_h // 2 + 1) w = np.random.randint(delta_w // 2 + 1) mask[:, :, bbox[0] + h:bbox[0] + bbox[2] - h, bbox[1] + w:bbox[1] + bbox[3] - w] = 1. 
return mask img_shape = config.IMG_SHAPES height = img_shape[0] width = img_shape[1] mask = npmask(bbox, height, width, config.MAX_DELTA_HEIGHT, config.MAX_DELTA_WIDTH) return xp.array(mask) def free_form_mask(xp, batchsize, size=(256, 256), maxVertex=20, minLength=50, maxLength=200, minBrushWidth=10, maxBrushWidth=40, maxAngle=20): imageHeight, imageWidth = size mask = np.zeros((imageHeight, imageWidth), dtype="float32") numVertex = int(random.uniform(2, maxVertex)) startX = int(random.uniform(0, imageWidth - 1)) startY = int(random.uniform(0, imageHeight - 1)) for i in range(numVertex): angle = random.uniform(-maxAngle, maxAngle) if i % 2 == 0: angle = 180 - angle length = random.uniform(minLength, maxLength) brushWidth = int(random.uniform(minBrushWidth, maxBrushWidth)) endX = np.clip(startX + int(length * np.sin(np.deg2rad(angle))), 0, imageWidth) endY = np.clip(startY + int(length * np.cos(np.deg2rad(angle))), 0, imageHeight) cv2.line(mask, (startX, startY), (endX, endY), 255, brushWidth) startX = endX startY = endY mask = mask.reshape(1, 1, imageHeight, imageWidth) mask = np.tile(mask, (batchsize, 1, 1, 1)) return xp.array(mask.reshape(batchsize, 1, imageHeight, imageWidth), dtype="float32") / 255. def local_patch(x, bbox): return x[:, :, bbox[0]:bbox[0] + bbox[2], bbox[1]:bbox[1] + bbox[3]]
MIT License
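An illustrative call of resize_mask_like from the record above, assuming NCHW float32 arrays and that inpaint_ops is importable from src/; the sizes are arbitrary. With a 64x64 mask and a 256x256 feature map the function takes the F.unpooling_2d branch.

    import numpy as np
    import chainer
    from inpaint_ops import resize_mask_like   # assumes src/ is on the path

    mask = chainer.Variable(np.ones((1, 1, 64, 64), dtype=np.float32))
    x = chainer.Variable(np.zeros((1, 3, 256, 256), dtype=np.float32))
    mask_up = resize_mask_like(mask, x)        # upsampled to match x's spatial size
    print(mask_up.shape)                       # (1, 1, 256, 256)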
xuezhemax/flowseq
flownmt/flows/linear.py
InvertibleLinearFlow.forward
python
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    dim = input.dim()
    out = F.linear(input, self.weight)
    _, logdet = torch.slogdet(self.weight)
    if dim > 2:
        num = mask.view(out.size(0), -1).sum(dim=1)
        logdet = logdet * num
    return out, logdet
Args:
    input: Tensor
        input tensor [batch, N1, N2, ..., Nl, in_features]
    mask: Tensor
        mask tensor [batch, N1, N2, ..., Nl]

Returns: out: Tensor, logdet: Tensor
    out: [batch, N1, N2, ..., in_features], the output of the flow
    logdet: [batch], the log determinant of :math:`\partial output / \partial input`
https://github.com/xuezhemax/flowseq/blob/8cb4ae00c26fbeb3e1459e3b3b90e7e9a84c3d2b/flownmt/flows/linear.py#L27-L48
from overrides import overrides
from typing import Dict, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter

from flownmt.flows.flow import Flow


class InvertibleLinearFlow(Flow):
    def __init__(self, in_features, inverse=False):
        super(InvertibleLinearFlow, self).__init__(inverse)
        self.in_features = in_features
        self.weight = Parameter(torch.Tensor(in_features, in_features))
        self.register_buffer('weight_inv', self.weight.data.clone())
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.orthogonal_(self.weight)
        self.sync()

    def sync(self):
        self.weight_inv.copy_(self.weight.data.inverse())

    @overrides
Apache License 2.0
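A hypothetical forward pass through the invertible linear flow in the record above, with shapes matching the docstring ([batch, length, in_features] input and [batch, length] mask); all sizes are illustrative.

    import torch
    from flownmt.flows.linear import InvertibleLinearFlow

    flow = InvertibleLinearFlow(in_features=8)
    x = torch.randn(4, 10, 8)                  # [batch, N1, in_features]
    mask = torch.ones(4, 10)                   # [batch, N1]
    out, logdet = flow.forward(x, mask)
    print(out.shape, logdet.shape)             # torch.Size([4, 10, 8]) torch.Size([4])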
pypa/pipenv
pipenv/vendor/requirementslib/utils.py
convert_entry_to_path
python
def convert_entry_to_path(path):
    if not isinstance(path, Mapping):
        raise TypeError("expecting a mapping, received {0!r}".format(path))
    if not any(key in path for key in ["file", "path"]):
        raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path))
    if "file" in path:
        path = vistir.path.url_to_path(path["file"])
    elif "path" in path:
        path = path["path"]
    if not os.name == "nt":
        return fs_decode(path)
    return Path(fs_decode(path)).as_posix()
Convert a pipfile entry to a string
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/vendor/requirementslib/utils.py#L168-L185
from __future__ import absolute_import, print_function import logging import os import sys import pip_shims.shims import six import six.moves import tomlkit import vistir from six.moves.urllib.parse import urlparse, urlsplit, urlunparse from vistir.compat import Path, fs_decode from vistir.path import ensure_mkdir_p, is_valid_url from .environment import MYPY_RUNNING six.add_move( six.MovedAttribute("Mapping", "collections", "collections.abc") ) six.add_move( six.MovedAttribute("Sequence", "collections", "collections.abc") ) six.add_move( six.MovedAttribute("Set", "collections", "collections.abc") ) six.add_move( six.MovedAttribute("ItemsView", "collections", "collections.abc") ) from six.moves import ItemsView, Mapping, Sequence, Set if MYPY_RUNNING: from typing import Dict, Any, Optional, Union, Tuple, List, Iterable, Text, TypeVar STRING_TYPE = Union[bytes, str, Text] S = TypeVar("S", bytes, str, Text) PipfileEntryType = Union[STRING_TYPE, bool, Tuple[STRING_TYPE], List[STRING_TYPE]] PipfileType = Union[STRING_TYPE, Dict[STRING_TYPE, PipfileEntryType]] VCS_LIST = ("git", "svn", "hg", "bzr") def setup_logger(): logger = logging.getLogger("requirementslib") loglevel = logging.DEBUG handler = logging.StreamHandler(stream=sys.stderr) handler.setLevel(loglevel) logger.addHandler(handler) logger.setLevel(loglevel) return logger log = setup_logger() SCHEME_LIST = ("http://", "https://", "ftp://", "ftps://", "file://") VCS_SCHEMES = [ "git", "git+http", "git+https", "git+ssh", "git+git", "git+file", "hg", "hg+http", "hg+https", "hg+ssh", "hg+static-http", "svn", "svn+ssh", "svn+http", "svn+https", "svn+svn", "bzr", "bzr+http", "bzr+https", "bzr+ssh", "bzr+sftp", "bzr+ftp", "bzr+lp", ] def is_installable_dir(path): if pip_shims.shims.is_installable_dir(path): return True pyproject_path = os.path.join(path, "pyproject.toml") if os.path.exists(pyproject_path): pyproject = Path(pyproject_path) pyproject_toml = tomlkit.loads(pyproject.read_text()) build_system = pyproject_toml.get("build-system", {}).get("build-backend", "") if build_system: return True return False def strip_ssh_from_git_uri(uri): if isinstance(uri, six.string_types): if "git+ssh://" in uri: parsed = urlparse(uri) path_part, _, path = parsed.path.lstrip("/").partition("/") path = "/{0}".format(path) parsed = parsed._replace( netloc="{0}:{1}".format(parsed.netloc, path_part), path=path ) uri = urlunparse(parsed).replace("git+ssh://", "git+", 1) return uri def add_ssh_scheme_to_git_uri(uri): if isinstance(uri, six.string_types): if uri.startswith("git+") and "://" not in uri: uri = uri.replace("git+", "git+ssh://", 1) parsed = urlparse(uri) if ":" in parsed.netloc: netloc, _, path_start = parsed.netloc.rpartition(":") path = "/{0}{1}".format(path_start, parsed.path) uri = urlunparse(parsed._replace(netloc=netloc, path=path)) return uri def is_vcs(pipfile_entry): if isinstance(pipfile_entry, Mapping): return any(key for key in pipfile_entry.keys() if key in VCS_LIST) elif isinstance(pipfile_entry, six.string_types): if not is_valid_url(pipfile_entry) and pipfile_entry.startswith("git+"): pipfile_entry = add_ssh_scheme_to_git_uri(pipfile_entry) parsed_entry = urlsplit(pipfile_entry) return parsed_entry.scheme in VCS_SCHEMES return False def is_editable(pipfile_entry): if isinstance(pipfile_entry, Mapping): return pipfile_entry.get("editable", False) is True if isinstance(pipfile_entry, six.string_types): return pipfile_entry.startswith("-e ") return False def is_star(val): return (isinstance(val, six.string_types) and val == "*") or ( 
isinstance(val, Mapping) and val.get("version", "") == "*" )
MIT License
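An illustrative call of convert_entry_to_path with an in-memory Pipfile-style entry; the local path is a made-up example and the import assumes requirementslib is installed as a standalone package rather than vendored inside pipenv.

    from requirementslib.utils import convert_entry_to_path

    entry = {"path": "./vendored/requests", "editable": True}
    print(convert_entry_to_path(entry))        # './vendored/requests' (POSIX form on Windows)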
ljvmiranda921/pyswarms
tests/utils/search/conftest.py
random_bounded
python
def random_bounded():
    bounds = (np.array([-5, -5]), np.array([5, 5]))
    options = {
        "c1": [1, 5],
        "c2": [6, 10],
        "k": [11, 15],
        "w": [0.4, 0.9],
        "p": 1,
    }
    return RandomSearch(
        LocalBestPSO,
        n_particles=40,
        dimensions=20,
        options=options,
        objective_func=sphere,
        iters=10,
        n_selection_iters=100,
        bounds=bounds,
    )
Returns a RandomSearch instance with bounds
https://github.com/ljvmiranda921/pyswarms/blob/ea161d9a932388a2595e777b8f140833406e0a77/tests/utils/search/conftest.py#L78-L97
import numpy as np import pytest from pyswarms.single import LocalBestPSO from pyswarms.utils.functions.single_obj import sphere from pyswarms.utils.search.grid_search import GridSearch from pyswarms.utils.search.random_search import RandomSearch @pytest.fixture def grid(): options = { "c1": [1, 2, 3], "c2": [1, 2, 3], "k": [5, 10, 15], "w": [0.9, 0.7, 0.4], "p": [1], } return GridSearch( LocalBestPSO, n_particles=40, dimensions=20, options=options, objective_func=sphere, iters=10, bounds=None, ) @pytest.fixture def grid_mini(): options = {"c1": [1, 2], "c2": 6, "k": 5, "w": 0.9, "p": 0} return GridSearch( LocalBestPSO, n_particles=40, dimensions=20, options=options, objective_func=sphere, iters=10, bounds=None, ) @pytest.fixture def random_unbounded(): options = { "c1": [1, 5], "c2": [6, 10], "k": [11, 15], "w": [0.4, 0.9], "p": 1, } return RandomSearch( LocalBestPSO, n_particles=40, dimensions=20, options=options, objective_func=sphere, iters=10, n_selection_iters=100, bounds=None, ) @pytest.fixture
MIT License
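A sketch of a test that would consume the fixture above; pytest injects the RandomSearch instance by parameter name. The .search() call and its (best_score, best_options) return value are assumptions about the search-utility interface, and the assertions are only illustrative.

    def test_random_bounded_search(random_bounded):
        # random_bounded is the fixture defined above; pytest passes it in by name.
        best_score, best_options = random_bounded.search()
        assert best_score >= 0                  # sphere is non-negative
        assert "c1" in best_options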
karchinlab/2020plus
src/utils/python/amino_acid.py
AminoAcid.__set_frame_shift_status
python
def __set_frame_shift_status(self):
    if 'fs' in self.hgvs_original:
        self.is_frame_shift = True
        self.is_non_silent = True
    elif re.search('[A-Z]\d+[A-Z]+\*', self.hgvs_original):
        self.is_frame_shift = True
        self.is_non_silent = True
    else:
        self.is_frame_shift = False
Check for frame shift and set the self.is_frame_shift flag.
https://github.com/karchinlab/2020plus/blob/3a645e2dfedbb3857494e8e7f9cf30eb8f4e87cc/src/utils/python/amino_acid.py#L150-L161
import re import logging class AminoAcid(object): def __init__(self, hgvs='', occurrence=1): self.logger = logging.getLogger(__name__) self.is_non_silent = False self.is_synonymous = False if not (type(hgvs) is str or type(hgvs) is type(u'')): self.is_valid = False self.set_mutation_type() elif 'P.' not in hgvs.upper(): self.is_valid = False self.set_mutation_type() else: self.hgvs_original = hgvs hgvs = hgvs.upper().replace('>', '') self.hgvs = hgvs self.occurrence = occurrence self.set_amino_acid(hgvs) self.set_mutation_type() def set_mutation_type(self, mut_type=''): if mut_type: self.mutation_type = mut_type else: if not self.is_valid: self.mutation_type = 'not valid' elif self.unknown_effect: self.mutation_type = 'unknown effect' elif self.is_no_protein: self.mutation_type = 'no protein' elif self.is_missing_info: self.mutation_type = 'missing' else: if self.is_lost_stop: self.mutation_type = 'Nonstop_Mutation' elif self.is_lost_start: self.mutation_type = 'Translation_Start_Site' elif self.is_synonymous: self.mutation_type = 'Silent' elif self.is_missense: self.mutation_type = 'Missense_Mutation' elif self.is_indel: self.mutation_type = 'In_Frame_Indel' elif self.is_nonsense_mutation: self.mutation_type = 'Nonsense_Mutation' elif self.is_frame_shift: self.mutation_type = 'Frame_Shift_Indel' def set_occurrence(self, occur): self.occurrence = occur def set_amino_acid(self, aa): aa = aa.upper() aa = aa[2:] if aa.startswith('P.') else aa self.__set_mutation_status() self.__parse_hgvs_syntax(aa) def __set_mutation_status(self): hgvs_tmp = self.hgvs[2:] if self.hgvs.startswith("P.") else self.hgvs self.__set_unkown_effect(hgvs_tmp) self.__set_no_protein(hgvs_tmp) self.__set_mutation_type(hgvs_tmp) def __set_mutation_type(self, hgvs_string): self.__set_lost_stop_status(hgvs_string) self.__set_lost_start_status(hgvs_string) self.__set_missense_status(hgvs_string) self.__set_indel_status() self.__set_frame_shift_status() self.__set_premature_stop_codon_status(hgvs_string) def __set_missense_status(self, hgvs_string): if re.search('^[A-Z?]\d+[A-Z?]$', hgvs_string): self.is_missense = True self.is_non_silent = True else: self.is_missense = False def __set_lost_start_status(self, hgvs_string): mymatch = re.search('^([A-Z?])(\d+)([A-Z?])$', hgvs_string) if mymatch: grps = mymatch.groups() if int(grps[1]) == 1 and grps[0] != grps[2]: self.is_lost_start = True self.is_non_silent = True else: self.is_lost_start = False else: self.is_lost_start = False
Apache License 2.0
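An illustrative construction showing the frame-shift flag being set from an HGVS protein string; the variant is synthetic and the import assumes the repository root is on sys.path.

    from src.utils.python.amino_acid import AminoAcid

    aa = AminoAcid('p.Q23fs')                  # 'fs' present in the original HGVS string
    print(aa.is_frame_shift)                   # expected: True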
liberai/nspm
gsoc/anand/pipeline_1/pipeline_1_composite/decision_tree.py
decision_tree
python
def decision_tree(input_file, project_name, output_file="decision_tree.csv", url="Use a valid URL", uri_file="Proper URI file", namespace="Valid namespace"):
    if __name__ == "__main__":
        f = open(input_file, 'r')
        lines = f.readlines()
        pass
    if not __name__ == "__main__":
        lines = integrate(namespace=namespace, uri_file=uri_file, project_name=project_name, url=url)
    final_lines = []
    lineno = 1
    """
    print lines[0].split(',')
    ['Property', 'Label ', 'Range', 'Fuzzy Score', 'Comment about expr', 'URI', 'Number of Occurrences', 'MVE', 'Optimal Expression\r\n']
    """
    """
    - The lines from the file generated in the previous steps is read and a for loop iterates through ecery row of
    - First we create a list of all elements seperated by commas.
    - If the range has the substring person, the we put as question who else what.
    - We append the question thus generate 2 times as minimum viable instruction and optimal expression.
    - We create a variable names final lines and add strings, which are formed by adding strings formed by joining the elements of the list delemited by comma in each line.
    - We also create a string of the question generated delemited by a newline characte and store it in mve as a long string.
    - We output the series of question in mve_output.
    - We save the final_lines strind in a file named GS_with_mve.csv delimeted by a newline character.
    """
    mve = ""
    for line in tqdm(lines):
        if lineno == 1:
            lineno += 1
            continue
        line = line.strip().split(',')
        rng = line[2].lower()
        lbl = line[1]
        if 'person' in rng:
            rng = "who"
        else:
            rng = "what"
        """
        line[7] = rng + " is the " + lbl + " of <X>"
        line[8] = rng + " is the " + lbl + " of <X>"
        """
        if(len(line) < 9):
            line.append(rng + " is the " + lbl + " of <X>")
            line.append(rng + " is the " + lbl + " of <X>")
        else:
            line[7] = rng + " is the " + lbl + " of <X>"
            line[8] = rng + " is the " + lbl + " of <X>"
        mve += rng + " is the " + lbl + " of <X>\n"
        final_lines.append(",".join(line))
    fw = open(project_name+"/"+"mve"+output_file, 'w')
    fw.write(mve)
    fw2 = open(project_name+"/"+output_file, 'w')
    fw2.write("\n".join(final_lines))
    return final_lines
print lines[0].split(',')
['Property', 'Label ', 'Range', 'Fuzzy Score', 'Comment about expr', 'URI', 'Number of Occurrences', 'MVE', 'Optimal Expression\r\n']
https://github.com/liberai/nspm/blob/cc352dbbda6751e8cf19769c9440c03e31687829/gsoc/anand/pipeline_1/pipeline_1_composite/decision_tree.py#L8-L77
import sys
import re
import argparse
from tqdm import tqdm
from integrate import integrate
MIT License
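A hypothetical invocation of decision_tree from the script's own directory; every argument value here is a placeholder and the project directory is assumed to exist. When the module is imported rather than run as __main__, the rows come from integrate() using the url/uri_file/namespace arguments instead of input_file.

    from decision_tree import decision_tree

    rows = decision_tree(
        input_file="project/annotated_properties.csv",   # ignored when imported
        project_name="project",
        output_file="decision_tree.csv",
        url="http://dbpedia.org/sparql",
        uri_file="project/part-r-00000",
        namespace="http://dbpedia.org/ontology/",
    )
    print(len(rows))                                     # one templated question per property row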