repository_name (string, 7–107 chars) | function_path (string, 4–190 chars) | function_identifier (string, 1–236 chars) | language (1 class) | function (string, 9–647k chars) | docstring (string, 5–488k chars) | function_url (string, 71–285 chars) | context (string, 0–2.51M chars) | license (5 classes)
---|---|---|---|---|---|---|---|---
rapid7/vm-console-client-python
|
rapid7vmconsole/models/agent.py
|
Agent.os
|
python
|
def os(self, os):
self._os = os
|
Sets the os of this Agent.
The full description of the operating system of the asset. # noqa: E501
:param os: The os of this Agent. # noqa: E501
:type: str
|
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/agent.py#L554-L563
|
import pprint
import re
import six
class Agent(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'addresses': 'list[Address]',
'agent_id': 'str',
'assessed_for_policies': 'bool',
'assessed_for_vulnerabilities': 'bool',
'configurations': 'list[Configuration]',
'databases': 'list[Database]',
'files': 'list[File]',
'history': 'list[AssetHistory]',
'host_name': 'str',
'host_names': 'list[HostName]',
'id': 'int',
'ids': 'list[UniqueId]',
'ip': 'str',
'last_assessed_for_vulnerabilities': 'str',
'links': 'list[Link]',
'mac': 'str',
'os': 'str',
'os_fingerprint': 'OperatingSystem',
'raw_risk_score': 'float',
'risk_score': 'float',
'services': 'list[Service]',
'software': 'list[Software]',
'type': 'str',
'user_groups': 'list[GroupAccount]',
'users': 'list[UserAccount]',
'vulnerabilities': 'AssetVulnerabilities'
}
attribute_map = {
'addresses': 'addresses',
'agent_id': 'agentId',
'assessed_for_policies': 'assessedForPolicies',
'assessed_for_vulnerabilities': 'assessedForVulnerabilities',
'configurations': 'configurations',
'databases': 'databases',
'files': 'files',
'history': 'history',
'host_name': 'hostName',
'host_names': 'hostNames',
'id': 'id',
'ids': 'ids',
'ip': 'ip',
'last_assessed_for_vulnerabilities': 'lastAssessedForVulnerabilities',
'links': 'links',
'mac': 'mac',
'os': 'os',
'os_fingerprint': 'osFingerprint',
'raw_risk_score': 'rawRiskScore',
'risk_score': 'riskScore',
'services': 'services',
'software': 'software',
'type': 'type',
'user_groups': 'userGroups',
'users': 'users',
'vulnerabilities': 'vulnerabilities'
}
def __init__(self, addresses=None, agent_id=None, assessed_for_policies=None, assessed_for_vulnerabilities=None, configurations=None, databases=None, files=None, history=None, host_name=None, host_names=None, id=None, ids=None, ip=None, last_assessed_for_vulnerabilities=None, links=None, mac=None, os=None, os_fingerprint=None, raw_risk_score=None, risk_score=None, services=None, software=None, type=None, user_groups=None, users=None, vulnerabilities=None):
self._addresses = None
self._agent_id = None
self._assessed_for_policies = None
self._assessed_for_vulnerabilities = None
self._configurations = None
self._databases = None
self._files = None
self._history = None
self._host_name = None
self._host_names = None
self._id = None
self._ids = None
self._ip = None
self._last_assessed_for_vulnerabilities = None
self._links = None
self._mac = None
self._os = None
self._os_fingerprint = None
self._raw_risk_score = None
self._risk_score = None
self._services = None
self._software = None
self._type = None
self._user_groups = None
self._users = None
self._vulnerabilities = None
self.discriminator = None
if addresses is not None:
self.addresses = addresses
if agent_id is not None:
self.agent_id = agent_id
if assessed_for_policies is not None:
self.assessed_for_policies = assessed_for_policies
if assessed_for_vulnerabilities is not None:
self.assessed_for_vulnerabilities = assessed_for_vulnerabilities
if configurations is not None:
self.configurations = configurations
if databases is not None:
self.databases = databases
if files is not None:
self.files = files
if history is not None:
self.history = history
if host_name is not None:
self.host_name = host_name
if host_names is not None:
self.host_names = host_names
if id is not None:
self.id = id
if ids is not None:
self.ids = ids
if ip is not None:
self.ip = ip
self.last_assessed_for_vulnerabilities = last_assessed_for_vulnerabilities
if links is not None:
self.links = links
if mac is not None:
self.mac = mac
if os is not None:
self.os = os
if os_fingerprint is not None:
self.os_fingerprint = os_fingerprint
if raw_risk_score is not None:
self.raw_risk_score = raw_risk_score
if risk_score is not None:
self.risk_score = risk_score
if services is not None:
self.services = services
if software is not None:
self.software = software
if type is not None:
self.type = type
if user_groups is not None:
self.user_groups = user_groups
if users is not None:
self.users = users
if vulnerabilities is not None:
self.vulnerabilities = vulnerabilities
@property
def addresses(self):
return self._addresses
@addresses.setter
def addresses(self, addresses):
self._addresses = addresses
@property
def agent_id(self):
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
self._agent_id = agent_id
@property
def assessed_for_policies(self):
return self._assessed_for_policies
@assessed_for_policies.setter
def assessed_for_policies(self, assessed_for_policies):
self._assessed_for_policies = assessed_for_policies
@property
def assessed_for_vulnerabilities(self):
return self._assessed_for_vulnerabilities
@assessed_for_vulnerabilities.setter
def assessed_for_vulnerabilities(self, assessed_for_vulnerabilities):
self._assessed_for_vulnerabilities = assessed_for_vulnerabilities
@property
def configurations(self):
return self._configurations
@configurations.setter
def configurations(self, configurations):
self._configurations = configurations
@property
def databases(self):
return self._databases
@databases.setter
def databases(self, databases):
self._databases = databases
@property
def files(self):
return self._files
@files.setter
def files(self, files):
self._files = files
@property
def history(self):
return self._history
@history.setter
def history(self, history):
self._history = history
@property
def host_name(self):
return self._host_name
@host_name.setter
def host_name(self, host_name):
self._host_name = host_name
@property
def host_names(self):
return self._host_names
@host_names.setter
def host_names(self, host_names):
self._host_names = host_names
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def ids(self):
return self._ids
@ids.setter
def ids(self, ids):
self._ids = ids
@property
def ip(self):
return self._ip
@ip.setter
def ip(self, ip):
self._ip = ip
@property
def last_assessed_for_vulnerabilities(self):
return self._last_assessed_for_vulnerabilities
@last_assessed_for_vulnerabilities.setter
def last_assessed_for_vulnerabilities(self, last_assessed_for_vulnerabilities):
if last_assessed_for_vulnerabilities is None:
raise ValueError("Invalid value for `last_assessed_for_vulnerabilities`, must not be `None`")
self._last_assessed_for_vulnerabilities = last_assessed_for_vulnerabilities
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
self._mac = mac
@property
def os(self):
return self._os
@os.setter
|
MIT License
|
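A minimal usage sketch for the `Agent.os` setter above; it assumes the generated `rapid7vmconsole` package is installed, and the hostname and timestamp values are placeholders.

from rapid7vmconsole.models.agent import Agent

# The constructor assigns last_assessed_for_vulnerabilities unconditionally and
# its setter rejects None, so a value must be supplied here.
agent = Agent(host_name="web-01.example.com",
              last_assessed_for_vulnerabilities="2021-01-01T00:00:00Z")
agent.os = "Ubuntu Linux 20.04"  # the setter shown above stores the value in _os
print(agent.os)                  # the matching getter returns "Ubuntu Linux 20.04"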
yinboc/few-shot-meta-baseline
|
meta-dataset/meta_dataset/data/sampling.py
|
compute_num_query
|
python
|
def compute_num_query(images_per_class, max_num_query):
if images_per_class.min() < 2:
raise ValueError('Expected at least 2 images per class.')
return np.minimum(max_num_query, (images_per_class // 2).min())
|
Computes the number of query examples per class in the episode.
Query sets are balanced, i.e., contain the same number of examples for each
class in the episode.
That number is such that the number of query examples corresponds to at most
half of the examples of any class in the episode, and is no greater
than `max_num_query`.
Args:
images_per_class: np.array, number of images for each class.
max_num_query: int, maximum number of query examples per class.
Returns:
num_query: int, number of query examples per class in the episode.
|
https://github.com/yinboc/few-shot-meta-baseline/blob/779fae39dad3537e7c801049c858923e2a352dfe/meta-dataset/meta_dataset/data/sampling.py#L85-L104
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import imagenet_specification
import numpy as np
from six.moves import zip
RNG = np.random.RandomState(seed=None)
MAX_SPANNING_LEAVES_ELIGIBLE = 392
def sample_num_ways_uniformly(num_classes, min_ways, max_ways):
max_ways = min(max_ways, num_classes)
return RNG.randint(low=min_ways, high=max_ways + 1)
def sample_class_ids_uniformly(num_ways, num_classes):
return RNG.choice(num_classes, num_ways, replace=False)
|
MIT License
|
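A self-contained sketch of the rule described in the docstring above, re-declaring `compute_num_query` locally so it runs without the meta-dataset package; the class sizes are made up.

import numpy as np

def compute_num_query(images_per_class, max_num_query):
    # The number of query examples is at most half of the smallest class,
    # capped at max_num_query (as in the function above).
    if images_per_class.min() < 2:
        raise ValueError('Expected at least 2 images per class.')
    return np.minimum(max_num_query, (images_per_class // 2).min())

# A 3-way episode with 10, 5 and 7 images per class: half of the smallest class
# is 5 // 2 = 2, which is below the cap of 10, so each class gets 2 query examples.
print(compute_num_query(np.array([10, 5, 7]), 10))  # -> 2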
google-research/seed_rl
|
common/utils.py
|
Aggregator.__init__
|
python
|
def __init__(self, num_envs, specs, name='Aggregator'):
super(Aggregator, self).__init__(name=name)
def create_variable(spec):
z = tf.zeros([num_envs] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._state = tf.nest.map_structure(create_variable, specs)
|
Inits an Aggregator.
Args:
num_envs: int, number of environments.
specs: Structure (as defined by tf.nest) of tf.TensorSpecs that will be
stored for each environment.
name: Name of the scope for the operations.
|
https://github.com/google-research/seed_rl/blob/66e8890261f09d0355e8bf5f1c5e41968ca9f02b/common/utils.py#L464-L478
|
import collections
import pickle
import threading
import time
import timeit
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import tensor_conversion_registry
FLAGS = flags.FLAGS
EnvOutput = collections.namedtuple(
'EnvOutput', 'reward done observation abandoned episode_step')
Settings = collections.namedtuple(
'Settings', 'strategy inference_devices training_strategy encode decode')
MultiHostSettings = collections.namedtuple(
'MultiHostSettings', 'strategy hosts training_strategy encode decode')
def init_learner_multi_host(num_training_tpus: int):
tpu = ''
job_name = None
if tf.config.experimental.list_logical_devices('TPU'):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu, job_name=job_name)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
assert num_training_tpus % topology.num_tasks == 0
num_training_tpus_per_task = num_training_tpus // topology.num_tasks
hosts = []
training_coordinates = []
for per_host_coordinates in topology.device_coordinates:
host = topology.cpu_device_name_at_coordinates(
per_host_coordinates[0], job=job_name)
task_training_coordinates = (
per_host_coordinates[:num_training_tpus_per_task])
training_coordinates.extend([[c] for c in task_training_coordinates])
inference_coordinates = per_host_coordinates[num_training_tpus_per_task:]
hosts.append((host, [
topology.tpu_device_name_at_coordinates(c, job=job_name)
for c in inference_coordinates
]))
training_da = tf.tpu.experimental.DeviceAssignment(topology,
training_coordinates)
training_strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=training_da)
return MultiHostSettings(strategy, hosts, training_strategy, tpu_encode,
tpu_decode)
else:
tf.device('/cpu').__enter__()
any_gpu = tf.config.experimental.list_logical_devices('GPU')
device_name = '/device:GPU:0' if any_gpu else '/device:CPU:0'
strategy = tf.distribute.OneDeviceStrategy(device=device_name)
enc = lambda x: x
dec = lambda x, s=None: x if s is None else tf.nest.pack_sequence_as(s, x)
return MultiHostSettings(
strategy, [('/cpu', [device_name])], strategy, enc, dec)
def init_learner(num_training_tpus):
settings = init_learner_multi_host(num_training_tpus)
if len(settings.hosts) != 1:
raise ValueError(f'Invalid number of hosts: {len(settings.hosts)}')
return Settings(settings.strategy, settings.hosts[0][1],
settings.training_strategy, settings.encode, settings.decode)
class UnrollStore(tf.Module):
def __init__(self,
num_envs,
unroll_length,
timestep_specs,
num_overlapping_steps=0,
name='UnrollStore'):
super(UnrollStore, self).__init__(name=name)
with self.name_scope:
self._full_length = num_overlapping_steps + unroll_length + 1
def create_unroll_variable(spec):
z = tf.zeros(
[num_envs, self._full_length] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._unroll_length = unroll_length
self._num_overlapping_steps = num_overlapping_steps
self._state = tf.nest.map_structure(create_unroll_variable,
timestep_specs)
self._index = tf.Variable(
tf.fill([num_envs], tf.constant(num_overlapping_steps, tf.int32)),
trainable=False,
name='index')
@property
def unroll_specs(self):
return tf.nest.map_structure(lambda v: tf.TensorSpec(v.shape[1:], v.dtype),
self._state)
@tf.function
@tf.Module.with_name_scope
def append(self, env_ids, values):
tf.debugging.assert_equal(
tf.shape(env_ids),
tf.shape(tf.unique(env_ids)[0]),
message=f'Duplicate environment ids in store {self.name}')
tf.nest.map_structure(
lambda s: tf.debugging.assert_equal(
tf.shape(env_ids)[0],
tf.shape(s)[0],
message=(f'Batch dimension must equal the number of environments '
f'in store {self.name}.')),
values)
curr_indices = self._index.sparse_read(env_ids)
unroll_indices = tf.stack([env_ids, curr_indices], axis=-1)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_nd_update(unroll_indices, v)
self._index.scatter_add(tf.IndexedSlices(1, env_ids))
return self._complete_unrolls(env_ids)
@tf.function
@tf.Module.with_name_scope
def reset(self, env_ids):
self._index.scatter_update(
tf.IndexedSlices(self._num_overlapping_steps, env_ids))
j = self._num_overlapping_steps
repeated_env_ids = tf.reshape(
tf.tile(tf.expand_dims(tf.cast(env_ids, tf.int64), -1), [1, j]), [-1])
repeated_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(env_ids)[0]])
indices = tf.stack([repeated_env_ids, repeated_range], axis=-1)
for s in tf.nest.flatten(self._state):
z = tf.zeros(tf.concat([tf.shape(repeated_env_ids),
s.shape[2:]], axis=0), s.dtype)
s.scatter_nd_update(indices, z)
def _complete_unrolls(self, env_ids):
env_indices = self._index.sparse_read(env_ids)
env_ids = tf.gather(
env_ids,
tf.where(tf.equal(env_indices, self._full_length))[:, 0])
env_ids = tf.cast(env_ids, tf.int64)
unrolls = tf.nest.map_structure(lambda s: s.sparse_read(env_ids),
self._state)
j = self._num_overlapping_steps + 1
repeated_start_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(env_ids)[0]])
repeated_end_range = tf.tile(
tf.range(self._full_length - j, self._full_length, dtype=tf.int64),
[tf.shape(env_ids)[0]])
repeated_env_ids = tf.reshape(
tf.tile(tf.expand_dims(env_ids, -1), [1, j]), [-1])
start_indices = tf.stack([repeated_env_ids, repeated_start_range], -1)
end_indices = tf.stack([repeated_env_ids, repeated_end_range], -1)
for s in tf.nest.flatten(self._state):
s.scatter_nd_update(start_indices, s.gather_nd(end_indices))
self._index.scatter_update(
tf.IndexedSlices(1 + self._num_overlapping_steps, env_ids))
return env_ids, unrolls
class PrioritizedReplay(tf.Module):
def __init__(self, size, specs, importance_sampling_exponent,
name='PrioritizedReplay'):
super(PrioritizedReplay, self).__init__(name=name)
self._priorities = tf.Variable(tf.zeros([size]), dtype=tf.float32)
self._buffer = tf.nest.map_structure(
lambda ts: tf.Variable(tf.zeros([size] + ts.shape, dtype=ts.dtype)),
specs)
self.num_inserted = tf.Variable(0, dtype=tf.int64)
self._importance_sampling_exponent = importance_sampling_exponent
@tf.function
@tf.Module.with_name_scope
def insert(self, values, priorities):
tf.nest.assert_same_structure(values, self._buffer)
values = tf.nest.map_structure(tf.convert_to_tensor, values)
append_size = tf.nest.flatten(values)[0].shape[0]
start_index = self.num_inserted
end_index = start_index + append_size
size = self._priorities.shape[0]
insert_indices = tf.range(start_index, end_index) % size
tf.nest.map_structure(
lambda b, v: b.batch_scatter_update(
tf.IndexedSlices(v, insert_indices)),
self._buffer,
values)
self.num_inserted.assign_add(append_size)
self._priorities.batch_scatter_update(
tf.IndexedSlices(priorities, insert_indices))
return insert_indices
@tf.function
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
tf.debugging.assert_greater_equal(
self.num_inserted,
tf.constant(0, tf.int64),
message='Cannot sample if replay buffer is empty')
size = self._priorities.shape[0]
limit = tf.minimum(tf.cast(size, tf.int64), self.num_inserted)
if priority_exp == 0:
indices = tf.random.uniform([num_samples], maxval=limit, dtype=tf.int64)
weights = tf.ones_like(indices, dtype=tf.float32)
else:
prob = self._priorities[:limit]**priority_exp
prob /= tf.reduce_sum(prob)
indices = tf.random.categorical([tf.math.log(prob)], num_samples)[0]
weights = (((1. / tf.cast(limit, tf.float32)) /
tf.gather(prob, indices)) **
self._importance_sampling_exponent)
weights /= tf.reduce_max(weights)
sampled_values = tf.nest.map_structure(
lambda b: b.sparse_read(indices), self._buffer)
return indices, weights, sampled_values
@tf.function
@tf.Module.with_name_scope
def update_priorities(self, indices, priorities):
self._priorities.batch_scatter_update(tf.IndexedSlices(priorities, indices))
class HindsightExperienceReplay(PrioritizedReplay):
def __init__(self, size, specs, importance_sampling_exponent,
compute_reward_fn,
unroll_length,
substitution_probability,
name='HindsightExperienceReplay'):
super(HindsightExperienceReplay, self).__init__(
size, specs, importance_sampling_exponent, name)
self._compute_reward_fn = compute_reward_fn
self._unroll_length = unroll_length
self._substitution_probability = substitution_probability
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
indices, weights, sampled_values = super(
HindsightExperienceReplay, self).sample(num_samples, priority_exp)
observation = sampled_values.env_outputs.observation
batch_size, time_horizon = observation['achieved_goal'].shape[:2]
def compute_goal_reward():
goal_reward = self._compute_reward_fn(
achieved_goal=observation['achieved_goal'][:, 1:],
desired_goal=observation['desired_goal'][:, :-1])
return tf.concat(values=[goal_reward[:, :1] * np.nan, goal_reward],
axis=1)
old_goal_reward = compute_goal_reward()
assert old_goal_reward.shape == observation['achieved_goal'].shape[:-1]
goal_ind = tf.concat(
values=[tf.random.uniform((batch_size, 1), min(t + 1, time_horizon - 1),
time_horizon, dtype=tf.int32)
for t in range(time_horizon)], axis=1)
substituted_goal = tf.gather(observation['achieved_goal'],
goal_ind, axis=1, batch_dims=1)
mask = tf.cast(tfp.distributions.Bernoulli(
probs=self._substitution_probability *
tf.ones(goal_ind.shape)).sample(), observation['desired_goal'].dtype)
mask *= tf.cast(~sampled_values.env_outputs.done,
observation['desired_goal'].dtype)
mask = mask[..., tf.newaxis]
observation['desired_goal'] = (
mask * substituted_goal + (1 - mask) * observation['desired_goal'])
new_goal_reward = compute_goal_reward()
assert new_goal_reward.shape == observation['achieved_goal'].shape[:-1]
sampled_values = sampled_values._replace(
env_outputs=sampled_values.env_outputs._replace(
reward=sampled_values.env_outputs.reward +
(new_goal_reward - old_goal_reward) * tf.cast(
~sampled_values.env_outputs.done, tf.float32)
))
assert time_horizon >= self._unroll_length + 1
unroll_begin_ind = tf.random.uniform(
(batch_size,), 0, time_horizon - self._unroll_length, dtype=tf.int32)
unroll_inds = unroll_begin_ind[:, tf.newaxis] + tf.math.cumsum(
tf.ones((batch_size, self._unroll_length + 1), tf.int32),
axis=1, exclusive=True)
subsampled_values = tf.nest.map_structure(
lambda t: tf.gather(t, unroll_inds, axis=1, batch_dims=1),
sampled_values)
if hasattr(sampled_values, 'agent_state'):
subsampled_values = subsampled_values._replace(
agent_state=sampled_values.agent_state)
return indices, weights, subsampled_values
class Aggregator(tf.Module):
|
Apache License 2.0
|
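A sketch of the variable-creation pattern used by `Aggregator.__init__` above: each `tf.TensorSpec` in an arbitrary nest becomes a zero-initialized, non-trainable variable with a leading `num_envs` dimension. The spec names and shapes here are illustrative, not from seed_rl.

import tensorflow as tf

num_envs = 4
specs = {'reward': tf.TensorSpec([], tf.float32, name='reward'),
         'observation': tf.TensorSpec([84, 84], tf.uint8, name='observation')}

def create_variable(spec):
    # One per-environment slot per spec, zero-initialized as in the original.
    z = tf.zeros([num_envs] + spec.shape.dims, dtype=spec.dtype)
    return tf.Variable(z, trainable=False, name=spec.name)

state = tf.nest.map_structure(create_variable, specs)
print(state['reward'].shape)       # (4,)
print(state['observation'].shape)  # (4, 84, 84)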
neozhaoliang/pywonderland
|
src/polytopes/polytopes/coxeter_plane.py
|
draw_on_coxeter_plane
|
python
|
def draw_on_coxeter_plane(
P, svgpath=None, image_size=600, linewidth=0.0012, markersize=0.015
):
P.build_geometry()
M = P.mirrors
C = 2 * np.dot(M, M.T)
eigenvals, eigenvecs = np.linalg.eigh(C)
u = eigenvecs[:, 0]
v = eigenvecs[:, -1]
u = np.dot(u, M)
v = np.dot(v, M)
u = helpers.normalize(u)
v = helpers.normalize(v)
vertices_2d = [(np.dot(u, x), np.dot(v, x)) for x in P.vertices_coords]
if svgpath is None:
svgpath = P.__class__.__name__ + ".svg"
extent = 0.99
surface = cairo.SVGSurface(svgpath, image_size, image_size)
ctx = cairo.Context(surface)
ctx.scale(image_size / (extent * 2.0), -image_size / (extent * 2.0))
ctx.translate(extent, -extent)
ctx.set_source_rgb(1, 1, 1)
ctx.paint()
ctx.set_line_width(linewidth)
for elist in P.edge_indices:
for i, j in elist:
x1, y1 = vertices_2d[i]
x2, y2 = vertices_2d[j]
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(linewidth)
ctx.move_to(x1, y1)
ctx.line_to(x2, y2)
ctx.stroke()
ctx.set_line_width(linewidth * 2)
for x, y in vertices_2d:
ctx.arc(x, y, markersize, 0, 2 * np.pi)
ctx.set_source_rgb(1, 0, 0)
ctx.fill_preserve()
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
surface.finish()
|
Project the vertices of a polytope `P` to its Coxeter plane
and draw the pattern to an SVG image.
The most important parameters are `nodes1` and `nodes2`; they
can be lists/tuples/sets and must partition the Coxeter
diagram of `P` into two disjoint sets such that the nodes within
each set are mutually orthogonal.
|
https://github.com/neozhaoliang/pywonderland/blob/4fc110ba2e7db7db0a0d89369f02c479282239db/src/polytopes/polytopes/coxeter_plane.py#L11-L70
|
try:
import cairocffi as cairo
except ImportError:
import cairo
import numpy as np
from . import helpers
|
MIT License
|
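A numeric sketch of the projection step in `draw_on_coxeter_plane` above: the extreme eigenvectors of C = 2 * M * M^T (built from the mirror normals) are mapped back through the mirrors and used as the 2D basis. The mirror matrix and vertices below are random placeholders, not real polytope data.

import numpy as np

M = np.eye(3)                    # placeholder mirror normals (hypothetical)
vertices = np.random.rand(8, 3)  # placeholder vertex coordinates (hypothetical)

C = 2 * np.dot(M, M.T)
eigenvals, eigenvecs = np.linalg.eigh(C)
u = np.dot(eigenvecs[:, 0], M)   # eigenvector of the smallest eigenvalue
v = np.dot(eigenvecs[:, -1], M)  # eigenvector of the largest eigenvalue
u /= np.linalg.norm(u)           # helpers.normalize in the original
v /= np.linalg.norm(v)
vertices_2d = [(np.dot(u, x), np.dot(v, x)) for x in vertices]
print(vertices_2d[0])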
vrenkens/nabu
|
nabu/neuralnetworks/trainers/trainer.py
|
Trainer.aditional_loss
|
python
|
def aditional_loss(self):
|
add an aditional loss
returns:
the aditional loss or None
|
https://github.com/vrenkens/nabu/blob/39deb62d182c7036c72f0f7eeb1d5c8eb0fb20fd/nabu/neuralnetworks/trainers/trainer.py#L816-L820
|
import os
import time
import cPickle as pickle
from abc import ABCMeta, abstractmethod
from math import ceil
import tensorflow as tf
from tensorflow.python.client import device_lib
from nabu.processing import input_pipeline
from nabu.neuralnetworks.trainers import loss_functions
from nabu.neuralnetworks.models.model import Model
from nabu.neuralnetworks.evaluators import evaluator_factory
from nabu.neuralnetworks.components import hooks, ops, constraints
from nabu.tools.default_conf import apply_defaults
class Trainer(object):
__metaclass__ = ABCMeta
def __init__(self,
conf,
dataconf,
modelconf,
evaluatorconf,
expdir,
server,
task_index):
self.conf = dict(conf.items('trainer'))
default = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'defaults',
type(self).__name__.lower() + '.cfg')
apply_defaults(self.conf, default)
self.dataconf = dataconf
self.evaluatorconf = evaluatorconf
self.expdir = expdir
self.server = server
self.task_index = task_index
if ('norm_constraint' in self.conf
and self.conf['norm_constraint'] != 'None'):
constraint = constraints.MaxNorm(int(self.conf['norm_constraint']))
else:
constraint = None
self.model = Model(
conf=modelconf,
trainlabels=int(self.conf['trainlabels']),
constraint=constraint)
def _create_graph(self):
cluster = tf.train.ClusterSpec(self.server.server_def.cluster)
outputs = {}
device, chief_ps = self._device(cluster)
outputs['global_step'] = tf.get_variable(
name='global_step',
shape=[],
dtype=tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
outputs['increment_step'] = outputs['global_step'].assign_add(1).op
should_terminate = tf.get_variable(
name='should_terminate',
shape=[],
dtype=tf.bool,
initializer=tf.constant_initializer(False),
trainable=False)
outputs['terminate'] = should_terminate.assign(True).op
if [x for x in device_lib.list_local_devices()
if x.device_type == 'GPU']:
outputs['memory_usage'] = tf.contrib.memory_stats.MaxBytesInUse()
outputs['memory_limit'] = tf.contrib.memory_stats.BytesLimit()
else:
outputs['memory_usage'] = outputs['memory_limit'] = tf.no_op()
with tf.device(device):
with tf.variable_scope('train'):
outputs['done'] = self._done(cluster)
(inputs,
input_seq_length,
targets,
target_seq_length,
num_steps,
outputs['read_data'],
outputs['local_steps']) = self._data(chief_ps)
outputs['num_steps'] = num_steps*int(self.conf['num_epochs'])
outputs['should_stop'] = tf.logical_or(
tf.greater_equal(
outputs['global_step'],
outputs['num_steps']),
should_terminate)
logits, logit_seq_length = self.model(
inputs=inputs,
input_seq_length=input_seq_length,
targets=targets,
target_seq_length=target_seq_length,
is_training=True)
learning_rate_fact = tf.get_variable(
name='learning_rate_fact',
shape=[],
initializer=tf.constant_initializer(1.0),
trainable=False)
outputs['learning_rate'] = (tf.train.exponential_decay(
learning_rate=float(self.conf['initial_learning_rate']),
global_step=outputs['global_step'],
decay_steps=outputs['num_steps'],
decay_rate=float(self.conf['learning_rate_decay']))
* learning_rate_fact)
outputs['loss'] = loss_functions.factory(
self.conf['loss'])(
targets,
logits,
logit_seq_length,
target_seq_length)
aditional_loss = self.aditional_loss()
if aditional_loss is not None:
outputs['loss'] += aditional_loss
outputs['loss'] += tf.reduce_sum(
tf.losses.get_regularization_losses())
outputs['update_op'] = self._update(
loss=outputs['loss'],
learning_rate=outputs['learning_rate'],
cluster=cluster)
if self.evaluatorconf.get('evaluator', 'evaluator') != 'None':
with tf.variable_scope('validate'):
validated_step = tf.get_variable(
name='validated_step',
shape=[],
dtype=tf.int32,
initializer=tf.constant_initializer(
-int(self.conf['valid_frequency'])),
trainable=False)
outputs['should_validate'] = tf.greater_equal(
outputs['global_step'] - validated_step,
int(self.conf['valid_frequency']))
with tf.variable_scope('validation'):
outputs['validation_loss'], outputs['update_loss'], outputs['valbatches'] = self._validate()
outputs['half_lr'] = learning_rate_fact.assign(
learning_rate_fact/2).op
outputs['update_validated_step'] = validated_step.assign(
outputs['global_step']).op
outputs['best_validation'] = tf.get_variable(
name='best_validation',
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(1.79e+308),
trainable=False)
outputs['update_best'] = outputs['best_validation'].assign(
outputs['validation_loss']).op
waiting_workers = tf.get_variable(
name='waiting_workers',
shape=[],
dtype=tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
outputs['waiting'] = waiting_workers.assign_add(1).op
outputs['reset_waiting'] = waiting_workers.initializer
if 'local' in cluster.as_dict():
outputs['all_waiting'] = tf.constant(True)
else:
outputs['all_waiting'] = tf.equal(
waiting_workers,
len(cluster.as_dict()['worker'])-1)
outputs['val_loss_summary'] = tf.summary.scalar(
'validation loss',
outputs['validation_loss'])
outputs['init_validation'] = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
'validate/validation'))
else:
outputs['update_loss'] = None
tf.summary.scalar('learning rate', outputs['learning_rate'],
collections=['training_summaries'])
for param in tf.trainable_variables():
tf.summary.histogram(param.name, param,
collections=['training_summaries'])
outputs['training_summaries'] = tf.summary.merge_all(
'training_summaries')
outputs['eval_summaries'] = tf.summary.merge_all('eval_summaries')
if outputs['eval_summaries'] is None:
outputs['eval_summaries'] = tf.no_op()
return outputs
def _data(self, chief_ps):
with tf.name_scope('get_batch'):
input_names = self.model.conf.get('io', 'inputs').split(' ')
if input_names == ['']:
input_names = []
input_sections = [self.conf[i].split(' ') for i in input_names]
input_dataconfs = []
for sectionset in input_sections:
input_dataconfs.append([])
for section in sectionset:
input_dataconfs[-1].append(
dict(self.dataconf.items(section)))
output_names = self.conf['targets'].split(' ')
if output_names == ['']:
output_names = []
target_sections = [self.conf[o].split(' ') for o in output_names]
target_dataconfs = []
for sectionset in target_sections:
target_dataconfs.append([])
for section in sectionset:
target_dataconfs[-1].append(
dict(self.dataconf.items(section)))
if chief_ps is None:
data_queue_elements, _ = input_pipeline.get_filenames(
input_dataconfs + target_dataconfs)
data_queue = tf.train.string_input_producer(
string_tensor=data_queue_elements,
shuffle=True,
seed=None,
capacity=int(self.conf['batch_size'])*2,
shared_name='data_queue')
else:
with tf.device(chief_ps):
data_queue = tf.FIFOQueue(
capacity=int(self.conf['batch_size'])*2,
shared_name='data_queue',
name='data_queue',
dtypes=[tf.string],
shapes=[[]])
data, seq_length, num_steps, max_length = input_pipeline.input_pipeline(
data_queue=data_queue,
batch_size=int(self.conf['batch_size']),
numbuckets=int(self.conf['numbuckets']),
dataconfs=input_dataconfs + target_dataconfs,
variable_batch_size=(
self.conf['variable_batch_size'] == 'True')
)
if int(self.conf['cut_sequence_length']):
assertops = [tf.assert_equal(seq_length[0], l)
for l in seq_length]
with tf.control_dependencies(assertops):
read_ops = []
components = []
component_lengths = []
for i, batch in enumerate(data):
cut, cut_length, read_op, num_local_steps = _cut_sequence(
batch,
seq_length[i],
int(self.conf['cut_sequence_length']),
max_length)
components.append(cut)
component_lengths.append(cut_length)
read_ops.append(read_op)
else:
num_local_steps = tf.constant(1)
queues = [tf.FIFOQueue(1, b.dtype) for b in data]
length_queues = [tf.FIFOQueue(1, b.dtype) for b in seq_length]
components = [q.dequeue() for q in queues]
component_lengths = [q.dequeue() for q in length_queues]
for i, c in enumerate(components):
c.set_shape(data[i].shape)
component_lengths[i].set_shape(seq_length[i].shape)
read_ops = [q.enqueue(data[i]) for i, q in enumerate(queues)]
read_ops += [q.enqueue(seq_length[i])
for i, q in enumerate(length_queues)]
read_data = tf.group(*read_ops)
inputs = {
input_names[i]: d
for i, d in enumerate(components[:len(input_sections)])}
input_seq_length = {
input_names[i]: d
for i, d in enumerate(component_lengths[:len(input_sections)])}
targets = {
output_names[i]: d
for i, d in enumerate(components[len(input_sections):])}
target_seq_length = {
output_names[i]: d
for i, d in enumerate(component_lengths[len(input_sections):])}
return (inputs,
input_seq_length,
targets,
target_seq_length,
num_steps,
read_data,
num_local_steps)
def _done(self, cluster):
if 'local' in cluster.as_dict():
done = tf.no_op()
else:
num_servers = len(cluster.as_dict()['ps'])
num_replicas = len(cluster.as_dict()['worker'])
done_ops = []
for i in range(num_servers):
with tf.device('job:ps/task:%d' % i):
done_queue = tf.FIFOQueue(
capacity=num_replicas,
dtypes=[tf.bool],
shapes=[[]],
shared_name='done_queue%d' % i,
name='done_queue%d' % i
)
done_ops.append(done_queue.enqueue(True))
done = tf.group(*done_ops)
return done
def _validate(self):
evaltype = self.evaluatorconf.get('evaluator', 'evaluator')
if evaltype != 'None':
evaluator = evaluator_factory.factory(evaltype)(
conf=self.evaluatorconf,
dataconf=self.dataconf,
model=self.model
)
return evaluator.evaluate()
def _device(self, cluster):
if 'local' in cluster.as_dict():
device = tf.DeviceSpec(job='local')
chief_ps = None
else:
num_servers = len(cluster.as_dict()['ps'])
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
num_tasks=num_servers,
load_fn=tf.contrib.training.byte_size_load_fn
)
device = tf.train.replica_device_setter(
ps_tasks=num_servers,
ps_strategy=ps_strategy,
worker_device='/job:worker/task:%d' % self.task_index,
cluster=cluster)
chief_ps = tf.DeviceSpec(
job='ps',
task=0)
return device, chief_ps
def _update(self, loss, learning_rate, cluster):
optimizer = tf.train.AdamOptimizer(learning_rate)
if int(self.conf['numbatches_to_aggregate']) > 0:
if 'local' in cluster.as_dict():
num_workers = 1
else:
num_workers = len(cluster.as_dict()['worker'])
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=int(
self.conf['numbatches_to_aggregate']),
total_num_replicas=num_workers)
tf.summary.scalar('training_loss', loss,
collections=['training_summaries'])
trainable = tf.trainable_variables()
untrainable = tf.get_collection('untrainable')
trainable = [var for var in trainable
if var not in untrainable]
grads_and_vars = optimizer.compute_gradients(
loss=loss,
var_list=trainable)
with tf.variable_scope('clip'):
grads_and_vars = [(tf.clip_by_value(grad, -1., 1.), var)
for grad, var in grads_and_vars]
apply_gradients_op = optimizer.apply_gradients(
grads_and_vars=grads_and_vars,
name='apply_gradients')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_op = tf.group(
*([apply_gradients_op] + update_ops),
name='update')
return update_op
def train(self, testing=False):
master = self.server.target
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
num_tries = 0
is_chief = self.task_index == 0
graph = tf.Graph()
with graph.as_default():
outputs = self._create_graph()
scaffold = tf.train.Scaffold()
if testing:
return
with graph.as_default():
save_hook = hooks.SaveAtEnd(
os.path.join(self.expdir, 'model', 'network.ckpt'),
self.model.variables)
validation_hook = hooks.ValidationSaveHook(
os.path.join(self.expdir, 'logdir', 'validated.ckpt'),
self.model)
with tf.train.MonitoredTrainingSession(
master=master,
is_chief=is_chief,
checkpoint_dir=os.path.join(self.expdir, 'logdir'),
scaffold=scaffold,
hooks=[hooks.StopHook(outputs['done'])] + self.hooks(outputs),
chief_only_hooks=[save_hook, validation_hook] + self.chief_only_hooks(outputs),
config=config) as sess:
summary_writer = tf.summary.FileWriter(
os.path.join(self.expdir, 'logdir'), graph)
while not (sess.should_stop() or
outputs['should_stop'].eval(session=sess)):
if (outputs['update_loss'] is not None
and outputs['should_validate'].eval(session=sess)):
if is_chief:
print ('WORKER %d: validating model'
% self.task_index)
prev_val_loss = outputs['best_validation'].eval(
session=sess)
outputs['init_validation'].run(session=sess)
for i in range(outputs['valbatches']):
_, summary = sess.run(fetches=[
outputs['update_loss'],
outputs['eval_summaries']])
if summary is not None:
summary_writer.add_summary(summary, i)
summary, global_step = sess.run(fetches=[
outputs['val_loss_summary'],
outputs['global_step']
])
summary_writer.add_summary(summary, global_step)
validation_loss = outputs['validation_loss'].eval(
session=sess)
print ('WORKER %d: validation loss: %f' %
(self.task_index, validation_loss))
if validation_loss >= prev_val_loss:
print ('WORKER %d: validation loss is worse' %
self.task_index)
if self.conf['num_tries'] != 'None':
if num_tries == int(self.conf['num_tries']):
validation_hook.restore()
print ('WORKER %d: terminating training'
% self.task_index)
outputs['terminate'].run(session=sess)
break
num_tries += 1
if self.conf['go_back'] == 'True':
while not outputs['all_waiting'].eval(
session=sess):
time.sleep(1)
outputs['reset_waiting'].run(session=sess)
print ('WORKER %d: loading previous model'
% self.task_index)
validation_hook.restore()
else:
outputs['update_validated_step'].run(
session=sess)
if self.conf['valid_adapt'] == 'True':
print ('WORKER %d: halving learning rate'
% self.task_index)
outputs['half_lr'].run(session=sess)
validation_hook.save()
else:
if self.conf['reset_tries'] == 'True':
num_tries = 0
outputs['update_validated_step'].run(
session=sess)
outputs['update_best'].run(session=sess)
outputs['reset_waiting'].run(session=sess)
validation_hook.save()
else:
if (self.conf['go_back'] == 'True'
and self.update_loss is not None):
outputs['waiting'].run(session=sess)
while (
outputs['should_validate'].eval(
session=sess)
and not
outputs['should_stop'].eval(
session=sess)):
time.sleep(1)
if outputs['should_stop'].eval(session=sess):
break
start = time.time()
local_steps, _ = sess.run([outputs['local_steps'],
outputs['read_data']])
for _ in range(local_steps):
_, loss, lr, global_step, memory, limit, summary = sess.run(
fetches=[outputs['update_op'],
outputs['loss'],
outputs['learning_rate'],
outputs['global_step'],
outputs['memory_usage'],
outputs['memory_limit'],
outputs['training_summaries']])
summary_writer.add_summary(summary, global_step)
if memory is not None:
memory_line = '\n\t peak memory usage: %d/%d MB' % (
memory/1e6,
limit/1e6
)
else:
memory_line = ''
print(('WORKER %d: step %d/%d loss: %f, learning rate:'
' %f \n\t time elapsed: %f sec%s')
%(self.task_index,
global_step,
outputs['num_steps'],
loss, lr, time.time()-start,
memory_line))
outputs['increment_step'].run(session=sess)
modelfile = os.path.join(self.expdir, 'model', 'model.pkl')
with open(modelfile, 'wb') as fid:
pickle.dump(self.model, fid)
@abstractmethod
def chief_only_hooks(self, outputs):
@abstractmethod
def hooks(self, outputs):
@abstractmethod
|
MIT License
|
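A hypothetical sketch of how a concrete trainer could override the `aditional_loss` hook documented above, assuming the `Trainer` class from the module path shown is importable; the L2 penalty and its weight are made up, and the other abstract hooks (`hooks`, `chief_only_hooks`) would still need implementations.

import tensorflow as tf
from nabu.neuralnetworks.trainers.trainer import Trainer

class L2PenaltyTrainer(Trainer):
    def aditional_loss(self):
        # A scalar tensor returned here is added to outputs['loss'] in
        # _create_graph; returning None leaves the training loss unchanged.
        return 1e-4 * tf.add_n([tf.nn.l2_loss(v) for v in self.model.variables])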
sony/nnabla
|
python/src/nnabla/backward_function/sinh.py
|
sinh_backward
|
python
|
def sinh_backward(inputs):
dy = inputs[0]
x0 = inputs[1]
dx0 = dy * F.cosh(x0)
return dx0
|
Args:
inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
|
https://github.com/sony/nnabla/blob/fef9b6bca02a002de880a13f3196df14369445f4/python/src/nnabla/backward_function/sinh.py#L19-L31
|
import nnabla.functions as F
|
Apache License 2.0
|
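A plain-NumPy check (not nnabla) of the rule used by `sinh_backward` above: since d/dx sinh(x) = cosh(x), the gradient propagated to x0 is dy * cosh(x0).

import numpy as np

x0 = np.linspace(-2.0, 2.0, 5)
dy = np.ones_like(x0)                      # incoming gradient
eps = 1e-6
finite_diff = dy * (np.sinh(x0 + eps) - np.sinh(x0 - eps)) / (2 * eps)
analytic = dy * np.cosh(x0)                # what sinh_backward computes
print(np.allclose(finite_diff, analytic, atol=1e-6))  # True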
googleapis/python-bigtable
|
google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
|
BigtableTableAdminAsyncClient.list_tables
|
python
|
async def list_tables(
self,
request: bigtable_table_admin.ListTablesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTablesAsyncPager:
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = bigtable_table_admin.ListTablesRequest(request)
if parent is not None:
request.parent = parent
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tables,
default_retry=retries.Retry(
initial=1.0,
maximum=60.0,
multiplier=2,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = pagers.ListTablesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
return response
|
r"""Lists all tables served from a specified instance.
Args:
request (:class:`google.cloud.bigtable_admin_v2.types.ListTablesRequest`):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
parent (:class:`str`):
Required. The unique name of the instance for which
tables should be listed. Values are of the form
``projects/{project}/instances/{instance}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager:
Response message for
[google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
Iterating over this object will yield results and
resolve additional pages automatically.
|
https://github.com/googleapis/python-bigtable/blob/a99bf88417d6aec03923447c70c2752f6bb5c459/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py#L408-L497
|
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials
from google.oauth2 import service_account
from google.api_core import operation
from google.api_core import operation_async
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport
from .client import BigtableTableAdminClient
class BigtableTableAdminAsyncClient:
_client: BigtableTableAdminClient
DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
backup_path = staticmethod(BigtableTableAdminClient.backup_path)
parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path)
cluster_path = staticmethod(BigtableTableAdminClient.cluster_path)
parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path)
crypto_key_version_path = staticmethod(
BigtableTableAdminClient.crypto_key_version_path
)
parse_crypto_key_version_path = staticmethod(
BigtableTableAdminClient.parse_crypto_key_version_path
)
instance_path = staticmethod(BigtableTableAdminClient.instance_path)
parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path)
snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path)
parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path)
table_path = staticmethod(BigtableTableAdminClient.table_path)
parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path)
common_billing_account_path = staticmethod(
BigtableTableAdminClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
BigtableTableAdminClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path)
parse_common_folder_path = staticmethod(
BigtableTableAdminClient.parse_common_folder_path
)
common_organization_path = staticmethod(
BigtableTableAdminClient.common_organization_path
)
parse_common_organization_path = staticmethod(
BigtableTableAdminClient.parse_common_organization_path
)
common_project_path = staticmethod(BigtableTableAdminClient.common_project_path)
parse_common_project_path = staticmethod(
BigtableTableAdminClient.parse_common_project_path
)
common_location_path = staticmethod(BigtableTableAdminClient.common_location_path)
parse_common_location_path = staticmethod(
BigtableTableAdminClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> BigtableTableAdminTransport:
return self._client.transport
get_transport_class = functools.partial(
type(BigtableTableAdminClient).get_transport_class,
type(BigtableTableAdminClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._client = BigtableTableAdminClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_table(
self,
request: bigtable_table_admin.CreateTableRequest = None,
*,
parent: str = None,
table_id: str = None,
table: gba_table.Table = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gba_table.Table:
has_flattened_params = any([parent, table_id, table])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = bigtable_table_admin.CreateTableRequest(request)
if parent is not None:
request.parent = parent
if table_id is not None:
request.table_id = table_id
if table is not None:
request.table = table
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_table,
default_timeout=300.0,
client_info=DEFAULT_CLIENT_INFO,
)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
return response
async def create_table_from_snapshot(
self,
request: bigtable_table_admin.CreateTableFromSnapshotRequest = None,
*,
parent: str = None,
table_id: str = None,
source_snapshot: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
has_flattened_params = any([parent, table_id, source_snapshot])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = bigtable_table_admin.CreateTableFromSnapshotRequest(request)
if parent is not None:
request.parent = parent
if table_id is not None:
request.table_id = table_id
if source_snapshot is not None:
request.source_snapshot = source_snapshot
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_table_from_snapshot,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
table.Table,
metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata,
)
return response
|
Apache License 2.0
|
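A usage sketch for the async `list_tables` method above; it assumes valid Google Cloud credentials are available in the environment, and the project and instance names are placeholders.

import asyncio
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminAsyncClient,
)

async def main():
    client = BigtableTableAdminAsyncClient()
    parent = "projects/my-project/instances/my-instance"  # placeholder resource name
    pager = await client.list_tables(parent=parent)
    async for table in pager:          # the pager fetches additional pages lazily
        print(table.name)

asyncio.run(main())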
brettcannon/desugar
|
desugar/operator.py
|
__contains__
|
python
|
def __contains__(container: Any, item: Any, /) -> bool:
container_type = type(container)
try:
contains_method = debuiltins._mro_getattr(container_type, "__contains__")
except AttributeError:
return debuiltins.any(x is item or x == item for x in container)
else:
if contains_method is None:
raise TypeError(f"{container_type.__name__!r} object is not a container")
is_contained = contains_method(container, item)
return truth(is_contained)
|
Check if the first item contains the second item: `b in a`.
|
https://github.com/brettcannon/desugar/blob/806cba8d49190b47dc3bbdb5a6a8179010a059e5/desugar/operator.py#L307-L319
|
from __future__ import annotations
import typing
from . import builtins as debuiltins
if typing.TYPE_CHECKING:
from typing import Any, Callable
class _BinaryOp(typing.Protocol):
__doc__: str
__name__: str
__qualname__: str
_operator: str
def __call__(self, lhs: Any, rhs: Any, /) -> Any:
...
class _Missing:
_MISSING = _Missing()
def _is_proper_subclass(subcls: type, supercls: type, /):
return (subcls is not supercls) and issubclass(subcls, supercls)
def _create_unary_op(name: str, operator: str) -> Callable[[Any], Any]:
method_name = f"__{name}__"
def unary_op(object_: Any, /) -> Any:
type_ = type(object_)
try:
unary_method = debuiltins._mro_getattr(type_, method_name)
except AttributeError:
raise TypeError(f"bad operand type for unary {operator}: {type_!r}")
else:
return unary_method(object_)
unary_op.__name__ = unary_op.__qualname__ = method_name
unary_op.__doc__ = f"Implement the unary operation `{operator} a`."
return unary_op
neg = __neg__ = _create_unary_op("neg", "-")
pos = __pos__ = _create_unary_op("pos", "+")
inv = __inv__ = invert = __invert__ = _create_unary_op("invert", "~")
def _create_binary_op(name: str, operator: str) -> _BinaryOp:
lhs_method_name = f"__{name}__"
def binary_op(lhs: Any, rhs: Any, /) -> Any:
rhs_method_name = f"__r{name}__"
lhs_type = type(lhs)
try:
lhs_method = debuiltins._mro_getattr(lhs_type, lhs_method_name)
except AttributeError:
lhs_method = _MISSING
try:
lhs_rmethod = debuiltins._mro_getattr(lhs_type, rhs_method_name)
except AttributeError:
lhs_rmethod = _MISSING
rhs_type = type(rhs)
try:
rhs_method = debuiltins._mro_getattr(rhs_type, rhs_method_name)
except AttributeError:
rhs_method = _MISSING
call_lhs = lhs, lhs_method, rhs
call_rhs = rhs, rhs_method, lhs
if (
_is_proper_subclass(rhs_type, lhs_type)
and lhs_rmethod is not rhs_method
):
calls = call_rhs, call_lhs
elif lhs_type is not rhs_type:
calls = call_lhs, call_rhs
else:
calls = (call_lhs,)
for first_obj, meth, second_obj in calls:
if meth is _MISSING:
continue
value = meth(first_obj, second_obj)
if value is not NotImplemented:
return value
else:
exc = TypeError(
f"unsupported operand type(s) for {operator}: {lhs_type!r} and {rhs_type!r}"
)
exc._binary_op = operator
raise exc
binary_op.__name__ = binary_op.__qualname__ = lhs_method_name
binary_op._operator = operator
binary_op.__doc__ = f"""Implement the binary operation `a {operator} b`."""
return binary_op
add = __add__ = _create_binary_op("add", "+")
sub = __sub__ = _create_binary_op("sub", "-")
mul = __mul__ = _create_binary_op("mul", "*")
matmul = __matmul__ = _create_binary_op("matmul", "@")
truediv = __truediv__ = _create_binary_op("truediv", "/")
floordiv = __floordiv__ = _create_binary_op("floordiv", "//")
mod = __mod__ = _create_binary_op("mod", "%")
pow = __pow__ = _create_binary_op("pow", "**")
lshift = __lshift__ = _create_binary_op("lshift", "<<")
rshift = __rshift__ = _create_binary_op("rshift", ">>")
and_ = __and__ = _create_binary_op("and", "&")
xor = __xor__ = _create_binary_op("xor", "^")
or_ = __or__ = _create_binary_op("or", "|")
def _create_binary_inplace_op(binary_op: _BinaryOp) -> Callable[[Any, Any], Any]:
binary_operation_name = binary_op.__name__[2:-2]
method_name = f"__i{binary_operation_name}__"
operator = f"{binary_op._operator}="
def binary_inplace_op(lvalue: Any, rvalue: Any, /) -> Any:
lvalue_type = type(lvalue)
try:
method = debuiltins._mro_getattr(lvalue_type, method_name)
except AttributeError:
pass
else:
value = method(lvalue, rvalue)
if value is not NotImplemented:
return value
try:
return binary_op(lvalue, rvalue)
except TypeError as exc:
if exc._binary_op != binary_op._operator:
raise
raise TypeError(
f"unsupported operand type(s) for {operator}: {lvalue_type!r} and {type(rvalue)!r}"
)
binary_inplace_op.__name__ = binary_inplace_op.__qualname__ = method_name
binary_inplace_op.__doc__ = (
f"""Implement the augmented arithmetic assignment `a {operator} b`."""
)
return binary_inplace_op
iadd = __iadd__ = _create_binary_inplace_op(__add__)
isub = __isub__ = _create_binary_inplace_op(__sub__)
imul = __imul__ = _create_binary_inplace_op(__mul__)
imatmul = __imatmul__ = _create_binary_inplace_op(__matmul__)
itruediv = __itruediv__ = _create_binary_inplace_op(__truediv__)
ifloordiv = __ifloordiv__ = _create_binary_inplace_op(__floordiv__)
imod = __imod__ = _create_binary_inplace_op(__mod__)
ipow = __ipow__ = _create_binary_inplace_op(__pow__)
ilshift = __ilshift__ = _create_binary_inplace_op(__lshift__)
irshift = __irshift__ = _create_binary_inplace_op(__rshift__)
iand = __iand__ = _create_binary_inplace_op(__and__)
ixor = __ixor__ = _create_binary_inplace_op(__xor__)
ior = __ior__ = _create_binary_inplace_op(__or__)
def _create_rich_comparison(
operator: str, name: str, reflection: str, default: Callable[[str, Any, Any], bool]
) -> Callable[[Any, Any], Any]:
def _rich_comparison(lhs: Any, rhs: Any, /) -> Any:
lhs_type = type(lhs)
try:
lhs_method = debuiltins._mro_getattr(lhs_type, name)
except AttributeError:
lhs_method = _MISSING
rhs_type = type(rhs)
try:
rhs_method = debuiltins._mro_getattr(rhs_type, reflection)
except AttributeError:
rhs_method = _MISSING
call_lhs = lhs, lhs_method, rhs
call_rhs = rhs, rhs_method, lhs
if _is_proper_subclass(rhs_type, lhs_type):
calls = call_rhs, call_lhs
else:
calls = call_lhs, call_rhs
for first_obj, meth, second_obj in calls:
if meth is _MISSING:
continue
value = meth(first_obj, second_obj)
if value is not NotImplemented:
return value
else:
return default(operator, lhs, rhs)
_rich_comparison.__name__ = _rich_comparison.__qualname__ = name
_rich_comparison.__doc__ = f"Implement the rich comparison `a {operator} b`."
return _rich_comparison
def _rich_comparison_unsupported(operator: str, lhs: Any, rhs: Any) -> None:
raise TypeError(
f"unsupported operand type(s) for {operator!r}: {type(lhs)!r} and {type(rhs)!r}"
)
gt = __gt__ = _create_rich_comparison(
">", "__gt__", "__lt__", _rich_comparison_unsupported
)
lt = __lt__ = _create_rich_comparison(
"<", "__lt__", "__gt__", _rich_comparison_unsupported
)
ge = __ge__ = _create_rich_comparison(
">=", "__ge__", "__le__", _rich_comparison_unsupported
)
le = __le__ = _create_rich_comparison(
"<=", "__le__", "__ge__", _rich_comparison_unsupported
)
eq = __eq__ = _create_rich_comparison(
"==", "__eq__", "__eq__", lambda _, a, b: id(a) == id(b)
)
ne = __ne__ = _create_rich_comparison(
"!=", "__ne__", "__ne__", lambda _, a, b: id(a) != id(b)
)
def is_(a: Any, b: Any, /) -> bool:
return id(a) == id(b)
def is_not(a: Any, b: Any, /) -> bool:
return id(a) != id(b)
def index(obj: Any, /) -> int:
return debuiltins._index(obj)
def truth(obj: Any, /) -> bool:
return debuiltins._is_true(obj)
def not_(a: Any, /) -> bool:
return False if truth(a) else True
__not__ = not_
|
MIT License
|
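A short illustration of the membership protocol the function above desugars: Python's built-in `in` first tries `__contains__` and otherwise falls back to iterating and comparing by identity or equality, matching the two branches shown.

class Bag:
    def __init__(self, items):
        self._items = list(items)
    def __contains__(self, item):      # explicit __contains__ branch
        return item in self._items

class Numbers:
    def __iter__(self):                # no __contains__: iteration fallback
        return iter([1, 2, 3])

print(2 in Bag([1, 2, 3]))   # True, via Bag.__contains__
print(2 in Numbers())        # True, via the iteration fallback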
houchengbin/openane
|
src/libnrl/graphsage/metrics.py
|
masked_l2
|
python
|
def masked_l2(preds, actuals, mask):
loss = tf.nn.l2(preds, actuals)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
|
L2 loss with masking.
|
https://github.com/houchengbin/openane/blob/ddb0abbae5e5b6c13c7dad92887282c7493d7da0/src/libnrl/graphsage/metrics.py#L28-L34
|
import tensorflow as tf
def masked_logit_cross_entropy(preds, labels, mask):
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(loss)
def masked_softmax_cross_entropy(preds, labels, mask):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(loss)
|
MIT License
|
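A sketch of the masking pattern shared by the metrics above: per-example losses are scaled by a mask normalized to mean one, then averaged. A plain squared-error term stands in for the L2 call, since `tf.nn.l2` is not a public TensorFlow function (the repository likely intended an L2-style loss).

import tensorflow as tf

def masked_mse(preds, actuals, mask):
    loss = tf.reduce_sum(tf.square(preds - actuals), axis=-1)  # per-example loss
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)       # rescale so kept examples keep unit weight
    loss *= mask
    return tf.reduce_mean(loss)

preds = tf.constant([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])
actuals = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
mask = tf.constant([1.0, 1.0, 0.0])    # the third example is ignored
print(masked_mse(preds, actuals, mask))  # tf.Tensor(0.25, ...)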
kklmn/xrt
|
tests/raycing/test_materials.py
|
compare_reflectivity_coated
|
python
|
def compare_reflectivity_coated():
def for_one_material(stripe, strOnly, refs, refp, theta, reprAngle):
fig = plt.figure(figsize=(8, 6), dpi=100)
fig.subplots_adjust(right=0.86)
ax = fig.add_subplot(111)
ax.set_xlabel('energy (keV)')
ax.set_ylabel('reflectivity')
ax.set_ylim(1e-7, 2)
fig.suptitle(stripe.name + ' ' + reprAngle, fontsize=16)
x, R2s = np.loadtxt(refs, unpack=True, skiprows=2, usecols=(0, 1))
p1, = ax.semilogy(x*1e-3, R2s, '-k', label='s CXRO')
x, R2s = np.loadtxt(refp, unpack=True, skiprows=2, usecols=(0, 1))
p2, = ax.semilogy(x*1e-3, R2s, '--k', label='p CXRO')
refl = stripe.get_amplitude(E, math.sin(theta))
rs, rp = refl[0], refl[1]
rs0, rp0 = strOnly.get_amplitude(E, math.sin(theta))[0:2]
p3, = ax.semilogy(E*1e-3, abs(rs)**2, '-r')
p4, = ax.semilogy(E*1e-3, abs(rp)**2, '--r')
p5, = ax.semilogy(E*1e-3, abs(rs0)**2, '-m')
p6, = ax.semilogy(E*1e-3, abs(rp0)**2, '--c')
l1 = ax.legend([p1, p2], ['s', 'p'], loc=3)
ax.legend([p1, p3, p5], ['CXRO', 'xrt', 'bulk coating'], loc=1)
ax.add_artist(l1)
ax2 = ax.twinx()
ax2.set_ylabel(r'$\phi_s - \phi_p$', color='c')
phi = np.unwrap(np.angle(rs * rp.conj()))
p9, = ax2.plot(E*1e-3, phi, 'c', lw=2, yunits=math.pi, zorder=0)
formatter = mpl.ticker.FormatStrFormatter('%g' + r'$ \pi$')
ax2.yaxis.set_major_formatter(formatter)
for tl in ax2.get_yticklabels():
tl.set_color('c')
ax2.set_zorder(-1)
ax.patch.set_visible(False)
fname = 'MirrorRefl' + stripe.name + "".join(reprAngle.split())
fig.savefig(fname + '.png')
dataDir = os.path.join('', 'CXRO-Reflectivities')
E = np.logspace(2., 4.+math.log10(4.), 1000)
mSi = rm.Material('Si', rho=2.33)
mSiO2 = rm.Material(('Si', 'O'), quantities=(1, 2), rho=2.65)
mRh = rm.Material('Rh', rho=12.41, kind='mirror')
mC = rm.Material('C', rho=3.5, kind='mirror')
cRhSi = rm.Coated(coating=mRh, cThickness=300,
substrate=mSi, surfaceRoughness=20,
substRoughness=20, name='30 nm Rh on Si')
cCSiO2 = rm.Coated(coating=mC, cThickness=200,
substrate=mSiO2, surfaceRoughness=10,
substRoughness=10, name='20 nm Diamond on Quartz')
for_one_material(cRhSi, mRh,
os.path.join(dataDir, "RhSi_s_rough2.CXRO.gz"),
os.path.join(dataDir, "RhSi_p_rough2.CXRO.gz"),
4e-3, '@ 4 mrad,\nRMS roughness 2 nm')
for_one_material(cCSiO2, mC,
os.path.join(dataDir, "CSiO2_s_rough1.CXRO.gz"),
os.path.join(dataDir, "CSiO2_p_rough1.CXRO.gz"),
np.radians(0.2), '@ 0.2 deg,\nRMS roughness 1 nm')
|
A comparison subroutine used in the module test suite.
|
https://github.com/kklmn/xrt/blob/b8a9d4a06e1d0c35cf6d5055d6229ae55346856f/tests/raycing/test_materials.py#L726-L788
|
__author__ = "Konstantin Klementiev"
__date__ = "12 Mar 2014"
import math
import numpy as np
import matplotlib as mpl
mpl.rcParams['backend'] = 'Qt5Agg'
import matplotlib.pyplot as plt
import os, sys; sys.path.append(os.path.join('..', '..'))
import xrt.backends.raycing.materials as rm
def compare_rocking_curves(hkl, t=None, geom='Bragg reflected', factDW=1.,
legendPos1=4, legendPos2=1):
def for_one_alpha(crystal, alphaDeg, hkl):
alpha = math.radians(alphaDeg)
s0 = (np.zeros_like(theta), np.cos(theta+alpha), -np.sin(theta+alpha))
sh = (np.zeros_like(theta), np.cos(theta-alpha), np.sin(theta-alpha))
if geom.startswith('Bragg'):
n = (0, 0, 1)
else:
n = (0, -1, 0)
hn = (0, math.sin(alpha), math.cos(alpha))
gamma0 = sum(i*j for i, j in zip(n, s0))
gammah = sum(i*j for i, j in zip(n, sh))
hns0 = sum(i*j for i, j in zip(hn, s0))
fig = plt.figure(figsize=(8, 6), dpi=100)
fig.subplots_adjust(right=0.88)
ax = fig.add_subplot(111)
curS, curP = crystal.get_amplitude(E, gamma0, gammah, hns0)
ax2 = ax.twinx()
ax2.set_ylabel(r'$\phi_s - \phi_p$', color='c')
phi = np.unwrap(np.angle(curS * curP.conj()))
p9, = ax2.plot((theta-thetaCenter) * convFactor, phi, 'c', lw=1,
yunits=math.pi, zorder=0)
formatter = mpl.ticker.FormatStrFormatter('%g' + r'$ \pi$')
ax2.yaxis.set_major_formatter(formatter)
for tl in ax2.get_yticklabels():
tl.set_color('c')
if t is not None:
tt = u', t={0:.0f}µm'.format(t * 1e3)
tname = '{0:03d}mum'.format(int(t * 1e3))
else:
tt = ' thick'
tname = 'thick'
if geom.startswith('Bragg'):
geomPrefix = 'b'
else:
geomPrefix = 'l'
if geom.endswith('transmitted'):
geomPrefix += 't'
fig.suptitle(r'{0} Si{1}, $\alpha={2:.1f}^\circ${3}'.format(geom,
hkl, alphaDeg, tt), fontsize=16)
path = os.path.join('', 'XOP-RockingCurves') + os.sep
x, R2s = np.loadtxt("{0}{1}Si{2}_{3}_{4:-.0f}_s.xc.gz".format(path,
geomPrefix, hkl, tname, alphaDeg), unpack=True)
p1, = ax.plot(x, R2s, '-k', label='s XCrystal')
x, R2p = np.loadtxt("{0}{1}Si{2}_{3}_{4:-.0f}_p.xc.gz".format(path,
geomPrefix, hkl, tname, alphaDeg), unpack=True)
p2, = ax.plot(x, R2p, '--k', label='p XCrystal')
x, R2s = np.loadtxt("{0}{1}Si{2}_{3}_{4:-.0f}_s.xin.gz".format(path,
geomPrefix, hkl, tname, alphaDeg), unpack=True)
p3, = ax.plot(x, R2s, '-b', label='s XInpro')
x, R2p = np.loadtxt("{0}{1}Si{2}_{3}_{4:-.0f}_p.xin.gz".format(path,
geomPrefix, hkl, tname, alphaDeg), unpack=True)
p4, = ax.plot(x, R2p, '--b', label='p XInpro')
p7, = ax.plot((theta - thetaCenter) * convFactor, abs(curS)**2, '-r')
p8, = ax.plot((theta - thetaCenter) * convFactor, abs(curP)**2, '--r')
ax.set_xlabel(r'$\theta-\theta_B$ (arcsec)')
if geom.endswith('transmitted'):
ax.set_ylabel('transmittivity')
else:
ax.set_ylabel('reflectivity')
ax.set_xlim([dtheta[0] * convFactor, dtheta[-1] * convFactor])
l1 = ax2.legend([p1, p2], ['s', 'p'], loc=legendPos1)
ax2.legend([p1, p3, p7], ['XCrystal/XOP', 'XInpro/XOP', 'xrt'],
loc=legendPos2)
ax2.add_artist(l1)
fname = '{0}Si{1}_{2}_{3:-.0f}'.format(
geomPrefix, hkl, tname, alphaDeg)
fig.savefig(fname + '.png')
E0 = 10000.
convFactor = 180 / math.pi * 3600.
if hkl == '111':
if geom.startswith('Bragg'):
dtheta = np.linspace(0, 100, 400) * 1e-6
else:
dtheta = np.linspace(-50, 50, 400) * 1e-6
dSpacing = 3.13562
hklInd = 1, 1, 1
elif hkl == '333':
if geom.startswith('Bragg'):
dtheta = np.linspace(0, 30, 400) * 1e-6
else:
dtheta = np.linspace(-15, 15, 400) * 1e-6
dSpacing = 3.13562 / 3
hklInd = 3, 3, 3
siCrystal = rm.CrystalDiamond(hklInd, dSpacing, t=t, geom=geom,
factDW=factDW)
thetaCenter = math.asin(rm.ch / (2*siCrystal.d*E0))
E = np.ones_like(dtheta) * E0
theta = dtheta + thetaCenter
for_one_alpha(siCrystal, 0., hkl)
for_one_alpha(siCrystal, -5., hkl)
for_one_alpha(siCrystal, 5., hkl)
def compare_rocking_curves_bent(hkl, t=None, geom='Laue reflected', factDW=1.,
legendPos1=4, legendPos2=1, Rcurvmm=None,
alphas=[0]):
try:
import pyopencl as cl
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
isOpenCL = True
except ImportError:
isOpenCL = False
if isOpenCL:
import xrt.backends.raycing.myopencl as mcl
matCL = mcl.XRT_CL(r'materials.cl',
)
else:
matCL = None
"""A comparison subroutine used in the module test suit."""
def for_one_alpha(crystal, alphaDeg, hkl):
xcb_tt = True
xcb_pp = False
Rcurv=np.inf if Rcurvmm is None else Rcurvmm
if Rcurv==0:
Rcurv=np.inf
alpha = np.radians(alphaDeg)
s0 = (np.zeros_like(theta), np.cos(theta+alpha), -np.sin(theta+alpha))
sh = (np.zeros_like(theta), np.cos(theta-alpha), np.sin(theta-alpha))
if geom.startswith('Bragg'):
n = (0, 0, 1)
else:
n = (0, -1, 0)
hn = (0, np.sin(alpha), np.cos(alpha))
gamma0 = sum(i*j for i, j in zip(n, s0))
gammah = sum(i*j for i, j in zip(n, sh))
hns0 = sum(i*j for i, j in zip(hn, s0))
fig = plt.figure(figsize=(8, 6), dpi=100)
fig.subplots_adjust(right=0.88)
ax = fig.add_subplot(111)
curS, curP = crystal.get_amplitude(E, gamma0, gammah, hns0)
curSD, curPD = crystal.get_amplitude_TT(E, gamma0, gammah, hns0,
ucl=matCL, alphaAsym=alpha,
Rcurvmm=Rcurv)
if t is not None:
tt = u', t={0:.0f}µm'.format(t * 1e3)
tname = '{0:03d}mkm'.format(int(t * 1e3))
else:
tt = ' thick'
tname = 'thick'
if geom.startswith('Bragg'):
geomPrefix = 'b'
else:
geomPrefix = 'l'
if geom.endswith('transmitted'):
geomPrefix += 't'
if np.isinf(Rcurv):
fig.suptitle(r'{0} Si{1}, $\alpha={2:.1f}^\circ$, bending R=$\infty$, E={3:.0f}keV'.format(geom,
hkl, alphaDeg, E0/1e3), fontsize=16)
else:
fig.suptitle(r'{0} Si{1}, $\alpha={2:.1f}^\circ$, bending R={3:.1f}m, E={4:.0f}keV'.format(geom,
hkl, alphaDeg, Rcurv/1e3, E0/1e3), fontsize=16)
path = os.path.join('', 'XOP-RockingCurves-Bent') + os.sep
if np.isinf(Rcurv) or alphaDeg == 0:
x, R2p, R2s = np.loadtxt(
"{0}{1}Si{2}_E{3:-.0f}keV_t{4}_R{5:-.0f}m_a{6:-.0f}deg.xc.gz".format(
path, geomPrefix, hkl, E0/1e3, tname, Rcurv/1e3, alphaDeg),
unpack=True, usecols=(0, 2, 3))
p1, = ax.plot(x, R2s, '-k', label='s XCrystal', linewidth=2)
if xcb_tt:
x, R2s = np.loadtxt(
"{0}{1}Si{2}_E{3:-.0f}keV_t{4}_R{5:-.0f}m_a{6:-.0f}deg_tt_sigma.xcb.gz".format(
path, geomPrefix, hkl, E0/1e3, tname, Rcurv/1e3, alphaDeg),
unpack=True, usecols=(0, 3))
p3t, = ax.plot(x*1e3, R2s, '-b', label='s XCrystal_Bent TT')
x, R2p = np.loadtxt(
"{0}{1}Si{2}_E{3:-.0f}keV_t{4}_R{5:-.0f}m_a{6:-.0f}deg_tt_pi.xcb.gz".format(
path, geomPrefix, hkl, E0/1e3, tname, Rcurv/1e3, alphaDeg),
unpack=True, usecols=(0, 3))
if xcb_pp:
x, R2s = np.loadtxt(
"{0}{1}Si{2}_E{3:-.0f}keV_t{4}_R{5:-.0f}m_a{6:-.0f}deg_pp_sigma.xcb.gz".format(
path, geomPrefix, hkl, E0/1e3, tname, Rcurv/1e3, alphaDeg),
unpack=True, usecols=(0, 7))
p3p, = ax.plot(x*1e3, R2s, '-c', label='s XCrystal_Bent PP')
x, R2p = np.loadtxt(
"{0}{1}Si{2}_E{3:-.0f}keV_t{4}_R{5:-.0f}m_a{6:-.0f}deg_pp_pi.xcb.gz".format(
path, geomPrefix, hkl, E0/1e3, tname, Rcurv/1e3, alphaDeg),
unpack=True, usecols=(0, 7))
p7, = ax.plot((theta - thetaCenter) * convFactor,
abs(curS)**2, '-r', linewidth=2)
p11, = ax.plot((theta - thetaCenter) * convFactor,
abs(curSD)**2, '--g', linewidth=2)
ax.set_xlabel(r'$\theta-\theta_B$ ($\mu$rad)')
if geom.endswith('transmitted'):
ax.set_ylabel('transmittivity')
else:
ax.set_ylabel('reflectivity')
ax.set_xlim([dtheta[0] * convFactor, dtheta[-1] * convFactor])
plotList = [p7, p11]
curveList = ['xrt perfect', 'xrt TT']
if np.isinf(Rcurv) or alphaDeg == 0:
plotList.append(p1)
curveList.append('XCrystal')
if xcb_tt:
plotList.append(p3t)
curveList.append('XCrystal_Bent TT')
if xcb_pp:
plotList.append(p3p)
curveList.append('XCrystal_Bent PP')
legendPostmp = legendPos2 if alphaDeg > 0 else legendPos2+1
ax.legend(plotList, curveList, loc=legendPostmp)
fname = '{0}Si{1}_{2}_a{3:-.0f}_{4:-.2f}'.format(
geomPrefix, hkl, tname, alphaDeg, Rcurv/1e3)
fig.savefig(fname + '.png')
E0 = 50000.
convFactor = 1e6
for alpha in alphas:
if hkl == '111':
if geom.startswith('Bragg'):
dtheta = np.linspace(0, 100, 400) * 1e-6
else:
dtheta = np.linspace(-5-np.abs(alpha)*2*50e3/Rcurvmm,
5+np.abs(alpha)*2*50e3/Rcurvmm,
1000) * 1e-6
dSpacing = 3.13562
hklInd = 1, 1, 1
elif hkl == '333':
if geom.startswith('Bragg'):
dtheta = np.linspace(-25, 75, 400) * 1e-6
else:
dtheta = np.linspace(-2-np.abs(alpha)**1.2*50e3/Rcurvmm,
2+np.abs(alpha)**1.2*50e3/Rcurvmm,
1000) * 1e-6
dSpacing = 3.13562 / 3.
hklInd = 3, 3, 3
siCrystal = rm.CrystalDiamond(hklInd, dSpacing, t=t, geom=geom,
factDW=factDW)
thetaCenter = math.asin(rm.ch / (2*siCrystal.d*E0))
E = np.ones_like(dtheta) * E0
theta = dtheta + thetaCenter
for_one_alpha(siCrystal, alpha, hkl)
def compare_Bragg_Laue(hkl, beamPath, factDW=1.):
def for_one_alpha(alphaDeg, hkl):
alpha = math.radians(alphaDeg)
s0 = (np.zeros_like(theta), np.cos(theta+alpha), -np.sin(theta+alpha))
sh = (np.zeros_like(theta), np.cos(theta-alpha), np.sin(theta-alpha))
n = (0, 0, 1)
hn = (0, math.sin(alpha), math.cos(alpha))
gamma0 = sum(i*j for i, j in zip(n, s0))
gammah = sum(i*j for i, j in zip(n, sh))
hns0 = sum(i*j for i, j in zip(hn, s0))
braggS, braggP = siBraggCrystal.get_amplitude(E, gamma0, gammah, hns0)
n = (0, -1, 0)
hn = (0, math.sin(alpha), math.cos(alpha))
gamma0 = sum(i*j for i, j in zip(n, s0))
gammah = sum(i*j for i, j in zip(n, sh))
hns0 = sum(i*j for i, j in zip(hn, s0))
laueS, laueP = siLaueCrystal.get_amplitude(E, gamma0, gammah, hns0)
fig = plt.figure(figsize=(8, 6), dpi=100)
ax = fig.add_subplot(111)
ax2 = ax.twinx()
ax2.set_ylabel(r'$\phi_s - \phi_p$', color='c')
phi = np.unwrap(np.angle(braggS * braggP.conj()))
p5, = ax2.plot((theta-thetaCenter) * convFactor, phi, '-c', lw=1,
yunits=math.pi, zorder=0)
phi = np.unwrap(np.angle(laueS * laueP.conj()))
p6, = ax2.plot((theta-thetaCenter) * convFactor, phi, '-c.', lw=1,
yunits=math.pi, zorder=0)
formatter = mpl.ticker.FormatStrFormatter('%g' + r'$ \pi$')
ax2.yaxis.set_major_formatter(formatter)
for tl in ax2.get_yticklabels():
tl.set_color('c')
fig.suptitle(r'Comparison of Bragg and Laue transmittivity for Si{0}'.
format(hkl), fontsize=16)
p1, = ax.plot((theta-thetaCenter) * convFactor, abs(braggS)**2, '-r')
p2, = ax.plot((theta-thetaCenter) * convFactor, abs(braggP)**2, '-b')
p3, = ax.plot((theta-thetaCenter) * convFactor, abs(laueS)**2, '-r.')
p4, = ax.plot((theta-thetaCenter) * convFactor, abs(laueP)**2, '-b.')
ax.set_xlabel(r'$\theta-\theta_B$ (arcsec)')
ax.set_ylabel('transmittivity')
l1 = ax.legend([p1, p2], ['s', 'p'], loc=3)
ax.legend([p1, p3], [u'Bragg t={0:.1f} µm'.format(
siBraggCrystal.t * 1e3), u'Laue t={0:.1f} µm'.format(
siLaueCrystal.t * 1e3)], loc=2)
ax.add_artist(l1)
ax.set_xlim([dtheta[0] * convFactor, dtheta[-1] * convFactor])
fname = r'BraggLaueTrSi{0}'.format(hkl)
fig.savefig(fname + '.png')
E0 = 10000.
convFactor = 180 / math.pi * 3600.
if hkl == '111':
dtheta = np.linspace(-100, 100, 400) * 1e-6
dSpacing = 3.13562
hklInd = 1, 1, 1
elif hkl == '333':
dtheta = np.linspace(-30, 30, 400) * 1e-6
dSpacing = 3.13562 / 3
hklInd = 3, 3, 3
thetaCenter = math.asin(rm.ch / (2*dSpacing*E0))
t = beamPath * math.sin(thetaCenter)
siBraggCrystal = rm.CrystalDiamond(hklInd, dSpacing, t=t,
geom='Bragg transmitted', factDW=factDW)
t = beamPath * math.cos(thetaCenter)
siLaueCrystal = rm.CrystalDiamond(hklInd, dSpacing, t=t,
geom='Laue transmitted', factDW=factDW)
E = np.ones_like(dtheta) * E0
theta = dtheta + thetaCenter
for_one_alpha(0., hkl)
def compare_reflectivity():
def for_one_material(stripe, refs, refp, theta, reprAngle):
fig = plt.figure(figsize=(8, 6), dpi=100)
fig.subplots_adjust(right=0.86)
ax = fig.add_subplot(111)
ax.set_xlabel('energy (eV)')
ax.set_ylabel('reflectivity')
ax.set_xlim(30, 5e4)
fig.suptitle(stripe.name + ' ' + reprAngle, fontsize=16)
x, R2s = np.loadtxt(refs, unpack=True)
p1, = ax.plot(x, R2s, '-k', label='s xf1f2')
x, R2s = np.loadtxt(refp, unpack=True)
p2, = ax.plot(x, R2s, '--k', label='p xf1f2')
refl = stripe.get_amplitude(E, math.sin(theta))
rs, rp = refl[0], refl[1]
p3, = ax.semilogx(E, abs(rs)**2, '-r')
p4, = ax.semilogx(E, abs(rp)**2, '--r')
l1 = ax.legend([p1, p2], ['s', 'p'], loc=3)
ax.legend([p1, p3], ['Xf1f2/XOP', 'xrt'], loc=6)
ax.add_artist(l1)
ax2 = ax.twinx()
ax2.set_ylabel(r'$\phi_s - \phi_p$', color='c')
phi = np.unwrap(np.angle(rs * rp.conj()))
p9, = ax2.plot(E, phi, 'c', lw=2, yunits=math.pi, zorder=0)
formatter = mpl.ticker.FormatStrFormatter('%g' + r'$ \pi$')
ax2.yaxis.set_major_formatter(formatter)
for tl in ax2.get_yticklabels():
tl.set_color('c')
fname = 'MirrorRefl' + stripe.name + "".join(reprAngle.split())
fig.savefig(fname + '.png')
dataDir = os.path.join('', 'XOP-Reflectivities')
E = np.logspace(1.+math.log10(3.), 4.+math.log10(5.), 500)
stripeSi = rm.Material('Si', rho=2.33)
for_one_material(stripeSi,
os.path.join(dataDir, "Si05deg_s.xf1f2.gz"),
os.path.join(dataDir, "Si05deg_p.xf1f2.gz"),
math.radians(0.5), '@ 0.5 deg')
stripePt = rm.Material('Pt', rho=21.45)
for_one_material(stripePt,
os.path.join(dataDir, "Pt4mrad_s.xf1f2.gz"),
os.path.join(dataDir, "Pt4mrad_p.xf1f2.gz"),
4e-3, '@ 4 mrad')
stripeSiO2 = rm.Material(('Si', 'O'), quantities=(1, 2), rho=2.2)
for_one_material(stripeSiO2,
os.path.join(dataDir, "SiO205deg_s.xf1f2.gz"),
os.path.join(dataDir, "SiO205deg_p.xf1f2.gz"),
math.radians(0.5), '@ 0.5 deg')
stripeRh = rm.Material('Rh', rho=12.41)
for_one_material(stripeRh,
os.path.join(dataDir, "Rh2mrad_s.xf1f2.gz"),
os.path.join(dataDir, "Rh2mrad_p.xf1f2.gz"),
2e-3, '@ 2 mrad')
|
MIT License
|
softlayer/softlayer-python
|
SoftLayer/config.py
|
get_client_settings_env
|
python
|
def get_client_settings_env(**_):
return {
'proxy': os.environ.get('https_proxy'),
'username': os.environ.get('SL_USERNAME'),
'api_key': os.environ.get('SL_API_KEY'),
}
|
Retrieve client settings from environment variables.
:param \\*\\*kwargs: Arguments that are passed into the client instance
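An illustrative call (not from the SDK; the credential values are placeholders) showing how the settings are picked up from the process environment:
import os

os.environ['SL_USERNAME'] = 'demo-user'     # placeholder credentials for illustration
os.environ['SL_API_KEY'] = 'demo-api-key'
settings = get_client_settings_env()
# settings == {'proxy': None (unless https_proxy is set), 'username': 'demo-user', 'api_key': 'demo-api-key'}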
|
https://github.com/softlayer/softlayer-python/blob/98feac7db01b50eddeeb45769182ab978ebeefc3/SoftLayer/config.py#L34-L44
|
import configparser
import logging
import os
import os.path
LOGGER = logging.getLogger(__name__)
def get_client_settings_args(**kwargs):
timeout = kwargs.get('timeout')
if timeout is not None:
timeout = float(timeout)
return {
'endpoint_url': kwargs.get('endpoint_url'),
'timeout': timeout,
'proxy': kwargs.get('proxy'),
'username': kwargs.get('username'),
'api_key': kwargs.get('api_key'),
}
|
MIT License
|
corteva/geocube
|
geocube/vector_to_cube.py
|
VectorToCube._get_attrs
|
python
|
def _get_attrs(measurement_name, fill_value):
return dict(
name=measurement_name,
long_name=measurement_name,
_FillValue=fill_value,
)
|
Get attributes for data array.
Parameters
----------
measurement_name: str
The measurement name.
fill_value: int or float
The fill value.
Returns
-------
dict: Dict with attributes for data array.
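A short sketch (not part of geocube) showing the returned dict and a typical way to attach it to an xarray DataArray; the measurement name and fill value are made up:
import numpy
import xarray

attrs = VectorToCube._get_attrs("elevation", -9999)
# attrs == {'name': 'elevation', 'long_name': 'elevation', '_FillValue': -9999}
data = xarray.DataArray(numpy.zeros((2, 2)), attrs=attrs)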
|
https://github.com/corteva/geocube/blob/f75ac98385ec79bc364e5f27dcc4fb5b1ca1a938/geocube/vector_to_cube.py#L168-L187
|
import numpy
import pandas
import xarray
from rioxarray.rioxarray import DEFAULT_GRID_MAP, affine_to_coords
from geocube.geo_utils.geobox import load_vector_data
from geocube.logger import get_logger
from geocube.rasterize import rasterize_image
def _format_series_data(data_series):
if "datetime" in str(data_series.dtype):
data_series = pandas.to_numeric(data_series).astype(numpy.float64)
get_logger().warning(
f"The series '{data_series.name}' was converted from a date to a number to "
"rasterize the data. To load the data back in as a date, "
"use 'pandas.to_datetime()'."
)
elif str(data_series.dtype) == "category":
data_series = data_series.cat.codes
return data_series
class VectorToCube:
def __init__(self, vector_data, geobox_maker, fill=None, categorical_enums=None):
self._vector_data = load_vector_data(vector_data)
self._geobox = geobox_maker.from_vector(self._vector_data)
self._grid_coords = affine_to_coords(
self._geobox.affine, self._geobox.width, self._geobox.height
)
self._fill = fill if fill is not None else numpy.nan
if categorical_enums is not None:
for column_name, categories in categorical_enums.items():
category_type = pandas.api.types.CategoricalDtype(
categories=sorted(set(categories)) + ["nodata"]
)
self._vector_data[column_name] = self._vector_data[column_name].astype(
category_type
)
self._rasterize_function = rasterize_image
self._datetime_measurements = ()
self._categorical_enums = {}
def make_geocube(
self,
measurements=None,
datetime_measurements=None,
group_by=None,
interpolate_na_method=None,
rasterize_function=None,
):
self._rasterize_function = (
rasterize_image if rasterize_function is None else rasterize_function
)
if measurements is None:
measurements = self._vector_data.columns.tolist()
measurements.remove("geometry")
self._datetime_measurements = ()
if datetime_measurements is not None:
self._datetime_measurements = tuple(
set(datetime_measurements) & set(measurements)
)
vector_data = self._vector_data.to_crs(self._geobox.crs.wkt)
for datetime_measurement in self._datetime_measurements:
vector_data[datetime_measurement] = pandas.to_datetime(
vector_data[datetime_measurement]
).astype("datetime64[ns]")
self._categorical_enums = {}
for categorical_column in vector_data.select_dtypes(["category"]).columns:
self._categorical_enums[categorical_column] = vector_data[
categorical_column
].cat.categories
if group_by:
vector_data = vector_data.groupby(group_by)
try:
measurements.remove(group_by)
except ValueError:
pass
return self._get_dataset(
vector_data, measurements, group_by, interpolate_na_method
)
@staticmethod
|
BSD 3-Clause New or Revised License
|
stanfordvl/taskonomy
|
code/lib/data/load_ops.py
|
resize_rescale_image_low_sat
|
python
|
def resize_rescale_image_low_sat(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.1, 0.9)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
|
Resize an image array with interpolation, and rescale it to lie between the bounds given by new_scale.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
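A usage sketch assuming the module's resize_image and rescale_image helpers (not shown in this excerpt) are importable alongside the function; the shapes and scale are illustrative:
import numpy as np

img = np.random.rand(256, 256, 3)                              # H x W x K float image
out = resize_rescale_image_low_sat(img, (128, 128), (-1., 1.))
# out has shape (128, 128, 3); values are clipped to [0.1, 0.9] before rescaling to [-1, 1]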
|
https://github.com/stanfordvl/taskonomy/blob/d486b5ecb7718531669a35d4fe3022a19c2bb377/code/lib/data/load_ops.py#L166-L186
|
from __future__ import absolute_import, division, print_function
import itertools
import json
import math
import numpy as np
from numpy import linalg as LA
import os
from PIL import Image
import PIL
import pdb
import pickle
import random
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
import skimage
import skimage.io
from skimage.transform import resize
import sklearn.neighbors as nn
import string
import subprocess
import sys
import tensorflow as tf
from transforms3d import euler
import transforms3d
import traceback as tb
if tf.__version__ == '0.10.0':
tf_summary_scalar = tf.scalar_summary
else:
tf_summary_scalar = tf.summary.scalar
def load_from_aws( filename, verbose=False ):
rand_string_length = 18
store = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(rand_string_length))
store = os.path.join('.', 'temp', store)
s3 = boto3.resource('s3')
bucket_name = filename.split('/')[0]
fname = os.path.join(*filename.split('/')[1:])
s3.Bucket(bucket_name).download_file(fname, store)
return 'Done', store
def load_scaled_image( filename, color=True ):
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def load_raw_image( filename, color=True, is_aws=False, use_pil=False ):
if is_aws:
success, filename = load_from_aws( filename )
if use_pil:
img = Image.open( filename )
else:
img = skimage.io.imread(filename, as_grey=not color)
if is_aws:
os.remove( filename )
if use_pil:
return img
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_rescale_imagenet(img, new_dims, interp_order=1, current_scale=None, no_clip=False):
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = img[:,:,[2,1,0]] * 255.
root = '/home/ubuntu/task-taxonomy-331b/lib/data'
mean_bgr = [103.062623801, 115.902882574, 123.151630838]
img = img - mean_bgr
return img
|
MIT License
|
fowlerlab/enrich2
|
enrich2/storemanager.py
|
StoreManager.plots_requested
|
python
|
def plots_requested(self, value):
if value in (True, False):
self._plots_requested = value
else:
raise ValueError(
"Invalid setting '{}' for plots_requested "
"[{}]".format(value, self.name)
)
|
Make sure the *value* is valid and set it.
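A minimal sketch (not from Enrich2) of the validating setter, assuming the full StoreManager class is importable:
sm = StoreManager()
sm.plots_requested = True        # accepted
try:
    sm.plots_requested = "yes"   # rejected: only True/False are valid
except ValueError as err:
    print(err)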
|
https://github.com/fowlerlab/enrich2/blob/9c9b9a7498e6adeb1ca09fc58b956d30c660f94c/enrich2/storemanager.py#L223-L233
|
from __future__ import print_function
import os
import logging
import pandas as pd
import collections
import getpass
import time
SCORING_METHODS = collections.OrderedDict(
[
("WLS", "weighted least squares"),
("ratios", "log ratios (Enrich2)"),
("counts", "counts only"),
("OLS", "ordinary least squares"),
("simple", "log ratios (old Enrich)"),
]
)
LOGR_METHODS = collections.OrderedDict(
[
("wt", "wild type"),
("complete", "library size (complete cases)"),
("full", "library size (all reads)"),
]
)
ELEMENT_LABELS = ["barcodes", "identifiers", "variants", "synonymous"]
def fix_filename(s):
fname = "".join(c for c in s if c.isalnum() or c in (" ._~"))
fname = fname.replace(" ", "_")
return fname
class StoreManager(object):
store_suffix = None
has_store = True
treeview_class_name = None
def __init__(self):
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
self._name = None
self.name = "Unnamed" + self.__class__.__name__
self.parent = None
self._labels = list()
self.username = getpass.getuser()
self.creationtime = time.asctime()
self.store_cfg = None
self.store_path = None
self.store = None
self.chunksize = 100000
self._output_dir = None
self._output_dir_override = None
self._plot_dir = None
self._tsv_dir = None
self._scoring_method = None
self._logr_method = None
self._force_recalculate = None
self._component_outliers = None
self._plots_requested = None
self._tsv_requested = None
self.treeview_id = None
self.treeview_info = None
self.plot_options = None
def child_labels(self):
shared = list()
for x in self.children:
shared.extend(x.labels)
shared = collections.Counter(shared)
shared = [x for x in shared.keys() if shared[x] == len(self.children)]
return sorted(shared, key=lambda a: ELEMENT_LABELS.index(a))
@property
def labels(self):
if self.children is None:
return self._labels
else:
if len(self.children) > 0:
return self.child_labels()
else:
return self._labels
@property
def force_recalculate(self):
if self._force_recalculate is None:
if self.parent is not None:
return self.parent.force_recalculate
else:
raise ValueError(
"Forced recalculation option not specified "
"at root [{}]".format(self.name)
)
else:
return self._force_recalculate
@force_recalculate.setter
def force_recalculate(self, value):
if value in (True, False):
self._force_recalculate = value
else:
raise ValueError(
"Invalid setting '{}' for force_recalculate "
"[{}]".format(value, self.name)
)
@property
def component_outliers(self):
if self._component_outliers is None:
if self.parent is not None:
return self.parent.component_outliers
else:
raise ValueError(
"Calculate component outliers option not "
"specified at root [{}]".format(self.name)
)
else:
return self._component_outliers
@component_outliers.setter
def component_outliers(self, value):
if value in (True, False):
self._component_outliers = value
else:
raise ValueError(
"Invalid setting '{}' for component_outliers "
"[{}]".format(value, self.name)
)
@property
def plots_requested(self):
if self._plots_requested is None:
if self.parent is not None:
return self.parent.plots_requested
else:
raise ValueError(
"Make plots option not specified at root " "[{}]".format(self.name)
)
else:
return self._plots_requested
@plots_requested.setter
|
BSD 3-Clause New or Revised License
|
surrealai/surreal
|
surreal/main/rollout.py
|
restore_model
|
python
|
def restore_model(folder, filename):
path_to_ckpt = path.join(folder, "checkpoint", filename)
with open(path_to_ckpt, 'rb') as fp:
data = pickle.load(fp)
return data['model']
|
Loads model from an experiment folder.
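An illustrative call; the folder and checkpoint filename below are placeholders:
folder = '/tmp/experiments/my-run'               # hypothetical experiment folder
model = restore_model(folder, 'learner.ckpt')    # reads <folder>/checkpoint/learner.ckpt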
|
https://github.com/surrealai/surreal/blob/ae9e5f43bdd7d1bc6d39d0a4783b96b2c117fade/surreal/main/rollout.py#L15-L22
|
import pickle
import sys
import time
import argparse
from os import path
from glob import glob
from surreal.env import *
import surreal.utils as U
from surreal.agent import PPOAgent, DDPGAgent
from benedict import BeneDict
|
MIT License
|
jackgoffinet/autoencoded-vocal-analysis
|
ava/models/window_vae_dataset.py
|
FixedWindowDataset.write_hdf5_files
|
python
|
def write_hdf5_files(self, save_dir, num_files=500, sylls_per_file=100):
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for write_file_num in range(num_files):
specs, file_indices, _, _ = self.__getitem__(np.arange(sylls_per_file), seed=write_file_num, return_seg_info=True)
specs = np.array([spec.detach().numpy() for spec in specs])
filenames = np.array([self.filenames[i] for i in file_indices])
fn = "syllables_" + str(write_file_num).zfill(4) + '.hdf5'
fn = os.path.join(save_dir, fn)
with h5py.File(fn, "w") as f:
f.create_dataset('specs', data=specs)
f.create_dataset('audio_filenames', data=filenames.astype('S'))
|
Write hdf5 files containing spectrograms of random audio chunks.
TO DO
-----
* Write to multiple directories.
Note
----
* This should be consistent with
`ava.preprocessing.preprocess.process_sylls`.
Parameters
----------
save_dir : str
Directory to save hdf5s in.
num_files : int, optional
Number of files to save. Defaults to ``500``.
sylls_per_file : int, optional
Number of syllables in each file. Defaults to ``100``.
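A usage sketch (the directories, the preprocessing parameter dict p, and the counts are illustrative) that builds a dataset with the helpers above and then writes spectrogram files:
partition = get_window_partition(['audio_dir'], ['roi_dir'])   # hypothetical data directories
loaders = get_fixed_window_data_loaders(partition, p)          # p: preprocessing parameter dict (assumed)
loaders['train'].dataset.write_hdf5_files('hdf5_out', num_files=10, sylls_per_file=50)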
|
https://github.com/jackgoffinet/autoencoded-vocal-analysis/blob/f10257a834efa9bc2bb9cb0e7e350e7a7798c6e7/ava/models/window_vae_dataset.py#L259-L293
|
__date__ = "August 2019 - November 2020"
from affinewarp import PiecewiseWarping
import h5py
import numpy as np
import os
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.io.wavfile import WavFileWarning
from torch.utils.data import Dataset, DataLoader
import warnings
from ava.models.utils import numpy_to_tensor, _get_wavs_from_dir, _get_specs_and_amplitude_traces
DEFAULT_WARP_PARAMS = {
'n_knots': 0,
'warp_reg_scale': 1e-2,
'smoothness_reg_scale': 1e-1,
'l2_reg_scale': 1e-7,
}
EPSILON = 1e-9
def get_window_partition(audio_dirs, roi_dirs, split=0.8, shuffle=True, exclude_empty_roi_files=True):
assert(split > 0.0 and split <= 1.0)
audio_filenames, roi_filenames = [], []
for audio_dir, roi_dir in zip(audio_dirs, roi_dirs):
temp_wavs = _get_wavs_from_dir(audio_dir)
temp_rois = [os.path.join(roi_dir, os.path.split(i)[-1][:-4]+'.txt') for i in temp_wavs]
if exclude_empty_roi_files:
for i in reversed(range(len(temp_wavs))):
segs = np.loadtxt(temp_rois[i])
if len(segs) == 0:
del temp_wavs[i]
del temp_rois[i]
audio_filenames += temp_wavs
roi_filenames += temp_rois
audio_filenames = np.array(audio_filenames)
roi_filenames = np.array(roi_filenames)
perm = np.argsort(audio_filenames)
audio_filenames, roi_filenames = audio_filenames[perm], roi_filenames[perm]
if shuffle:
np.random.seed(42)
perm = np.random.permutation(len(audio_filenames))
audio_filenames = audio_filenames[perm]
roi_filenames = roi_filenames[perm]
np.random.seed(None)
i = int(round(split * len(audio_filenames)))
return { 'train': { 'audio': audio_filenames[:i], 'rois': roi_filenames[:i]}, 'test': { 'audio': audio_filenames[i:], 'rois': roi_filenames[i:]} }
def get_fixed_window_data_loaders(partition, p, batch_size=64, shuffle=(True, False), num_workers=4, min_spec_val=None):
train_dataset = FixedWindowDataset(partition['train']['audio'], partition['train']['rois'], p, transform=numpy_to_tensor, min_spec_val=min_spec_val)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle[0], num_workers=num_workers)
if not partition['test']:
return {'train':train_dataloader, 'test':None}
test_dataset = FixedWindowDataset(partition['test']['audio'], partition['test']['rois'], p, transform=numpy_to_tensor, min_spec_val=min_spec_val)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=shuffle[1], num_workers=num_workers)
return {'train':train_dataloader, 'test':test_dataloader}
class FixedWindowDataset(Dataset):
def __init__(self, audio_filenames, roi_filenames, p, transform=None,
dataset_length=2048, min_spec_val=None):
self.filenames = np.array(sorted(audio_filenames))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=WavFileWarning)
self.audio = [wavfile.read(fn)[1] for fn in self.filenames]
self.fs = wavfile.read(audio_filenames[0])[0]
self.roi_filenames = roi_filenames
self.dataset_length = dataset_length
self.min_spec_val = min_spec_val
self.p = p
self.rois = [np.loadtxt(i, ndmin=2) for i in roi_filenames]
self.file_weights = np.array([np.sum(np.diff(i)) for i in self.rois])
self.file_weights /= np.sum(self.file_weights)
self.roi_weights = []
for i in range(len(self.rois)):
temp = np.diff(self.rois[i]).flatten()
self.roi_weights.append(temp/np.sum(temp))
self.transform = transform
def __len__(self):
return self.dataset_length
def __getitem__(self, index, seed=None, shoulder=0.05, return_seg_info=False):
specs, file_indices, onsets, offsets = [], [], [], []
single_index = False
try:
iterator = iter(index)
except TypeError:
index = [index]
single_index = True
np.random.seed(seed)
for i in index:
while True:
file_index = np.random.choice(np.arange(len(self.filenames)), p=self.file_weights)
load_filename = self.filenames[file_index]
roi_index = np.random.choice(np.arange(len(self.roi_weights[file_index])),
p=self.roi_weights[file_index])
roi = self.rois[file_index][roi_index]
onset = roi[0] + (roi[1] - roi[0] - self.p['window_length']) * np.random.rand()
offset = onset + self.p['window_length']
target_times = np.linspace(onset, offset, self.p['num_time_bins'])
spec, flag = self.p['get_spec'](max(0.0, onset-shoulder), offset+shoulder, self.audio[file_index], self.p, fs=self.fs, target_times=target_times)
if not flag:
continue
if self.min_spec_val is not None and np.max(spec) < self.min_spec_val:
continue
if self.transform:
spec = self.transform(spec)
specs.append(spec)
file_indices.append(file_index)
onsets.append(onset)
offsets.append(offset)
break
np.random.seed(None)
if return_seg_info:
if single_index:
return specs[0], file_indices[0], onsets[0], offsets[0]
return specs, file_indices, onsets, offsets
if single_index:
return specs[0]
return specs
|
MIT License
|
jmelett/pyfx
|
trader/lib/rfc3339.py
|
tzinfo.__repr__
|
python
|
def __repr__(self):
if self.minutesEast == 0:
return "rfc3339.UTC_TZ"
else:
return "rfc3339.tzinfo(%s,%s)" % (self.minutesEast, repr(self.name))
|
If minutesEast==0, prints specially as rfc3339.UTC_TZ.
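A quick illustration of my own of the two repr forms:
print(repr(tzinfo()))                 # rfc3339.UTC_TZ
print(repr(tzinfo(-300, '-05:00')))   # rfc3339.tzinfo(-300,'-05:00')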
|
https://github.com/jmelett/pyfx/blob/515dc8eaa9862d2bb28656a8c5c5c21d2a054f69/trader/lib/rfc3339.py#L129-L134
|
import datetime, time, calendar
import re
__all__ = ["tzinfo", "UTC_TZ", "parse_date", "parse_datetime", "now", "utcfromtimestamp", "utctotimestamp", "datetimetostr", "timestamptostr", "strtotimestamp"]
ZERO = datetime.timedelta(0)
class tzinfo(datetime.tzinfo):
def __init__(self, minutesEast = 0, name = 'Z'):
self.minutesEast = minutesEast
self.offset = datetime.timedelta(minutes = minutesEast)
self.name = name
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return ZERO
def tzname(self, dt):
return self.name
|
MIT License
|
renatahodovan/fuzzinator
|
fuzzinator/listener/event_listener.py
|
EventListener.on_load_updated
|
python
|
def on_load_updated(self, load):
pass
|
Invoked when the framework's load changes.
:param int load: number between 0 and controller's capacity.
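A minimal sketch (not part of Fuzzinator) of a listener that reacts to the hook; the print side effect is purely illustrative:
class LoadPrinter(EventListener):
    def on_load_updated(self, load):
        # Called by the framework whenever its load changes.
        print('current load: {}'.format(load))

listener = LoadPrinter(config=None)
listener.on_load_updated(3)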
|
https://github.com/renatahodovan/fuzzinator/blob/49e6cf1b5dad59e82f7bed5f14b23dbd7c520ad0/fuzzinator/listener/event_listener.py#L26-L32
|
class EventListener(object):
def __init__(self, config):
self.config = config
|
BSD 3-Clause New or Revised License
|
bilhim/trafficsimulator
|
src/trafficSimulator/window.py
|
Window.line
|
python
|
def line(self, start_pos, end_pos, color):
gfxdraw.line(
self.screen,
*start_pos,
*end_pos,
color
)
|
Draws a line.
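A short sketch of drawing one line; normally loop()/run() creates the screen, so the manual set_mode call and the sim object here are assumptions for illustration:
pygame.init()
win = Window(sim)                                              # `sim` is an assumed simulation object
win.screen = pygame.display.set_mode((win.width, win.height))
win.line(win.convert(0, 0), win.convert(10, 0), (0, 0, 0))     # black line between two world points
pygame.display.update()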
|
https://github.com/bilhim/trafficsimulator/blob/43b06d6f8b6117589b7af35ff0d8b8ac77a0ce5e/src/trafficSimulator/window.py#L120-L127
|
import pygame
from pygame import gfxdraw
import numpy as np
class Window:
def __init__(self, sim, config={}):
self.sim = sim
self.set_default_config()
for attr, val in config.items():
setattr(self, attr, val)
def set_default_config(self):
self.width = 1400
self.height = 900
self.bg_color = (250, 250, 250)
self.fps = 60
self.zoom = 5
self.offset = (0, 0)
self.mouse_last = (0, 0)
self.mouse_down = False
def loop(self, loop=None):
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.flip()
clock = pygame.time.Clock()
pygame.font.init()
self.text_font = pygame.font.SysFont('Lucida Console', 16)
running = True
while running:
if loop: loop(self.sim)
self.draw()
pygame.display.update()
clock.tick(self.fps)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
x, y = pygame.mouse.get_pos()
x0, y0 = self.offset
self.mouse_last = (x-x0*self.zoom, y-y0*self.zoom)
self.mouse_down = True
if event.button == 4:
self.zoom *= (self.zoom**2+self.zoom/4+1) / (self.zoom**2+1)
if event.button == 5:
self.zoom *= (self.zoom**2+1) / (self.zoom**2+self.zoom/4+1)
elif event.type == pygame.MOUSEMOTION:
if self.mouse_down:
x1, y1 = self.mouse_last
x2, y2 = pygame.mouse.get_pos()
self.offset = ((x2-x1)/self.zoom, (y2-y1)/self.zoom)
elif event.type == pygame.MOUSEBUTTONUP:
self.mouse_down = False
def run(self, steps_per_update=1):
def loop(sim):
sim.run(steps_per_update)
self.loop(loop)
def convert(self, x, y=None):
if isinstance(x, list):
return [self.convert(e[0], e[1]) for e in x]
if isinstance(x, tuple):
return self.convert(*x)
return (
int(self.width/2 + (x + self.offset[0])*self.zoom),
int(self.height/2 + (y + self.offset[1])*self.zoom)
)
def inverse_convert(self, x, y=None):
if isinstance(x, list):
return [self.convert(e[0], e[1]) for e in x]
if isinstance(x, tuple):
return self.convert(*x)
return (
int(-self.offset[0] + (x - self.width/2)/self.zoom),
int(-self.offset[1] + (y - self.height/2)/self.zoom)
)
def background(self, r, g, b):
self.screen.fill((r, g, b))
|
MIT License
|
containers/podman-py
|
podman/api_connection.py
|
ApiConnection.request
|
python
|
def request(self, method, url, body=None, headers=None, *, encode_chunked=False):
if headers is None:
headers = {}
super().request(method, url, body, headers, encode_chunked=encode_chunked)
response = super().getresponse()
if HTTPStatus.OK <= response.status < HTTPStatus.MULTIPLE_CHOICES:
pass
elif HTTPStatus.NOT_FOUND == response.status:
raise errors.NotFoundError(
"Request {}:{} failed: {}".format(
method,
url,
HTTPStatus.NOT_FOUND.description or HTTPStatus.NOT_FOUND.phrase,
),
response,
)
elif (
response.status >= HTTPStatus.BAD_REQUEST
and response.status < HTTPStatus.INTERNAL_SERVER_ERROR
):
raise errors.RequestError(
"Request {}:{} failed: {}".format(
method,
url,
response.reason or "Response Status Code {}".format(response.status),
),
response,
)
elif response.status >= HTTPStatus.INTERNAL_SERVER_ERROR:
try:
error_body = response.read()
error_message = json.loads(error_body)["message"]
except:
error_message = (
HTTPStatus.INTERNAL_SERVER_ERROR.description
or HTTPStatus.INTERNAL_SERVER_ERROR.phrase
)
raise errors.InternalServerError(
"Request {}:{} failed: {}".format(method, url, error_message),
response,
)
return response
|
Make request to Podman service.
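A usage sketch (the socket path and endpoint are illustrative) relying on the get() and join() helpers of the class (join() is not shown in this excerpt):
with ApiConnection('unix:///run/podman/podman.sock') as api:   # assumed Podman service socket
    response = api.get('/info')                                # resolved against the /v2.0.0/libpod base
    print(response.status, response.read())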
|
https://github.com/containers/podman-py/blob/7cff4162c6cbe3161d9a36bc645e1f11972bf2a9/podman/api_connection.py#L95-L140
|
import json
import logging
import socket
import urllib.parse
import warnings
from contextlib import AbstractContextManager
from http import HTTPStatus
from http.client import HTTPConnection
import podman.containers as containers
import podman.errors as errors
import podman.images as images
import podman.system as system
class ApiConnection(HTTPConnection, AbstractContextManager):
def __init__(self, url, base="/v2.0.0/libpod", *args, **kwargs):
if url is None or not url:
raise ValueError("url is required for service connection.")
super().__init__("localhost", *args, **kwargs)
supported_schemes = ("unix", "ssh")
uri = urllib.parse.urlparse(url)
if uri.scheme not in supported_schemes:
raise ValueError(
"The scheme '{}' is not supported, only {}".format(uri.scheme, supported_schemes)
)
self.uri = uri
self.base = base
warnings.warn("APIConnection() and supporting classes.", PendingDeprecationWarning)
def connect(self):
if self.uri.scheme == "unix":
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.uri.path)
self.sock = sock
else:
raise NotImplementedError("Scheme {} not yet implemented".format(self.uri.scheme))
def delete(self, path, params=None):
return self.request("DELETE", self.join(path, params))
def get(self, path, params=None):
return self.request("GET", self.join(path, params))
def post(self, path, params=None, headers=None, encode=False):
data = params
if not headers:
headers = {}
if encode:
if "content-type" not in set(key.lower() for key in headers) and params:
headers["content-type"] = "application/x-www-form-urlencoded"
data = urllib.parse.urlencode(params)
return self.request('POST', self.join(path), body=data, headers=headers)
|
Apache License 2.0
|
square/connect-python-sdk
|
squareconnect/models/search_orders_request.py
|
SearchOrdersRequest.limit
|
python
|
def limit(self):
return self._limit
|
Gets the limit of this SearchOrdersRequest.
Number of results to be returned in a single page. SearchOrders may use a smaller limit than specified depending on server load. If the response includes a `cursor` field, you can use it to retrieve the next set of results. Default: `500`
:return: The limit of this SearchOrdersRequest.
:rtype: int
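An illustrative construction (the location id and limit are placeholders):
request = SearchOrdersRequest(location_ids=['LOC_1'], limit=100, return_entries=True)
print(request.limit)   # 100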
|
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/search_orders_request.py#L131-L139
|
from pprint import pformat
from six import iteritems
import re
class SearchOrdersRequest(object):
def __init__(self, location_ids=None, cursor=None, query=None, limit=None, return_entries=None):
self.swagger_types = {
'location_ids': 'list[str]',
'cursor': 'str',
'query': 'SearchOrdersQuery',
'limit': 'int',
'return_entries': 'bool'
}
self.attribute_map = {
'location_ids': 'location_ids',
'cursor': 'cursor',
'query': 'query',
'limit': 'limit',
'return_entries': 'return_entries'
}
self._location_ids = location_ids
self._cursor = cursor
self._query = query
self._limit = limit
self._return_entries = return_entries
@property
def location_ids(self):
return self._location_ids
@location_ids.setter
def location_ids(self, location_ids):
self._location_ids = location_ids
@property
def cursor(self):
return self._cursor
@cursor.setter
def cursor(self, cursor):
self._cursor = cursor
@property
def query(self):
return self._query
@query.setter
def query(self, query):
self._query = query
@property
|
Apache License 2.0
|
kane610/deconz
|
pydeconz/sensor.py
|
LightLevel.scaled_light_level
|
python
|
def scaled_light_level(self) -> Optional[float]:
if self.light_level is None:
return None
return round(10 ** (float(self.light_level - 1) / 10000), 1)
|
Scaled light level.
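The raw `lightlevel` reported by deCONZ is, to my understanding, 10000 * log10(lux) + 1, which the property above inverts; a quick arithmetic check:
raw_lightlevel = 20000
lux = round(10 ** (float(raw_lightlevel - 1) / 10000), 1)
print(lux)   # 100.0, i.e. roughly 100 lux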
|
https://github.com/kane610/deconz/blob/ba29b152ca99a72161bc156bfa83d80630073b28/pydeconz/sensor.py#L499-L504
|
import logging
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Union
from .api import APIItems
from .deconz_device import DeconzDevice
LOGGER = logging.getLogger(__name__)
RESOURCE_TYPE = "sensors"
URL = "/sensors"
ANCILLARY_CONTROL_ARMED_AWAY = "armed_away"
ANCILLARY_CONTROL_ARMED_NIGHT = "armed_night"
ANCILLARY_CONTROL_ARMED_STAY = "armed_stay"
ANCILLARY_CONTROL_DISARMED = "disarmed"
ANCILLARY_CONTROL_EMERGENCY = "emergency"
ANCILLARY_CONTROL_FIRE = "fire"
ANCILLARY_CONTROL_INVALID_CODE = "invalid_code"
ANCILLARY_CONTROL_PANIC = "panic"
ANCILLARY_CONTROL_ARMING_AWAY = "arming_away"
ANCILLARY_CONTROL_ARMING_NIGHT = "arming_night"
ANCILLARY_CONTROL_ARMING_STAY = "arming_stay"
ANCILLARY_CONTROL_ENTRY_DELAY = "entry_delay"
ANCILLARY_CONTROL_EXIT_DELAY = "exit_delay"
ANCILLARY_CONTROL_IN_ALARM = "in_alarm"
ANCILLARY_CONTROL_NOT_READY = "not_ready"
DAYLIGHT_STATUS = {
100: "nadir",
110: "night_end",
120: "nautical_dawn",
130: "dawn",
140: "sunrise_start",
150: "sunrise_end",
160: "golden_hour_1",
170: "solar_noon",
180: "golden_hour_2",
190: "sunset_start",
200: "sunset_end",
210: "dusk",
220: "nautical_dusk",
230: "night_start",
}
DEVICE_MODE_DUAL_PUSH_BUTTON = "dualpushbutton"
DEVICE_MODE_DUAL_ROCKER = "dualrocker"
DEVICE_MODE_SINGLE_PUSH_BUTTON = "singlepushbutton"
DEVICE_MODE_SINGLE_ROCKER = "singlerocker"
PRESENCE_DELAY = "delay"
PRESENCE_DURATION = "duration"
PRESENCE_SENSITIVITY = "sensitivity"
PRESENCE_SENSITIVITY_MAX = "sensitivitymax"
PRESENCE_DARK = "dark"
PRESENCE_PRESENCE = "presence"
THERMOSTAT_MODE_AUTO = "auto"
THERMOSTAT_MODE_COOL = "cool"
THERMOSTAT_MODE_DRY = "dry"
THERMOSTAT_MODE_FAN_ONLY = "fan only"
THERMOSTAT_MODE_HEAT = "heat"
THERMOSTAT_MODE_EMERGENCY_HEATING = "emergency heating"
THERMOSTAT_MODE_OFF = "off"
THERMOSTAT_MODE_PRECOOLING = "precooling"
THERMOSTAT_MODE_SLEEP = "sleep"
THERMOSTAT_FAN_MODE_AUTO = "auto"
THERMOSTAT_FAN_MODE_HIGH = "high"
THERMOSTAT_FAN_MODE_LOW = "low"
THERMOSTAT_FAN_MODE_MEDIUM = "medium"
THERMOSTAT_FAN_MODE_OFF = "off"
THERMOSTAT_FAN_MODE_ON = "on"
THERMOSTAT_FAN_MODE_SMART = "smart"
THERMOSTAT_PRESET_AUTO = "auto"
THERMOSTAT_PRESET_BOOST = "boost"
THERMOSTAT_PRESET_COMFORT = "comfort"
THERMOSTAT_PRESET_COMPLEX = "complex"
THERMOSTAT_PRESET_ECO = "eco"
THERMOSTAT_PRESET_HOLIDAY = "holiday"
THERMOSTAT_PRESET_MANUAL = "manual"
THERMOSTAT_SWING_MODE_FULLY_CLOSED = "fully closed"
THERMOSTAT_SWING_MODE_FULLY_OPEN = "fully open"
THERMOSTAT_SWING_MODE_HALF_OPEN = "half open"
THERMOSTAT_SWING_MODE_QUARTER_OPEN = "quarter open"
THERMOSTAT_SWING_MODE_THREE_QUARTERS_OPEN = "three quarters open"
THERMOSTAT_TEMPERATURE_MEASUREMENT_MODE_AIR_SENSOR = "air sensor"
THERMOSTAT_TEMPERATURE_MEASUREMENT_MODE_FLOOR_PROTECTION = "floor protection"
THERMOSTAT_TEMPERATURE_MEASUREMENT_MODE_FLOOR_SENSOR = "floor sensor"
class Sensors(APIItems):
def __init__(
self,
raw: dict,
request: Callable[..., Awaitable[Dict[str, Any]]],
) -> None:
super().__init__(raw, request, URL, create_sensor)
class DeconzSensor(DeconzDevice):
BINARY = False
ZHATYPE: tuple = ()
STATE_PROPERTY = "on"
@property
def resource_type(self) -> str:
return RESOURCE_TYPE
@property
def state(self) -> Union[bool, int, str, None]:
return getattr(self, self.STATE_PROPERTY)
@property
def battery(self) -> Optional[int]:
return self.raw["config"].get("battery")
@property
def config_pending(self) -> Optional[list]:
return self.raw["config"].get("pending")
@property
def ep(self) -> Optional[int]:
return self.raw.get("ep")
@property
def low_battery(self) -> Optional[bool]:
return self.raw["state"].get("lowbattery")
@property
def on(self) -> Optional[bool]:
return self.raw["config"].get("on")
@property
def reachable(self) -> bool:
return self.raw["config"].get("reachable", True)
@property
def tampered(self) -> Optional[bool]:
return self.raw["state"].get("tampered")
@property
def secondary_temperature(self) -> Optional[float]:
if "temperature" not in self.raw["config"]:
return None
return Temperature.convert_temperature(self.raw["config"].get("temperature"))
class DeconzBinarySensor(DeconzSensor):
BINARY = True
class AirQuality(DeconzSensor):
STATE_PROPERTY = "air_quality"
ZHATYPE = ("ZHAAirQuality",)
@property
def air_quality(self) -> str:
return self.raw["state"]["airquality"]
@property
def air_quality_ppb(self) -> int:
return self.raw["state"]["airqualityppb"]
class Alarm(DeconzBinarySensor):
STATE_PROPERTY = "alarm"
ZHATYPE = ("ZHAAlarm",)
@property
def alarm(self) -> bool:
return self.raw["state"]["alarm"]
class AncillaryControl(DeconzSensor):
STATE_PROPERTY = "panel"
ZHATYPE = ("ZHAAncillaryControl",)
@property
def action(self) -> str:
return self.raw["state"]["action"]
@property
def panel(self) -> Optional[str]:
return self.raw["state"].get("panel")
@property
def seconds_remaining(self) -> int:
return self.raw["state"].get("seconds_remaining", 0)
class Battery(DeconzSensor):
STATE_PROPERTY = "battery"
ZHATYPE = ("ZHABattery",)
@property
def battery(self) -> int:
return self.raw["state"]["battery"]
class CarbonMonoxide(DeconzBinarySensor):
STATE_PROPERTY = "carbon_monoxide"
ZHATYPE = ("ZHACarbonMonoxide",)
@property
def carbon_monoxide(self) -> bool:
return self.raw["state"]["carbonmonoxide"]
class Consumption(DeconzSensor):
STATE_PROPERTY = "scaled_consumption"
ZHATYPE = ("ZHAConsumption",)
@property
def scaled_consumption(self) -> Optional[float]:
if self.consumption is None:
return None
return float(self.consumption / 1000)
@property
def consumption(self) -> Optional[int]:
return self.raw["state"].get("consumption")
@property
def power(self) -> Optional[int]:
return self.raw["state"].get("power")
class Daylight(DeconzSensor):
STATE_PROPERTY = "status"
ZHATYPE = ("Daylight",)
@property
def configured(self) -> bool:
return self.raw["config"]["configured"]
@property
def daylight(self) -> bool:
return self.raw["state"]["daylight"]
@property
def status(self) -> str:
return DAYLIGHT_STATUS.get(self.raw["state"]["status"], "unknown")
@property
def sunrise_offset(self) -> int:
return self.raw["config"]["sunriseoffset"]
@property
def sunset_offset(self) -> int:
return self.raw["config"]["sunsetoffset"]
class DoorLock(DeconzSensor):
STATE_PROPERTY = "lock_state"
ZHATYPE = ("ZHADoorLock",)
@property
def is_locked(self) -> bool:
return self.lock_state == "locked"
@property
def lock_state(self) -> str:
return self.raw["state"]["lockstate"]
@property
def lock_configuration(self) -> bool:
return self.raw["config"]["lock"]
async def lock(self) -> dict:
return await self.request(
field=f"{self.deconz_id}/config",
data={"lock": True},
)
async def unlock(self) -> dict:
return await self.request(
field=f"{self.deconz_id}/config",
data={"lock": False},
)
class Fire(DeconzBinarySensor):
STATE_PROPERTY = "fire"
ZHATYPE = ("ZHAFire",)
@property
def fire(self) -> bool:
return self.raw["state"]["fire"]
class GenericFlag(DeconzBinarySensor):
STATE_PROPERTY = "flag"
ZHATYPE = ("CLIPGenericFlag",)
@property
def flag(self) -> bool:
return self.raw["state"]["flag"]
class GenericStatus(DeconzSensor):
STATE_PROPERTY = "status"
ZHATYPE = ("CLIPGenericStatus",)
@property
def status(self) -> str:
return self.raw["state"]["status"]
class Humidity(DeconzSensor):
STATE_PROPERTY = "scaled_humidity"
ZHATYPE = ("ZHAHumidity", "CLIPHumidity")
@property
def scaled_humidity(self) -> Optional[float]:
if self.humidity is None:
return None
return round(float(self.humidity) / 100, 1)
@property
def humidity(self) -> Optional[int]:
return self.raw["state"].get("humidity")
@property
def offset(self) -> Optional[int]:
return self.raw["config"].get("offset")
async def set_config(
self,
offset: Optional[int] = None,
) -> dict:
data = {
key: value
for key, value in {
"offset": offset,
}.items()
if value is not None
}
return await self.request(field=f"{self.deconz_id}/config", data=data)
class LightLevel(DeconzSensor):
STATE_PROPERTY = "scaled_light_level"
ZHATYPE = ("ZHALightLevel", "CLIPLightLevel")
@property
|
MIT License
|
arelle/arelle
|
arelle/TkTableWrapper.py
|
Table.reread
|
python
|
def reread(self):
self.tk.call(self._w, 'reread')
|
Rereads the old contents of the cell back into the editing buffer.
Useful for a key binding when <Escape> is pressed to abort the edit
(a default binding).
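A minimal binding sketch (not from Arelle; requires the Tktable Tcl package to be installed) wiring <Escape> to reread on an editable table:
root = tkinter.Tk()
table = Table(root, rows=4, cols=4, state='normal')
table.pack()
table.bind('<Escape>', lambda event: table.reread())   # abort an in-progress cell edit
root.mainloop()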
|
https://github.com/arelle/arelle/blob/f9b83eb6c95be457c9fe07dda8e3f6207f0ec9af/arelle/TkTableWrapper.py#L320-L324
|
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["ArrayVar", "Table"]
import os
import collections
try:
import tkinter
except ImportError:
import Tkinter as tkinter
import sys
def _setup_master(master):
if master is None:
if tkinter._support_default_root:
master = tkinter._default_root or tkinter.Tk()
else:
raise RuntimeError("No master specified and Tkinter is "
"configured to not support default master")
return master
class ArrayVar(tkinter.Variable):
def __init__(self, master=None, name=None):
self._master = _setup_master(master)
self._tk = self._master.tk
if name:
self._name = name
else:
self._name = 'PY_VAR%s' % id(self)
def __del__(self):
if bool(self._tk.call('info', 'exists', self._name)):
self._tk.globalunsetvar(self._name)
def __len__(self):
return int(self._tk.call('array', 'size', str(self)))
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.set(**{str(key): value})
def names(self):
return self._tk.call('array', 'names', self._name)
def get(self, key=None):
if key is None:
flatten_pairs = self._tk.call('array', 'get', str(self))
return dict(list(zip(flatten_pairs[::2], flatten_pairs[1::2])))
return self._tk.globalgetvar(str(self), str(key))
def set(self, **kw):
self._tk.call(
'array', 'set', str(self), tkinter._flatten(list(kw.items())))
def unset(self, pattern=None):
self._tk.call('array', 'unset', str(self), pattern)
_TKTABLE_LOADED = False
class Table(tkinter.Widget):
_switches = ('holddimensions', 'holdselection', 'holdtags', 'holdwindows',
'keeptitles', '-')
_tabsubst_format = ('%c', '%C', '%i', '%r', '%s', '%S', '%W')
_tabsubst_commands = ('browsecommand', 'browsecmd', 'command',
'selectioncommand', 'selcmd',
'validatecommand', 'valcmd')
def __init__(self, master=None, **kw):
master = _setup_master(master)
global _TKTABLE_LOADED
if not _TKTABLE_LOADED:
tktable_lib = os.environ.get('TKTABLE_LIBRARY')
if tktable_lib:
master.tk.eval('global auto_path; '
'lappend auto_path {%s}' % tktable_lib)
master.tk.call('package', 'require', 'Tktable')
_TKTABLE_LOADED = True
if not 'padx' in kw:
kw['padx'] = 1
if not 'pady' in kw:
kw['pady'] = 1
tkinter.Widget.__init__(self, master, 'table', kw)
self.contextMenuClick = "<Button-2>" if sys.platform=="darwin" else "<Button-3>"
def _options(self, cnf, kw=None):
if kw:
cnf = tkinter._cnfmerge((cnf, kw))
else:
cnf = tkinter._cnfmerge(cnf)
res = ()
for k, v in cnf.items():
if isinstance(v, collections.Callable):
if k in self._tabsubst_commands:
v = "%s %s" % (self._register(v, self._tabsubst),
' '.join(self._tabsubst_format))
else:
v = self._register(v)
res += ('-%s' % k, v)
return res
def _tabsubst(self, *args):
if len(args) != len(self._tabsubst_format):
return args
tk = self.tk
c, C, i, r, s, S, W = args
e = tkinter.Event()
e.widget = self
e.c = tk.getint(c)
e.i = tk.getint(i)
e.r = tk.getint(r)
e.C = "%d,%d" % (e.r, e.c)
e.s = s
e.S = S
try:
e.W = self._nametowidget(W)
except KeyError:
e.W = None
return (e,)
def _handle_switches(self, args):
args = args or ()
return tuple(('-%s' % x) for x in args if x in self._switches)
def activate(self, index):
self.tk.call(self._w, 'activate', index)
def bbox(self, first, last=None):
return self._getints(self.tk.call(self._w, 'bbox', first, last)) or ()
def clear(self, option, first=None, last=None):
self.tk.call(self._w, 'clear', option, first, last)
def clear_cache(self, first=None, last=None):
self.clear('cache', first, last)
def clear_sizes(self, first=None, last=None):
self.clear('sizes', first, last)
def clear_tags(self, first=None, last=None):
self.clear('tags', first, last)
def clear_all(self, first=None, last=None):
self.clear('all', first, last)
def curselection(self, value=None):
result = self.tk.call(self._w, 'curselection', value)
if value is None:
return result
def curvalue(self, value=None):
return self.tk.call(self._w, 'curvalue', value)
def delete_active(self, index1, index2=None):
self.tk.call(self._w, 'delete', 'active', index1, index2)
def delete_cols(self, index, count=None, switches=None):
args = self._handle_switches(switches) + (index, count)
self.tk.call(self._w, 'delete', 'cols', *args)
def delete_rows(self, index, count=None, switches=None):
args = self._handle_switches(switches) + (index, count)
self.tk.call(self._w, 'delete', 'rows', *args)
def get(self, first, last=None):
return self.tk.call(self._w, 'get', first, last)
def height(self, row=None, **kwargs):
if row is None and not kwargs:
pairs = self.tk.splitlist(self.tk.call(self._w, 'height'))
return dict(pair.split() for pair in pairs)
elif row:
return int(self.tk.call(self._w, 'height', str(row)))
args = tkinter._flatten(list(kwargs.items()))
self.tk.call(self._w, 'height', *args)
def hidden(self, *args):
return self.tk.call(self._w, 'hidden', *args)
def icursor(self, arg=None):
return self.tk.call(self._w, 'icursor', arg)
def index(self, index, rc=None):
res = self.tk.call(self._w, 'index', index, rc)
if rc is None:
return res
else:
return int(res)
def insert_active(self, index, value):
self.tk.call(self._w, 'insert', 'active', index, value)
def insert_cols(self, index, count=None, switches=None):
args = self._handle_switches(switches) + (index, count)
self.tk.call(self._w, 'insert', 'cols', *args)
def insert_rows(self, index, count=None, switches=None):
args = self._handle_switches(switches) + (index, count)
self.tk.call(self._w, 'insert', 'rows', *args)
|
Apache License 2.0
|
cityofsantamonica/mds-provider
|
mds/providers.py
|
Registry._parse_csv
|
python
|
def _parse_csv(lines, **kwargs):
return [Provider(**record, **kwargs) for record in csv.DictReader(lines)]
|
Parse CSV lines into a list of Provider instances.
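An illustrative call; the header names follow the Provider constructor's expected fields and the row values are made up:
lines = [
    "provider_name,provider_id,mds_api_url",
    "Demo Mobility,c20e08cf-8488-46a6-a66c-5d8fb827f7e0,https://api.demo.example.com/mds",
]
providers = Registry._parse_csv(lines, ref='dev')
print(providers[0].provider_name, providers[0].provider_id)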
|
https://github.com/cityofsantamonica/mds-provider/blob/02abcb227c35cdfe78a39e35b3157f7c2916c028/mds/providers.py#L192-L196
|
import csv
import pathlib
import uuid
import requests
import mds.github
from .schemas import STATUS_CHANGES, TRIPS, EVENTS, VEHICLES
from .versions import Version
class Provider():
def __init__(self, identifier=None, ref=mds.github.MDS_DEFAULT_REF, path=None, **kwargs):
if not identifier:
self.provider_name = kwargs.pop("provider_name", None)
provider_id = kwargs.pop("provider_id", None)
self.provider_id = provider_id if isinstance(provider_id, uuid.UUID) else uuid.UUID(provider_id)
self.auth_type = kwargs.pop("auth_type", "Bearer")
self.gbfs_api_url = self._clean_url(kwargs.pop("gbfs_api_url", None))
self.headers = kwargs.pop("headers", {})
self.mds_api_suffix = kwargs.pop("mds_api_suffix", None)
self.mds_api_url = self._clean_url(kwargs.pop("mds_api_url", None))
self.registry_path = path
self.registry_ref = ref
self.url = self._clean_url(kwargs.pop("url", None))
try:
self.version = Version(ref)
except:
pass
for k,v in kwargs.items():
setattr(self, k, v)
elif isinstance(identifier, Provider):
_kwargs = vars(identifier)
_kwargs.update(kwargs)
Provider.__init__(self, ref=identifier.registry_ref, path=identifier.registry_path, **_kwargs)
else:
provider = Registry(ref=ref, path=path).find(identifier, **kwargs)
if provider:
Provider.__init__(self, provider)
def __repr__(self):
ref, name, pid, url = (
self.registry_ref or self.registry_path,
self.provider_name,
str(self.provider_id),
self.mds_api_url
)
return f"<mds.providers.Provider ('{ref}', '{name}', '{pid}', '{url}')>"
@property
def endpoints(self):
endpoint = [self.mds_api_url]
if self.mds_api_suffix:
endpoint.append(self.mds_api_suffix.rstrip("/"))
return {
STATUS_CHANGES: "/".join(endpoint + [STATUS_CHANGES]),
TRIPS: "/".join(endpoint + [TRIPS]),
EVENTS: "/".join(endpoint + [EVENTS]),
VEHICLES: "/".join(endpoint + [VEHICLES])
}
@staticmethod
def _clean_url(url):
if url:
url = url.lower().rstrip("/")
return url if url.startswith("https://") else f"https://{url}"
else:
return None
class Registry():
_registry = {}
def __init__(self, ref=mds.github.MDS_DEFAULT_REF, path=None, **kwargs):
key = (str(ref), path)
if key not in self._registry:
self._registry[key] = self._get_registry(*key)
self.providers = self._registry[key]
self.ref = ref
self.path = path
def __repr__(self):
data = "'" + "', '".join([str(self.ref or self.path), str(len(self.providers)) + " providers"]) + "'"
return f"<mds.files.Registry ({data})>"
def find(self, provider, **kwargs):
try:
provider = uuid.UUID(provider)
except ValueError:
pass
found = next((p for p in self.providers if any([
isinstance(provider, str) and p.provider_name.lower() == provider.lower(),
isinstance(provider, uuid.UUID) and p.provider_id == provider
])), None)
return Provider(found, **kwargs) if found else None
@staticmethod
def _get_registry(ref, path):
if path:
path = pathlib.Path(path)
with path.open("r") as f:
return Registry._parse_csv(f.readlines(), ref=ref, path=path)
else:
url = mds.github.registry_url(ref)
with requests.get(url, stream=True) as r:
lines = (line.decode("utf-8").replace(", ", ",") for line in r.iter_lines())
return Registry._parse_csv(lines, ref=ref, path=path)
@staticmethod
|
MIT License
|
aio-libs/aiozipkin
|
aiozipkin/aiohttp_helpers.py
|
get_tracer
|
python
|
def get_tracer(app: Application, tracer_key: str = APP_AIOZIPKIN_KEY) -> Tracer:
return cast(Tracer, app[tracer_key])
|
Returns the tracer object from the application context.
By default the tracer is stored under APP_AIOZIPKIN_KEY in the aiohttp
application context; you can provide your own key if the default one is not
suitable for some reason.
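A usage sketch, assuming an aiozipkin Tracer instance has already been created elsewhere (only setup() and get_tracer() from this module are exercised):

from aiohttp import web
import aiozipkin as az

async def handler(request):
    # Read the tracer back out of the application context.
    tracer = az.get_tracer(request.app)
    with tracer.new_trace() as span:
        span.name("manual-span")
    return web.Response(text="ok")

def make_app(tracer):
    # `tracer` is assumed to be an aiozipkin Tracer built elsewhere.
    app = web.Application()
    app.router.add_get("/", handler)
    az.setup(app, tracer)   # stores the tracer under APP_AIOZIPKIN_KEY
    return app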
|
https://github.com/aio-libs/aiozipkin/blob/ce9e238e3a16b34ca9c30a1b177fea6465d066e2/aiozipkin/aiohttp_helpers.py#L206-L212
|
import ipaddress
import sys
from contextlib import contextmanager
from types import SimpleNamespace
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
Optional,
Set,
cast,
)
import aiohttp
from aiohttp import (
TraceRequestEndParams,
TraceRequestExceptionParams,
TraceRequestStartParams,
)
from aiohttp.web import (
AbstractRoute,
Application,
HTTPException,
Request,
StreamResponse,
middleware,
)
from .constants import HTTP_METHOD, HTTP_PATH, HTTP_ROUTE, HTTP_STATUS_CODE
from .helpers import (
CLIENT,
SERVER,
TraceContext,
make_context,
parse_debug_header,
parse_sampled_header,
)
from .span import SpanAbc
from .tracer import Tracer
APP_AIOZIPKIN_KEY = "aiozipkin_tracer"
REQUEST_AIOZIPKIN_KEY = "aiozipkin_span"
__all__ = (
"setup",
"get_tracer",
"request_span",
"middleware_maker",
"make_trace_config",
"APP_AIOZIPKIN_KEY",
"REQUEST_AIOZIPKIN_KEY",
)
Handler = Callable[[Request], Awaitable[StreamResponse]]
Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
def _set_remote_endpoint(span: SpanAbc, request: Request) -> None:
peername = request.remote
if peername is not None:
kwargs: Dict[str, Any] = {}
try:
peer_ipaddress = ipaddress.ip_address(peername)
except ValueError:
pass
else:
if isinstance(peer_ipaddress, ipaddress.IPv4Address):
kwargs["ipv4"] = str(peer_ipaddress)
else:
kwargs["ipv6"] = str(peer_ipaddress)
if kwargs:
span.remote_endpoint(None, **kwargs)
def _get_span(request: Request, tracer: Tracer) -> SpanAbc:
context = make_context(request.headers)
if context is None:
sampled = parse_sampled_header(request.headers)
debug = parse_debug_header(request.headers)
span = tracer.new_trace(sampled=sampled, debug=debug)
else:
span = tracer.join_span(context)
return span
def _set_span_properties(span: SpanAbc, request: Request) -> None:
span_name = f"{request.method.upper()} {request.path}"
span.name(span_name)
span.kind(SERVER)
span.tag(HTTP_PATH, request.path)
span.tag(HTTP_METHOD, request.method.upper())
resource = request.match_info.route.resource
if resource is not None:
route = resource.canonical
span.tag(HTTP_ROUTE, route)
_set_remote_endpoint(span, request)
PY37 = sys.version_info >= (3, 7)
if PY37:
from contextvars import ContextVar
OptTraceVar = ContextVar[Optional[TraceContext]]
zipkin_context: OptTraceVar = ContextVar("zipkin_context", default=None)
@contextmanager
def set_context_value(
context_var: OptTraceVar, value: TraceContext
) -> Generator[OptTraceVar, None, None]:
token = context_var.set(value)
try:
yield context_var
finally:
context_var.reset(token)
def middleware_maker(
skip_routes: Optional[Iterable[AbstractRoute]] = None,
tracer_key: str = APP_AIOZIPKIN_KEY,
request_key: str = REQUEST_AIOZIPKIN_KEY,
) -> Middleware:
s = skip_routes
skip_routes_set: Set[AbstractRoute] = set(s) if s else set()
@middleware
async def aiozipkin_middleware(
request: Request, handler: Handler
) -> StreamResponse:
if request.match_info.route in skip_routes_set:
resp = await handler(request)
return resp
tracer = request.app[tracer_key]
span = _get_span(request, tracer)
request[request_key] = span
if span.is_noop:
resp = await handler(request)
return resp
if PY37:
with set_context_value(zipkin_context, span.context):
with span:
_set_span_properties(span, request)
try:
resp = await handler(request)
except HTTPException as e:
span.tag(HTTP_STATUS_CODE, str(e.status))
raise
span.tag(HTTP_STATUS_CODE, str(resp.status))
else:
with span:
_set_span_properties(span, request)
try:
resp = await handler(request)
except HTTPException as e:
span.tag(HTTP_STATUS_CODE, str(e.status))
raise
span.tag(HTTP_STATUS_CODE, str(resp.status))
return resp
return aiozipkin_middleware
def setup(
app: Application,
tracer: Tracer,
*,
skip_routes: Optional[Iterable[AbstractRoute]] = None,
tracer_key: str = APP_AIOZIPKIN_KEY,
request_key: str = REQUEST_AIOZIPKIN_KEY,
) -> Application:
app[tracer_key] = tracer
m = middleware_maker(
skip_routes=skip_routes, tracer_key=tracer_key, request_key=request_key
)
app.middlewares.append(m)
async def close_aiozipkin(app: Application) -> None:
await app[tracer_key].close()
app.on_cleanup.append(close_aiozipkin)
return app
|
Apache License 2.0
|
shad0w008/scanver
|
core/brute.py
|
Hydra.parse_result
|
python
|
def parse_result(self,stdout):
try:
for line in stdout.readlines():
line = str(line).strip('\r\n')
if not line:
continue
logging.info(line)
m = re.findall(r'host: (\S*).*login: (\S*).*password:(.*)', line)
if m and m[0] and len(m[0]) == 3:
username = m[0][1]
password = m[0][2].strip()
self.result.add((
self.service,
self.host,
self.port,
username,
password,
line))
except Exception as e:
logging.error('[PortCrackBase][parse_result_hydra] Exception %s' % e)
|
[21][ftp] host: 10.15.154.142 login: ftpftp password: h123123a
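To show how the pattern in parse_result handles that sample line, a standalone illustration using only the standard library:

import re

line = "[21][ftp] host: 10.15.154.142 login: ftpftp password: h123123a"

# Same regular expression as parse_result: host, login and password groups.
m = re.findall(r'host: (\S*).*login: (\S*).*password:(.*)', line)
assert m and len(m[0]) == 3
host, username, password = m[0][0], m[0][1], m[0][2].strip()
print(host, username, password)   # 10.15.154.142 ftpftp h123123a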
|
https://github.com/shad0w008/scanver/blob/cbe03d38422641e83275bfbc8d1ae93f42c3f387/core/brute.py#L63-L85
|
import os
import random
import re
import string
import subprocess
from log import logging
class Hydra(object):
HYDRA = './lib/hydra/hydra.exe'
def randstr(self, length=16):
return ''.join(random.sample("qwertyuiopasdfghjklzxcvbnm1234567890", length))
def __init__(self,username,password):
self.result = set()
self.projectid = './data/' + self.randstr()
if not os.path.exists(self.projectid):
os.mkdir(self.projectid)
self.fresult = self.projectid +'/result.txt'
self.fusername = self.projectid +'/user.txt'
self.fpassword = self.projectid +'/passwd.txt'
with open(self.fusername,'w') as fu:
fu.write('\n'.join(username if isinstance(username,list) else [username]))
with open(self.fpassword,'w') as fp:
fp.write('\n'.join(password if isinstance(password,list) else [password]))
def __del__(self):
os.remove(self.fresult)
os.remove(self.fusername)
os.remove(self.fpassword)
if self.ftarget:
os.remove(self.ftarget)
os.removedirs(self.projectid)
def start(self,service,host,port):
self.result = set()
self.service = service
self.host = host
self.port = port
args = {
'hydra' :self.HYDRA,
'username' :self.fusername,
'passwd' :self.fpassword,
'resultfile':self.fresult,
'service' :self.service,
'host' :self.host,
'port' :self.port
}
options = '{hydra} -L {username} -P {passwd} -o {resultfile}'
options += ' -s {port} {host}'
options += ' {service}'
cmd = options.format(**args)
print(cmd)
proc = subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
print(proc.pid)
self.parse_result(proc.stdout)
|
Apache License 2.0
|
kuri65536/python-for-android
|
python-modules/twisted/twisted/manhole/telnet.py
|
ShellFactory.__getstate__
|
python
|
def __getstate__(self):
dict = self.__dict__
ns = copy.copy(dict['namespace'])
dict['namespace'] = ns
if ns.has_key('__builtins__'):
del ns['__builtins__']
return dict
|
This returns the persistent state of this shell factory.
|
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/manhole/telnet.py#L109-L117
|
from twisted.protocols import telnet
from twisted.internet import protocol
from twisted.python import log, failure
import string, copy, sys
from cStringIO import StringIO
class Shell(telnet.Telnet):
def connectionMade(self):
telnet.Telnet.connectionMade(self)
self.lineBuffer = []
def loggedIn(self):
self.transport.write(">>> ")
def checkUserAndPass(self, username, password):
return ((self.factory.username == username) and (password == self.factory.password))
def write(self, data):
self.transport.write(data)
def telnet_Command(self, cmd):
if self.lineBuffer:
if not cmd:
cmd = string.join(self.lineBuffer, '\n') + '\n\n\n'
self.doCommand(cmd)
self.lineBuffer = []
return "Command"
else:
self.lineBuffer.append(cmd)
self.transport.write("... ")
return "Command"
else:
self.doCommand(cmd)
return "Command"
def doCommand(self, cmd):
fn = '$telnet$'
result = None
try:
out = sys.stdout
sys.stdout = self
try:
code = compile(cmd,fn,'eval')
result = eval(code, self.factory.namespace)
except:
try:
code = compile(cmd, fn, 'exec')
exec code in self.factory.namespace
except SyntaxError, e:
if not self.lineBuffer and str(e)[:14] == "unexpected EOF":
self.lineBuffer.append(cmd)
self.transport.write("... ")
return
else:
failure.Failure().printTraceback(file=self)
log.deferr()
self.write('\r\n>>> ')
return
except:
io = StringIO()
failure.Failure().printTraceback(file=self)
log.deferr()
self.write('\r\n>>> ')
return
finally:
sys.stdout = out
self.factory.namespace['_'] = result
if result is not None:
self.transport.write(repr(result))
self.transport.write('\r\n')
self.transport.write(">>> ")
class ShellFactory(protocol.Factory):
username = "admin"
password = "admin"
protocol = Shell
service = None
def __init__(self):
self.namespace = {
'factory': self,
'service': None,
'_': None
}
def setService(self, service):
self.namespace['service'] = self.service = service
|
Apache License 2.0
|
netflix/dispatch
|
src/dispatch/incident_role/views.py
|
get_incident_roles
|
python
|
def get_incident_roles(
*,
db_session: Session = Depends(get_db),
role: ParticipantRoleType,
project_name: str = Query(..., alias="projectName"),
):
project = project_service.get_by_name_or_raise(
db_session=db_session, project_in=ProjectRead(name=project_name)
)
policies = get_all_by_role(db_session=db_session, role=role, project_id=project.id)
return {"policies": policies}
|
Get all incident role mappings.
|
https://github.com/netflix/dispatch/blob/e30705938e970d8ef0dfdd04246a3f3004a6a44f/src/dispatch/incident_role/views.py#L22-L33
|
from fastapi import APIRouter, Depends, Query
from sqlalchemy.orm import Session
from dispatch.database.core import get_db
from dispatch.auth.permissions import SensitiveProjectActionPermission, PermissionsDependency
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.project.models import ProjectRead
from dispatch.project import service as project_service
from .models import (
IncidentRoles,
IncidentRolesCreateUpdate,
)
from .service import create_or_update, get_all_by_role
router = APIRouter()
@router.get("/{role}", response_model=IncidentRoles)
|
Apache License 2.0
|
infinidat/infi.clickhouse_orm
|
src/infi/clickhouse_orm/funcs.py
|
aggregate
|
python
|
def aggregate(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.f_type = 'aggregate'
return wrapper
|
Decorates a function to mark it as an aggregate function.
The metaclass automatically generates combinators such as "OrDefault",
"OrNull", "If" etc. for the decorated function.
|
https://github.com/infinidat/infi.clickhouse_orm/blob/76d432b838953fb7f762a9fe55fd94ce47520f01/src/infi/clickhouse_orm/funcs.py#L34-L44
|
from functools import wraps
from inspect import signature, Parameter
from types import FunctionType
from .utils import is_iterable, comma_join, NO_VALUE, arg_to_sql
from .query import Cond, QuerySet
def binary_operator(func):
@wraps(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
ret.is_binary_operator = True
return ret
return wrapper
def type_conversion(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.f_type = 'type_conversion'
return wrapper
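A small usage sketch of the aggregate decorator defined above (the avg function is made up; the combinator generation mentioned in the docstring happens in the library's metaclass and is not shown here):

@aggregate
def avg(expr):
    # Hypothetical aggregate function, used only for illustration.
    return 'avg(%s)' % expr

print(avg('x'))       # avg(x)
print(avg.f_type)     # aggregate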
|
BSD 3-Clause New or Revised License
|
lisa-lab/pylearn2
|
pylearn2/sandbox/rnn/space/__init__.py
|
SequenceDataSpace._check_sizes
|
python
|
def _check_sizes(self, space):
my_dimension = self.get_total_dimension()
other_dimension = space.get_total_dimension()
if my_dimension != other_dimension:
if isinstance(space, Conv2DSpace):
if my_dimension * space.shape[0] != other_dimension:
raise ValueError(str(self)+" with total dimension " +
str(my_dimension) +
" can't format a batch into " +
str(space) + "because its total dimension\
is " +
str(other_dimension))
|
Called by self._format_as(space), to check whether self and space
have compatible sizes. Throws a ValueError if they don't.
|
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/sandbox/rnn/space/__init__.py#L157-L173
|
from __future__ import print_function
from functools import wraps
import numpy as np
from theano import scan, config
from theano import tensor
from theano.tensor import TensorType
from pylearn2 import space
from pylearn2.space import Conv2DSpace
from pylearn2.utils import is_iterable
class SequenceSpace(space.CompositeSpace):
def __init__(self, space):
self.space = space
self.mask_space = SequenceMaskSpace()
self.data_space = SequenceDataSpace(space)
self.dim = space.get_total_dimension()
super(SequenceSpace, self).__init__([self.data_space, self.mask_space])
self._dtype = self._clean_dtype_arg(space.dtype)
@wraps(space.Space.__eq__)
def __eq__(self, other):
if (not isinstance(other, self.__class__) and
not issubclass(self.__class__, other)):
return False
return self.space == other.space
@wraps(space.Space.make_theano_batch)
def make_theano_batch(self, name=None, dtype=None, batch_size=None):
data_batch = self.data_space.make_theano_batch(name=name, dtype=dtype,
batch_size=batch_size)
mask_batch = self.mask_space.make_theano_batch(name=name, dtype=dtype,
batch_size=batch_size)
return (data_batch, mask_batch)
@wraps(space.Space._batch_size_impl)
def _batch_size_impl(self, is_numeric, batch):
return self.data_space._batch_size_impl(is_numeric, batch[0])
@wraps(space.Space.get_total_dimension)
def get_total_dimension(self):
return self.space.get_total_dimension()
@wraps(space.Space._validate_impl)
def _validate_impl(self, is_numeric, batch):
assert is_iterable(batch) and len(batch) == 2
self.data_space._validate_impl(is_numeric, batch[0])
self.mask_space._validate_impl(is_numeric, batch[1])
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, self.space)
@wraps(space.Space._format_as_impl)
def _format_as_impl(self, is_numeric, batch, space):
assert isinstance(space, SequenceSpace)
if is_numeric:
rval = np.apply_over_axes(
lambda batch, axis: self.space._format_as_impl(
is_numeric=is_numeric,
batch=batch,
space=space.space),
batch, 0)
else:
            raise NotImplementedError("Can't convert SequenceSpace Theano variables")
return rval
class SequenceDataSpace(space.SimplyTypedSpace):
def __init__(self, space):
self.dim = space.get_total_dimension()
self.space = space
self._dtype = self._clean_dtype_arg(space.dtype)
super(SequenceDataSpace, self).__init__(space.dtype)
def __eq__(self, other):
if not isinstance(other, SequenceDataSpace):
return False
return self.space == other.space
def __hash__(self):
return hash(self.space)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, self.space)
@wraps(space.Space._format_as_impl)
def _format_as_impl(self, is_numeric, batch, space):
if space == self:
return batch
else:
if isinstance(space, SequenceDataSpace):
if is_numeric:
formatted_batch = np.transpose(np.asarray([
self.space._format_as_impl(is_numeric, sample,
space.space)
for sample in np.transpose(batch, (1, 0, 2))
]), (1, 0, 2))
else:
formatted_batch, _ = scan(
fn=lambda elem: self.space._format_as_impl(
is_numeric, elem, space.space),
sequences=[batch]
)
return formatted_batch
elif isinstance(space, space.VectorSpace):
row = batch.shape[0] * batch.shape[1]
col = self.dim
result = tensor.reshape(batch,
newshape=[row, col],
ndim=2)
return space._cast(result, space.dtype)
elif isinstance(space, space.Conv2DSpace):
result = batch.dimshuffle(1, 0, 'x', 2)
b01c_shape = [result.shape[0],
space.shape[0],
space.shape[1],
space.num_channels]
result = result.flatten()
result = tensor.reshape(result,
newshape=b01c_shape,
ndim=4)
return space._cast(result, space.dtype)
else:
print('Unexpected space', space)
raise NotImplementedError
@wraps(space.Space._check_sizes)
|
BSD 3-Clause New or Revised License
|
discord-modmail/modmail
|
modmail/utils/cogs.py
|
BitwiseAutoEnum._generate_next_value_
|
python
|
def _generate_next_value_(name, start, count, last_values) -> int:
return 1 << count
|
Override the default enum auto() counter to return increasing powers of two: 1, 2, 4, 8, ...
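A self-contained sketch of the resulting bit-flag pattern (the Feature names are invented for illustration):

from enum import IntEnum, auto

class BitwiseAutoEnum(IntEnum):
    # auto() normally counts 1, 2, 3, ...; this override yields 1, 2, 4, 8, ...
    def _generate_next_value_(name, start, count, last_values) -> int:
        return 1 << count

class Feature(BitwiseAutoEnum):
    COMMANDS = auto()    # 1
    LISTENERS = auto()   # 2
    TASKS = auto()       # 4

combined = Feature.COMMANDS | Feature.TASKS
print(int(combined))                    # 5
print(bool(combined & Feature.TASKS))   # True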
|
https://github.com/discord-modmail/modmail/blob/4aa80915dd370e7db55ec7ac039118a3b025f080/modmail/utils/cogs.py#L10-L12
|
from dataclasses import dataclass
from enum import IntEnum, auto
from discord.ext import commands
class BitwiseAutoEnum(IntEnum):
|
MIT License
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/pymdownx/emoji.py
|
EmojiPattern._get_category
|
python
|
def _get_category(self, emoji):
return emoji.get('category')
|
Get the category.
|
https://github.com/facelessuser/sublime-markdown-popups/blob/aeb7586da26fe46b7764cf1e2832336bc306195d/st3/mdpopups/pymdownx/emoji.py#L310-L313
|
from ..markdown import Extension
from ..markdown.inlinepatterns import InlineProcessor
from ..markdown import util as md_util
import xml.etree.ElementTree as etree
import inspect
import copy
import warnings
from . import util
RE_EMOJI = r'(:[+\-\w]+:)'
SUPPORTED_INDEXES = ('emojione', 'gemoji', 'twemoji')
UNICODE_VARIATION_SELECTOR_16 = 'fe0f'
EMOJIONE_SVG_CDN = 'https://cdnjs.cloudflare.com/ajax/libs/emojione/2.2.7/assets/svg/'
EMOJIONE_PNG_CDN = 'https://cdnjs.cloudflare.com/ajax/libs/emojione/2.2.7/assets/png/'
TWEMOJI_SVG_CDN = 'https://twemoji.maxcdn.com/v/latest/svg/'
TWEMOJI_PNG_CDN = 'https://twemoji.maxcdn.com/v/latest/72x72/'
GITHUB_UNICODE_CDN = 'https://github.githubassets.com/images/icons/emoji/unicode/'
GITHUB_CDN = 'https://github.githubassets.com/images/icons/emoji/'
NO_TITLE = 'none'
LONG_TITLE = 'long'
SHORT_TITLE = 'short'
VALID_TITLE = (LONG_TITLE, SHORT_TITLE, NO_TITLE)
UNICODE_ENTITY = 'html_entity'
UNICODE_ALT = ('unicode', UNICODE_ENTITY)
LEGACY_ARG_COUNT = 8
MSG_INDEX_WARN = """Using emoji indexes with no arguments is now deprecated.
Emoji indexes now take 2 arguments: 'options' and 'md'.
Please update your custom index accordingly.
"""
def add_attriubtes(options, attributes):
attr = options.get('attributes', {})
if attr:
for k, v in attr.items():
attributes[k] = v
def emojione(options, md):
from . import emoji1_db as emoji_map
return {
"name": emoji_map.name,
"emoji": copy.deepcopy(emoji_map.emoji),
"aliases": copy.deepcopy(emoji_map.aliases)
}
def gemoji(options, md):
from . import gemoji_db as emoji_map
return {
"name": emoji_map.name,
"emoji": copy.deepcopy(emoji_map.emoji),
"aliases": copy.deepcopy(emoji_map.aliases)
}
def twemoji(options, md):
from . import twemoji_db as emoji_map
return {
"name": emoji_map.name,
"emoji": copy.deepcopy(emoji_map.emoji),
"aliases": copy.deepcopy(emoji_map.aliases)
}
def to_png(index, shortname, alias, uc, alt, title, category, options, md):
if index == 'gemoji':
def_image_path = GITHUB_UNICODE_CDN
def_non_std_image_path = GITHUB_CDN
elif index == 'twemoji':
        def_image_path = TWEMOJI_PNG_CDN
        def_non_std_image_path = TWEMOJI_PNG_CDN
else:
def_image_path = EMOJIONE_PNG_CDN
def_non_std_image_path = EMOJIONE_PNG_CDN
is_unicode = uc is not None
classes = options.get('classes', index)
if is_unicode:
image_path = options.get('image_path', def_image_path)
else:
image_path = options.get('non_standard_image_path', def_non_std_image_path)
src = "%s%s.png" % (
image_path,
uc if is_unicode else shortname[1:-1]
)
attributes = {
"class": classes,
"alt": alt,
"src": src
}
if title:
attributes['title'] = title
add_attriubtes(options, attributes)
return etree.Element("img", attributes)
def to_svg(index, shortname, alias, uc, alt, title, category, options, md):
if index == 'twemoji':
svg_path = TWEMOJI_SVG_CDN
else:
svg_path = EMOJIONE_SVG_CDN
attributes = {
"class": options.get('classes', index),
"alt": alt,
"src": "%s%s.svg" % (
options.get('image_path', svg_path),
uc
)
}
if title:
attributes['title'] = title
add_attriubtes(options, attributes)
return etree.Element("img", attributes)
def to_png_sprite(index, shortname, alias, uc, alt, title, category, options, md):
attributes = {
"class": '%(class)s-%(size)s-%(category)s _%(unicode)s' % {
"class": options.get('classes', index),
"size": options.get('size', '64'),
"category": (category if category else ''),
"unicode": uc
}
}
if title:
attributes['title'] = title
add_attriubtes(options, attributes)
el = etree.Element("span", attributes)
el.text = md_util.AtomicString(alt)
return el
def to_svg_sprite(index, shortname, alias, uc, alt, title, category, options, md):
xlink_href = '%s#emoji-%s' % (
options.get('image_path', './../assets/sprites/emojione.sprites.svg'), uc
)
svg = etree.Element("svg", {"class": options.get('classes', index)})
desc = etree.SubElement(svg, 'description')
desc.text = md_util.AtomicString(alt)
etree.SubElement(svg, 'use', {'xlink:href': xlink_href})
return svg
def to_alt(index, shortname, alias, uc, alt, title, category, options, md):
return md.htmlStash.store(alt)
class EmojiPattern(InlineProcessor):
def __init__(self, pattern, config, md):
InlineProcessor.__init__(self, pattern, md)
title = config['title']
alt = config['alt']
self.options = config['options']
self._set_index(config["emoji_index"])
self.unicode_alt = alt in UNICODE_ALT
self.encoded_alt = alt == UNICODE_ENTITY
self.remove_var_sel = config['remove_variation_selector']
self.title = title if title in VALID_TITLE else NO_TITLE
self.generator = config['emoji_generator']
def _set_index(self, index):
if len(inspect.getfullargspec(index).args):
self.emoji_index = index(self.options, self.md)
else:
warnings.warn(MSG_INDEX_WARN, util.PymdownxDeprecationWarning)
self.emoji_index = index()
def _remove_variation_selector(self, value):
return value.replace('-' + UNICODE_VARIATION_SELECTOR_16, '')
def _get_unicode_char(self, value):
return ''.join([util.get_char(int(c, 16)) for c in value.split('-')])
def _get_unicode(self, emoji):
uc = emoji.get('unicode')
uc_alt = emoji.get('unicode_alt', uc)
if uc_alt and self.remove_var_sel:
uc_alt = self._remove_variation_selector(uc_alt)
return uc, uc_alt
def _get_title(self, shortname, emoji):
if self.title == LONG_TITLE:
title = emoji['name']
elif self.title == SHORT_TITLE:
title = shortname
else:
title = None
return title
def _get_alt(self, shortname, uc_alt):
if uc_alt is None or not self.unicode_alt:
alt = shortname
else:
alt = self._get_unicode_char(uc_alt)
if self.encoded_alt:
alt = ''.join(
[md_util.AMP_SUBSTITUTE + ('#x%04x;' % util.get_ord(point)) for point in util.get_code_points(alt)]
)
return alt
|
MIT License
|
python-useful-helpers/logwrap
|
setup.py
|
AllowFailRepair.run
|
python
|
def run(self) -> None:
try:
build_ext.build_ext.run(self)
build_dir = os.path.abspath(self.build_lib)
root_dir = os.path.abspath(os.path.join(__file__, ".."))
target_dir = build_dir if not self.inplace else root_dir
src_file = os.path.join(PACKAGE_NAME, "__init__.py")
src = os.path.join(root_dir, src_file)
dst = os.path.join(target_dir, src_file)
if src != dst:
shutil.copyfile(src, dst)
except (
distutils.errors.DistutilsPlatformError,
FileNotFoundError,
) as exc:
raise BuildFailed() from exc
|
Run the build.
:raises BuildFailed: The build failed and the pure-Python code should be used instead.
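The class is typically used in an optional-extension pattern; a rough sketch under assumptions (it reuses BuildFailed, AllowFailRepair and EXT_MODULES from the snippet below and is not the project's actual setup() call):

import setuptools

def run_setup(ext_modules):
    # Hypothetical wrapper around setuptools.setup(); metadata is made up.
    setuptools.setup(
        name="example-package",
        version="0.0.0",
        ext_modules=ext_modules,
        cmdclass={"build_ext": AllowFailRepair} if ext_modules else {},
    )

try:
    run_setup(EXT_MODULES)   # try to compile the Cython extensions
except BuildFailed:
    run_setup([])            # fall back to the pure-Python implementation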
|
https://github.com/python-useful-helpers/logwrap/blob/eb88b70310c723f6ae4cbc655a7f8952423f6142/setup.py#L89-L113
|
from __future__ import annotations
import ast
import distutils.errors
import os.path
import shutil
import sys
import typing
from distutils.command import build_ext
import setuptools
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
PACKAGE_NAME = "logwrap"
with open(os.path.join(os.path.dirname(__file__), PACKAGE_NAME, "__init__.py")) as f:
SOURCE = f.read()
with open("requirements.txt") as f:
REQUIRED = f.read().splitlines()
with open("README.rst") as f:
LONG_DESCRIPTION = f.read()
if cythonize is not None:
if "win32" != sys.platform:
REQUIRES_OPTIMIZATION = [
setuptools.Extension("logwrap.repr_utils", ["logwrap/repr_utils.pyx"]),
setuptools.Extension("logwrap.log_wrap", ["logwrap/log_wrap.pyx"]),
]
INTERFACES = ["log_wrap.pxd", "repr_utils.pxd"]
else:
REQUIRES_OPTIMIZATION = [
setuptools.Extension("logwrap.repr_utils", ["logwrap/repr_utils.pyx"]),
]
INTERFACES = ["repr_utils.pxd"]
EXT_MODULES = cythonize(
module_list=REQUIRES_OPTIMIZATION,
compiler_directives={
"always_allow_keywords": True,
"binding": True,
"embedsignature": True,
"overflowcheck": True,
"language_level": 3,
},
)
else:
REQUIRES_OPTIMIZATION = []
INTERFACES = []
EXT_MODULES = []
class BuildFailed(Exception):
class AllowFailRepair(build_ext.build_ext):
|
Apache License 2.0
|
bitmovin/bitmovin-api-sdk-python
|
bitmovin_api_sdk/models/muxing_information_video_track.py
|
MuxingInformationVideoTrack.__eq__
|
python
|
def __eq__(self, other):
if not isinstance(other, MuxingInformationVideoTrack):
return False
return self.__dict__ == other.__dict__
|
Returns true if both objects are equal
|
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/muxing_information_video_track.py#L550-L555
|
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
import pprint
import six
class MuxingInformationVideoTrack(object):
@poscheck_model
def __init__(self,
index=None,
codec=None,
codec_iso=None,
bit_rate=None,
rate=None,
pixel_format=None,
frame_mode=None,
frame_width=None,
frame_height=None,
frame_rate=None,
start_time=None,
duration=None,
number_of_frames=None,
aspect_ratio=None):
self._index = None
self._codec = None
self._codec_iso = None
self._bit_rate = None
self._rate = None
self._pixel_format = None
self._frame_mode = None
self._frame_width = None
self._frame_height = None
self._frame_rate = None
self._start_time = None
self._duration = None
self._number_of_frames = None
self._aspect_ratio = None
self.discriminator = None
if index is not None:
self.index = index
if codec is not None:
self.codec = codec
if codec_iso is not None:
self.codec_iso = codec_iso
if bit_rate is not None:
self.bit_rate = bit_rate
if rate is not None:
self.rate = rate
if pixel_format is not None:
self.pixel_format = pixel_format
if frame_mode is not None:
self.frame_mode = frame_mode
if frame_width is not None:
self.frame_width = frame_width
if frame_height is not None:
self.frame_height = frame_height
if frame_rate is not None:
self.frame_rate = frame_rate
if start_time is not None:
self.start_time = start_time
if duration is not None:
self.duration = duration
if number_of_frames is not None:
self.number_of_frames = number_of_frames
if aspect_ratio is not None:
self.aspect_ratio = aspect_ratio
@property
def openapi_types(self):
types = {
'index': 'int',
'codec': 'string_types',
'codec_iso': 'string_types',
'bit_rate': 'int',
'rate': 'int',
'pixel_format': 'string_types',
'frame_mode': 'string_types',
'frame_width': 'int',
'frame_height': 'int',
'frame_rate': 'string_types',
'start_time': 'float',
'duration': 'float',
'number_of_frames': 'int',
'aspect_ratio': 'string_types'
}
return types
@property
def attribute_map(self):
attributes = {
'index': 'index',
'codec': 'codec',
'codec_iso': 'codecIso',
'bit_rate': 'bitRate',
'rate': 'rate',
'pixel_format': 'pixelFormat',
'frame_mode': 'frameMode',
'frame_width': 'frameWidth',
'frame_height': 'frameHeight',
'frame_rate': 'frameRate',
'start_time': 'startTime',
'duration': 'duration',
'number_of_frames': 'numberOfFrames',
'aspect_ratio': 'aspectRatio'
}
return attributes
@property
def index(self):
return self._index
@index.setter
def index(self, index):
if index is not None:
if not isinstance(index, int):
raise TypeError("Invalid type for `index`, type has to be `int`")
self._index = index
@property
def codec(self):
return self._codec
@codec.setter
def codec(self, codec):
if codec is not None:
if not isinstance(codec, string_types):
raise TypeError("Invalid type for `codec`, type has to be `string_types`")
self._codec = codec
@property
def codec_iso(self):
return self._codec_iso
@codec_iso.setter
def codec_iso(self, codec_iso):
if codec_iso is not None:
if not isinstance(codec_iso, string_types):
raise TypeError("Invalid type for `codec_iso`, type has to be `string_types`")
self._codec_iso = codec_iso
@property
def bit_rate(self):
return self._bit_rate
@bit_rate.setter
def bit_rate(self, bit_rate):
if bit_rate is not None:
if not isinstance(bit_rate, int):
raise TypeError("Invalid type for `bit_rate`, type has to be `int`")
self._bit_rate = bit_rate
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, rate):
if rate is not None:
if not isinstance(rate, int):
raise TypeError("Invalid type for `rate`, type has to be `int`")
self._rate = rate
@property
def pixel_format(self):
return self._pixel_format
@pixel_format.setter
def pixel_format(self, pixel_format):
if pixel_format is not None:
if not isinstance(pixel_format, string_types):
raise TypeError("Invalid type for `pixel_format`, type has to be `string_types`")
self._pixel_format = pixel_format
@property
def frame_mode(self):
return self._frame_mode
@frame_mode.setter
def frame_mode(self, frame_mode):
if frame_mode is not None:
if not isinstance(frame_mode, string_types):
raise TypeError("Invalid type for `frame_mode`, type has to be `string_types`")
self._frame_mode = frame_mode
@property
def frame_width(self):
return self._frame_width
@frame_width.setter
def frame_width(self, frame_width):
if frame_width is not None:
if not isinstance(frame_width, int):
raise TypeError("Invalid type for `frame_width`, type has to be `int`")
self._frame_width = frame_width
@property
def frame_height(self):
return self._frame_height
@frame_height.setter
def frame_height(self, frame_height):
if frame_height is not None:
if not isinstance(frame_height, int):
raise TypeError("Invalid type for `frame_height`, type has to be `int`")
self._frame_height = frame_height
@property
def frame_rate(self):
return self._frame_rate
@frame_rate.setter
def frame_rate(self, frame_rate):
if frame_rate is not None:
if not isinstance(frame_rate, string_types):
raise TypeError("Invalid type for `frame_rate`, type has to be `string_types`")
self._frame_rate = frame_rate
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, start_time):
if start_time is not None:
if not isinstance(start_time, (float, int)):
raise TypeError("Invalid type for `start_time`, type has to be `float`")
self._start_time = start_time
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, duration):
if duration is not None:
if not isinstance(duration, (float, int)):
raise TypeError("Invalid type for `duration`, type has to be `float`")
self._duration = duration
@property
def number_of_frames(self):
return self._number_of_frames
@number_of_frames.setter
def number_of_frames(self, number_of_frames):
if number_of_frames is not None:
if not isinstance(number_of_frames, int):
raise TypeError("Invalid type for `number_of_frames`, type has to be `int`")
self._number_of_frames = number_of_frames
@property
def aspect_ratio(self):
return self._aspect_ratio
@aspect_ratio.setter
def aspect_ratio(self, aspect_ratio):
if aspect_ratio is not None:
if not isinstance(aspect_ratio, string_types):
raise TypeError("Invalid type for `aspect_ratio`, type has to be `string_types`")
self._aspect_ratio = aspect_ratio
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
|
MIT License
|
yinboc/few-shot-meta-baseline
|
meta-dataset/meta_dataset/trainer.py
|
compute_train_class_proportions
|
python
|
def compute_train_class_proportions(episode, shots, dataset_spec):
num_dataset_classes = len(dataset_spec.images_per_class)
num_images_per_class = [
dataset_spec.get_total_images_per_class(class_id)
for class_id in range(num_dataset_classes)
]
class_ids, _ = tf.unique(episode.train_class_ids)
num_classes = tf.shape(num_images_per_class)[0]
check_valid_inds_op = tf.assert_less(class_ids, num_classes)
with tf.control_dependencies([check_valid_inds_op]):
num_images_per_class = tf.gather(num_images_per_class, class_ids)
class_props = tf.truediv(shots, num_images_per_class)
return class_props
|
Computes the proportion of each class' examples in the support set.
Args:
episode: An EpisodeDataset.
shots: A 1D Tensor whose length is the `way' of the episode that stores the
shots for this episode.
dataset_spec: A DatasetSpecification.
Returns:
class_props: A 1D Tensor whose length is the `way' of the episode, storing
for each class the proportion of its examples that are in the support set.
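The underlying arithmetic, illustrated with made-up numbers in plain Python (the TF code above performs the same per-class division after validating the class ids):

# Dataset-wide totals for the 3 classes selected in this episode,
# and the number of support ("train") examples drawn for each.
num_images_per_class = [100, 40, 250]
shots = [5, 10, 25]

class_props = [s / n for s, n in zip(shots, num_images_per_class)]
print(class_props)   # [0.05, 0.25, 0.1]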
|
https://github.com/yinboc/few-shot-meta-baseline/blob/779fae39dad3537e7c801049c858923e2a352dfe/meta-dataset/meta_dataset/trainer.py#L146-L179
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import logging
import gin.tf
from meta_dataset import learner
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import learning_spec
from meta_dataset.data import pipeline
from meta_dataset.data import providers
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
ENABLE_TF_OPTIMIZATIONS = True
ENABLE_DATA_OPTIMIZATIONS = True
EMBEDDING_KEYWORDS = ('conv', 'resnet')
DATASETS_WITH_EXAMPLE_SPLITS = ()
TF_DATA_OPTIONS = tf.data.Options()
if not ENABLE_DATA_OPTIMIZATIONS:
TF_DATA_OPTIONS.experimental_optimization.apply_default_optimizations = False
NAME_TO_LEARNER = {
'Baseline': learner.BaselineLearner,
'BaselineFinetune': learner.BaselineFinetuneLearner,
'MatchingNet': learner.MatchingNetworkLearner,
'PrototypicalNet': learner.PrototypicalNetworkLearner,
'MAML': learner.MAMLLearner,
'RelationNet': learner.RelationNetworkLearner,
}
BATCH_LEARNER_NAMES = ['Baseline', 'BaselineFinetune']
EPISODIC_LEARNER_NAMES = [
'MatchingNet', 'PrototypicalNet', 'MAML', 'RelationNet'
]
BATCH_LEARNERS = [NAME_TO_LEARNER[name] for name in BATCH_LEARNER_NAMES]
EPISODIC_LEARNERS = [NAME_TO_LEARNER[name] for name in EPISODIC_LEARNER_NAMES]
TRAIN_SPLIT = 'train'
VALID_SPLIT = 'valid'
TEST_SPLIT = 'test'
class UnexpectedSplitError(ValueError):
def __init__(self,
unexpected_split,
expected_splits=(TRAIN_SPLIT, TEST_SPLIT, VALID_SPLIT)):
super(UnexpectedSplitError,
self).__init__('Split must be one of {}, but received `{}`. '.format(
expected_splits, unexpected_split))
@gin.configurable('benchmark')
def get_datasets_and_restrictions(train_datasets='',
eval_datasets='',
restrict_classes=None,
restrict_num_per_class=None):
if restrict_classes is None:
restrict_classes = {}
if restrict_num_per_class is None:
restrict_num_per_class = {}
train_datasets = [d.strip() for d in train_datasets.split(',')]
eval_datasets = [d.strip() for d in eval_datasets.split(',')]
return train_datasets, eval_datasets, restrict_classes, restrict_num_per_class
def apply_dataset_options(dataset):
return dataset.with_options(TF_DATA_OPTIONS)
|
MIT License
|
jmhale/okta-awscli
|
oktaawscli/okta_auth.py
|
OktaAuth.primary_auth
|
python
|
def primary_auth(self):
auth_data = {
"username": self.username,
"password": self.password
}
self.session = requests.Session()
resp = self.session.post(self.auth_url, json=auth_data)
resp_json = resp.json()
self.cookies = resp.cookies
if 'status' in resp_json:
if resp_json['status'] == 'MFA_REQUIRED':
factors_list = resp_json['_embedded']['factors']
state_token = resp_json['stateToken']
mfa_base = OktaAuthMfaBase(self.logger, state_token, self.factor, self.totp_token)
session_token = mfa_base.verify_mfa(factors_list)
elif resp_json['status'] == 'SUCCESS':
session_token = resp_json['sessionToken']
elif resp_json['status'] == 'MFA_ENROLL':
self.logger.warning("""MFA not enrolled. Cannot continue.
Please enroll an MFA factor in the Okta Web UI first!""")
sys.exit(2)
elif resp.status_code != 200:
self.logger.error(resp_json['errorSummary'])
sys.exit(1)
else:
self.logger.error(resp_json)
sys.exit(1)
return session_token
|
Performs primary auth against Okta
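For reference, a rough illustration of the two response shapes the branches above handle (the field names match the code; the values are invented):

# Successful primary authentication: the session token comes back directly.
success_resp = {"status": "SUCCESS", "sessionToken": "<session-token>"}

# MFA required: a state token plus the enrolled factors are returned,
# and verify_mfa() is driven from them.
mfa_resp = {
    "status": "MFA_REQUIRED",
    "stateToken": "<state-token>",
    "_embedded": {"factors": [{"factorType": "token:software:totp"}]},
}

for resp_json in (success_resp, mfa_resp):
    if resp_json["status"] == "SUCCESS":
        print("session token:", resp_json["sessionToken"])
    elif resp_json["status"] == "MFA_REQUIRED":
        factors = resp_json["_embedded"]["factors"]
        print("MFA needed, %d factor(s) enrolled" % len(factors))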
|
https://github.com/jmhale/okta-awscli/blob/c31882f53a6c6af5b8c40c0074c7de1636a5d9ce/oktaawscli/okta_auth.py#L43-L74
|
import sys
import re
from codecs import decode
import requests
from bs4 import BeautifulSoup as bs
from oktaawscli.okta_auth_mfa_base import OktaAuthMfaBase
from oktaawscli.okta_auth_mfa_app import OktaAuthMfaApp
from oktaawscli.util import input
class OktaAuth():
def __init__(self, okta_profile, verbose, logger, totp_token,
okta_auth_config, username, password, verify_ssl=True):
self.okta_profile = okta_profile
self.totp_token = totp_token
self.logger = logger
self.verbose = verbose
self.verify_ssl = verify_ssl
self.factor = okta_auth_config.factor_for(okta_profile)
self.app_link = okta_auth_config.app_link_for(okta_profile)
self.okta_auth_config = okta_auth_config
self.session = None
self.session_token = ""
self.session_id = ""
self.https_base_url = "https://%s" % okta_auth_config.base_url_for(okta_profile)
self.auth_url = "%s/api/v1/authn" % self.https_base_url
if username:
self.username = username
else:
self.username = okta_auth_config.username_for(okta_profile)
if password:
self.password = password
else:
self.password = okta_auth_config.password_for(okta_profile)
|
Apache License 2.0
|
darin-costello/spheropy
|
spheropy/DataStream.py
|
DataStreamManager.update
|
python
|
def update(self):
self._update_format()
self._update_list()
|
Updates internal variables to ensure data integrity.
This is called after each property is set.
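A reduced sketch of the setter pattern that drives this (simplified names; only one mask bit is shown):

_ACC_MASK = 0x0000E000

class MaskDemo(object):
    # Minimal stand-in for DataStreamManager's property/mask bookkeeping.
    def __init__(self):
        self._mask1 = 0
        self.update_calls = 0

    def update(self):
        # Stand-in for _update_format()/_update_list().
        self.update_calls += 1

    @property
    def acc(self):
        return bool(self._mask1 & _ACC_MASK)

    @acc.setter
    def acc(self, value):
        if value:
            self._mask1 |= _ACC_MASK
        else:
            self._mask1 &= ~_ACC_MASK
        self.update()   # called after each property is set

demo = MaskDemo()
demo.acc = True
print(demo.acc, demo.update_calls)   # True 1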
|
https://github.com/darin-costello/spheropy/blob/8c8f0ccc1c9688b3deeb3df188e1f095bfe791ab/spheropy/DataStream.py#L327-L333
|
from collections import namedtuple
import struct
import math
_ACC_RAW_MASK = 0xE0000000
_GYRO_RAW_MASK = 0x1C000000
_MOTOR_EMF_RAW_MASK = 0x00600000
_MOTOR_PWM_RAW_MASK = 0x00180000
_IMU_ANGLE_MASK = 0x00070000
_ACC_MASK = 0x0000E000
_GYRO_MASK = 0x00001C00
_MOTOR_EMF_MASK = 0x00000060
_QUATERNION_MASK = 0xF0000000
_ODOM_MASK = 0x0C000000
_ACCEL_ONE_MASK = 0x02000000
_VEL_MASK = 0x01800000
_ThreeDimCoord = namedtuple('ThreeDimCoord', ['x', 'y', 'z'])
_RightAndLeft = namedtuple('RightAndLeft', ['right', 'left'])
_LeftAndRight = namedtuple('LeftAndRight', ['left', 'right'])
_Angle = namedtuple('Angle', ['pitch', 'roll', 'yaw'])
_TwoDimCoord = namedtuple('TwoDimCoord', ['x', 'y'])
_Value = namedtuple('Value', ['value'])
_Quaternion = namedtuple('Quaternion', ['x', 'y', 'z', 'w'])
_DataInfo = namedtuple(
'DataInfo', ['name', 'tuple', 'size', 'mask', 'conversion'])
_ACC_RAW_CONV = 4 * 1e-3
_GYRO_RAW_CONV = 0.068 * (math.pi / 180.0)
_MOTOR_EMF_RAW_CONV = 22.5 * 1e-2
_MOTOR_PMW_CONV = 1
_IMU_ANGE_CONV = math.pi / 180.0
_ACC_CONV = (1.0 / 4096.0) * 9.80665
_GYRO_CONV = 0.1 * math.pi / 180.0
_MOTOR_EMF_CONV = 22.5 * 1e-2
_QUATERNION_CONV = 1e-4
_ODOM_CONV = 1e-2
_ACCELONE_CONV = 1e-3 * 9.80665
_VELOCITY_CONV = 1e-3
_ORDER1 = [
_DataInfo('acc_raw', _ThreeDimCoord, 3, _ACC_RAW_MASK, _ACC_CONV),
_DataInfo('gyro_raw', _ThreeDimCoord, 3, _GYRO_RAW_MASK, _GYRO_RAW_CONV),
_DataInfo('motor_emf_raw', _RightAndLeft, 2,
_MOTOR_EMF_RAW_MASK, _MOTOR_EMF_RAW_CONV),
_DataInfo('motor_pwm_raw', _LeftAndRight, 2,
_MOTOR_PWM_RAW_MASK, _MOTOR_EMF_RAW_CONV),
_DataInfo('imu_ange', _Angle, 3, _IMU_ANGLE_MASK, _IMU_ANGE_CONV),
_DataInfo('acc', _ThreeDimCoord, 3, _ACC_MASK, _ACC_CONV),
_DataInfo('gyro', _ThreeDimCoord, 3, _GYRO_MASK, _GYRO_CONV),
_DataInfo('motor_emf', _RightAndLeft, 2, _MOTOR_EMF_MASK, _MOTOR_EMF_CONV),
]
_ORDER2 = [
_DataInfo('quaternion', _Quaternion, 4,
_QUATERNION_MASK, _QUATERNION_CONV),
_DataInfo('odom', _TwoDimCoord, 2, _ODOM_MASK, _ODOM_CONV),
_DataInfo('accel_one', _Value, 1, _ACCEL_ONE_MASK, _ACCELONE_CONV),
_DataInfo('velocity', _TwoDimCoord, 2, _VEL_MASK, _VELOCITY_CONV)
]
class DataStreamManager(object):
def __init__(self, number_frames=1, convert=True):
self._mask1 = 0x00000000
self._mask2 = 0x00000000
self._format = ""
self._tuples = []
self.number_frames = number_frames
self.convert = convert
def _update_mask1(self, value, bitmask):
if value:
self._mask1 |= bitmask
else:
self._mask1 &= (~ bitmask)
self.update()
def _update_mask2(self, value, bitmask):
if value:
self._mask2 |= bitmask
else:
self._mask2 &= (~ bitmask)
self.update()
def copy(self):
stream = DataStreamManager()
stream._mask1 = self._mask1
stream._mask2 = self._mask2
stream.convert = self.convert
stream.number_frames = self.number_frames
stream.update()
return stream
@property
def acc_raw(self):
return bool(self._mask1 & _ACC_RAW_MASK)
@acc_raw.setter
def acc_raw(self, value):
self._update_mask1(value, _ACC_RAW_MASK)
@property
def gyro_raw(self):
return bool(self._mask1 & _GYRO_RAW_MASK)
@gyro_raw.setter
def gyro_raw(self, value):
self._update_mask1(value, _GYRO_RAW_MASK)
@property
def motor_emf_raw(self):
return bool(self._mask1 & _MOTOR_EMF_RAW_MASK)
@motor_emf_raw.setter
def motor_emf_raw(self, value):
self._update_mask1(value, _MOTOR_EMF_RAW_MASK)
@property
def motor_pwm_raw(self):
return bool(self._mask1 & _MOTOR_PWM_RAW_MASK)
@motor_pwm_raw.setter
def motor_pwm_raw(self, value):
self._update_mask1(value, _MOTOR_PWM_RAW_MASK)
@property
def imu_angle(self):
return bool(self._mask1 & _IMU_ANGLE_MASK)
@imu_angle.setter
def imu_angle(self, value):
self._update_mask1(value, _IMU_ANGLE_MASK)
@property
def acc(self):
return bool(self._mask1 & _ACC_MASK)
@acc.setter
def acc(self, value):
self._update_mask1(value, _ACC_MASK)
@property
def gyro(self):
return bool(self._mask1 & _GYRO_MASK)
@gyro.setter
def gyro(self, value):
self._update_mask1(value, _GYRO_MASK)
@property
def motor_emf(self):
return bool(self._mask1 & _MOTOR_EMF_MASK)
@motor_emf.setter
def motor_emf(self, value):
self._update_mask1(value, _MOTOR_EMF_MASK)
@property
def quaternion(self):
return bool(self._mask2 & _QUATERNION_MASK)
@quaternion.setter
def quaternion(self, value):
self._update_mask2(value, _QUATERNION_MASK)
@property
def odom(self):
return bool(self._mask2 & _ODOM_MASK)
@odom.setter
def odom(self, value):
self._update_mask2(value, _ODOM_MASK)
@property
def accel_one(self):
return bool(self._mask2 & _ACCEL_ONE_MASK)
@accel_one.setter
def accel_one(self, value):
self._update_mask2(value, _ACCEL_ONE_MASK)
@property
def velocity(self):
return bool(self._mask2 & _VEL_MASK)
@velocity.setter
def velocity(self, value):
self._update_mask2(value, _VEL_MASK)
def parse(self, data):
expected_items = (len(self._format) - 1) * 2
assert len(data) == expected_items * self.number_frames
buff = buffer(data)
result = []
for frame in range(0, self.number_frames):
data = struct.unpack_from(
self._format, buff, offset=frame * expected_items)
offset = 0
dic = {}
for i in self._tuples:
temp_list = []
for j in range(0, i.size):
to_add = i.conversion * data[offset + j] if self.convert else data[offset + j]
temp_list.append(to_add)
offset += i.size
dic[i.name] = i.tuple._make(temp_list)
result.append(dic)
return result
|
MIT License
|
googlearchive/compute-appengine-demo-suite-python
|
demo-suite/lib/google_cloud/gce.py
|
GceResource.set_defaults
|
python
|
def set_defaults(self):
if not self.name:
self.name = self.gce_project.settings['compute'][self.type]
|
Set any defaults.
|
https://github.com/googlearchive/compute-appengine-demo-suite-python/blob/93d130ee0f23571d209a07d35445c363452525c1/demo-suite/lib/google_cloud/gce.py#L439-L443
|
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import logging
import os
import lib_path
from apiclient import discovery
from apiclient import errors as api_errors
from apiclient import http
from google.appengine.api import memcache
import httplib2
import oauth2client.client as client
try:
import simplejson as json
except ImportError:
import json
import gce_exception as error
API = 'compute'
GCE_URL = 'https://www.googleapis.com/%s' % API
GOOGLE_PROJECT = 'centos-cloud'
class GceProject(object):
def __init__(
self, credentials, project_id=None, zone_name=None, settings=None):
settings_file = os.path.join(
os.path.dirname(__file__), '../../settings.json')
self.settings = json.loads(open(settings_file, 'r').read())
if settings:
self.settings.update(settings)
api_version = self.settings['compute']['api_version']
self.gce_url = '%s/%s' % (GCE_URL, api_version)
auth_http = self._auth_http(credentials)
self.service = discovery.build('compute', api_version, http=auth_http)
self.project_id = project_id
if not self.project_id:
self.project_id = self.settings['project']
self.zone_name = zone_name
if not self.zone_name:
self.zone_name = self.settings['compute']['zone']
def list_instances(self, zone_name=None, **args):
return self._list(Instance, zone_name=zone_name, **args)
def list_firewalls(self, **args):
return self._list(Firewall, **args)
def list_routes(self, **args):
return self._list(Route, **args)
def list_images(self, **args):
return self._list(Image, **args)
def list_disks(self, **args):
return self._list(Disk, **args)
def insert(self, resource):
resource.gce_project = self
request = self._insert_request(resource)
try:
self._run_request(request)
except error.GceError:
raise
except error.GceTokenError:
raise
def bulk_insert(self, resources):
batch = http.BatchHttpRequest()
for resource in resources:
resource.gce_project = self
batch.add(self._insert_request(resource), callback=self._batch_response)
try:
self._run_request(batch)
except error.GceError:
raise
except error.GceTokenError:
raise
def bulk_delete(self, resources):
batch = http.BatchHttpRequest()
for resource in resources:
resource.gce_project = self
batch.add(self._delete_request(resource), callback=self._batch_response)
try:
self._run_request(batch)
except error.GceError:
raise
except error.GceTokenError:
raise
def _list(self, resource_class, zone_name=None, **args):
resources = []
resource = resource_class()
resource.gce_project = self
request = self._list_request(resource, zone_name=zone_name, **args)
while request:
results = {}
try:
results = self._run_request(request)
except error.GceError:
raise
except error.GceTokenError:
raise
for result in results.get('items', []):
new_resource = resource_class()
new_resource.from_json(result)
resources.append(new_resource)
request = resource.service_resource().list_next(
self._list_request(resource, zone_name=zone_name, **args),
results)
return resources
def _insert_request(self, resource):
resource.set_defaults()
params = {'project': self.project_id, 'body': resource.json}
if resource.scope == 'zonal':
params['zone'] = self.zone_name
return resource.service_resource().insert(**params)
def _list_request(self, resource, zone_name=None, **args):
params = {'project': self.project_id}
if args:
params.update(args)
if resource.scope == 'zonal':
if not zone_name:
zone_name = self.zone_name
params['zone'] = zone_name
return resource.service_resource().list(**params)
def _delete_request(self, resource):
resource.set_defaults()
params = {'project': self.project_id, resource.type: resource.name}
if resource.scope == 'zonal':
params['zone'] = self.zone_name
return resource.service_resource().delete(**params)
def _run_request(self, request):
result = {}
try:
result = request.execute()
except httplib2.HttpLib2Error, e:
logging.error(e)
raise error.GceError('Transport Error occurred')
except client.AccessTokenRefreshError, e:
logging.error(e)
raise error.GceTokenError('Access Token refresh error')
except api_errors.BatchError, e:
logging.error(e)
logging.error('BatchError: %s %s' % (e.resp.status, e.content))
if e.resp.status != 200:
raise error.GceError(
'Batch Error: %s %s' % (e.resp.status, e.resp.reason))
except api_errors.HttpError, e:
logging.error(e)
raise error.GceError(
'HttpError: %s %s' % (e.resp.status, e.resp.reason))
return result
def _batch_response(self, request_id, response, exception):
if exception is not None:
logging.error(exception)
logging.error('API Request Error! ' + str(response))
def _auth_http(self, credentials):
http = httplib2.Http(memcache, timeout=30)
auth_http = credentials.authorize(http)
return auth_http
class GceResource(object):
def __init__(self, type, scope):
self.type = type
self.scope = scope
@property
def url(self):
project_id = None
if self.type == 'image':
project_id = self.project_id
else:
project_id = self.gce_project.project_id
if self.scope == 'zonal':
return '%s/projects/%s/zones/%s/%ss/%s' % (
self.gce_project.gce_url,
project_id,
self.zone.name,
self.type,
self.name)
if self.scope == 'global':
return '%s/projects/%s/global/%ss/%s' % (
self.gce_project.gce_url,
project_id,
self.type,
self.name)
|
Apache License 2.0
|
partho-maple/coding-interview-gym
|
leetcode.com/python/105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py
|
Solution.buildTree
|
python
|
def buildTree(self, preorder, inorder):
if len(inorder) == 0:
return None
rootIdxIntoInorder = inorder.index(preorder[0])
root = TreeNode(preorder[0])
root.left = self.buildTree(preorder[1:], inorder[:rootIdxIntoInorder])
root.right = self.buildTree(preorder[rootIdxIntoInorder + 1:], inorder[rootIdxIntoInorder + 1:])
return root
|
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
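A worked example using the TreeNode/Solution classes from this snippet (the standard sample input; each recursive call passes a longer preorder slice than strictly needed, but the inorder slice bounds the recursion):

# preorder = [3, 9, 20, 15, 7], inorder = [9, 3, 15, 20, 7]
# root 3 sits at index 1 of inorder, so [9] forms the left subtree and
# [15, 20, 7] the right subtree.
root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
print(root.val, root.left.val, root.right.val)     # 3 9 20
print(root.right.left.val, root.right.right.val)   # 15 7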
|
https://github.com/partho-maple/coding-interview-gym/blob/f11c78b6e42d1014296fc0f360aa6fc530600493/leetcode.com/python/105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py#L9-L21
|
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
|
MIT License
|
denisio/seno-blockchain
|
seno/util/condition_tools.py
|
parse_sexp_to_condition
|
python
|
def parse_sexp_to_condition(
sexp: Program,
) -> Tuple[Optional[Err], Optional[ConditionWithArgs]]:
as_atoms = sexp.as_atom_list()
if len(as_atoms) < 1:
return Err.INVALID_CONDITION, None
opcode = as_atoms[0]
try:
opcode = ConditionOpcode(opcode)
except ValueError:
opcode = ConditionOpcode.UNKNOWN
return None, ConditionWithArgs(opcode, as_atoms[1:])
|
Takes a ChiaLisp sexp and returns a ConditionWithArgs.
If parsing fails, returns an Err.
|
https://github.com/denisio/seno-blockchain/blob/c3c20dcd95d4087870f077e37a8a011c6016da2f/seno/util/condition_tools.py#L19-L36
|
from typing import Dict, List, Optional, Tuple, Set
from blspy import G1Element
from seno.types.announcement import Announcement
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.program import Program, SerializedProgram
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.types.condition_opcodes import ConditionOpcode
from seno.types.condition_with_args import ConditionWithArgs
from seno.util.clvm import int_from_bytes
from seno.util.errors import ConsensusError, Err
from seno.util.ints import uint64
|
Apache License 2.0
|
doist/todoist-python
|
todoist/managers/filters.py
|
FiltersManager.delete
|
python
|
def delete(self, filter_id):
cmd = {
"type": "filter_delete",
"uuid": self.api.generate_uuid(),
"args": {"id": filter_id},
}
self.queue.append(cmd)
|
Deletes a filter remotely.
|
https://github.com/doist/todoist-python/blob/d564db81932771541c3a718698f8018756c6091d/todoist/managers/filters.py#L41-L50
|
from .. import models
from .generic import AllMixin, GetByIdMixin, Manager, SyncMixin
class FiltersManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
state_name = "filters"
object_type = "filter"
def add(self, name, query, **kwargs):
obj = models.Filter({"name": name, "query": query}, self.api)
obj.temp_id = obj["id"] = self.api.generate_uuid()
obj.data.update(kwargs)
self.state[self.state_name].append(obj)
cmd = {
"type": "filter_add",
"temp_id": obj.temp_id,
"uuid": self.api.generate_uuid(),
"args": {key: obj.data[key] for key in obj.data if key != "id"},
}
self.queue.append(cmd)
return obj
def update(self, filter_id, **kwargs):
args = {"id": filter_id}
args.update(kwargs)
cmd = {
"type": "filter_update",
"uuid": self.api.generate_uuid(),
"args": args,
}
self.queue.append(cmd)
|
MIT License
|
holoviz/panel
|
panel/util.py
|
isdatetime
|
python
|
def isdatetime(value):
if is_series(value) and len(value):
return isinstance(value.iloc[0], datetime_types)
elif isinstance(value, np.ndarray):
return (value.dtype.kind == "M" or
(value.dtype.kind == "O" and len(value) and
isinstance(value[0], datetime_types)))
elif isinstance(value, list):
return all(isinstance(d, datetime_types) for d in value)
else:
return isinstance(value, datetime_types)
|
Whether the array or scalar is a recognized datetime type.
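A few illustrative calls (assuming panel and its dependencies are installed; the inputs are arbitrary):

import datetime as dt
import numpy as np
from panel.util import isdatetime

print(isdatetime(dt.datetime(2021, 1, 1)))                            # True  (scalar)
print(isdatetime(np.array(['2021-01-01'], dtype='datetime64[ns]')))   # True  (dtype kind 'M')
print(isdatetime([dt.date(2021, 1, 1), dt.date(2021, 1, 2)]))         # True  (list of dates)
print(isdatetime(np.array([1, 2, 3])))                                # False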
|
https://github.com/holoviz/panel/blob/c35200e885942a68ff152f7b3180e0aea72eafc4/panel/util.py#L245-L258
|
import base64
import datetime as dt
import inspect
import json
import numbers
import os
import re
import sys
import urllib.parse as urlparse
from collections.abc import MutableSequence, MutableMapping
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
from html import escape
from importlib import import_module
from six import string_types
import bokeh
import param
import numpy as np
datetime_types = (np.datetime64, dt.datetime, dt.date)
if sys.version_info.major > 2:
unicode = str
bokeh_version = LooseVersion(bokeh.__version__)
def isfile(path):
try:
return os.path.isfile(path)
except ValueError:
return False
def isurl(obj, formats):
if not isinstance(obj, string_types):
return False
lower_string = obj.lower().split('?')[0].split('#')[0]
return (
lower_string.startswith('http://')
or lower_string.startswith('https://')
) and (formats is None or any(lower_string.endswith('.'+fmt) for fmt in formats))
def is_dataframe(obj):
if 'pandas' not in sys.modules:
return False
import pandas as pd
return isinstance(obj, pd.DataFrame)
def is_series(obj):
if 'pandas' not in sys.modules:
return False
import pandas as pd
return isinstance(obj, pd.Series)
def hashable(x):
if isinstance(x, MutableSequence):
return tuple(x)
elif isinstance(x, MutableMapping):
return tuple([(k,v) for k,v in x.items()])
else:
return x
def isIn(obj, objs):
for o in objs:
if o is obj:
return True
try:
if o == obj:
return True
except Exception:
pass
return False
def indexOf(obj, objs):
for i, o in enumerate(objs):
if o is obj:
return i
try:
if o == obj:
return i
except Exception:
pass
raise ValueError('%s not in list' % obj)
def as_unicode(obj):
if sys.version_info.major < 3 and isinstance(obj, str):
obj = obj.decode('utf-8')
return unicode(obj)
def param_name(name):
match = re.findall(r'\D+(\d{5,})', name)
return name[:name.index(match[0])] if match else name
def unicode_repr(obj):
if sys.version_info.major == 2 and isinstance(obj, unicode):
return repr(obj)[1:]
return repr(obj)
def recursive_parameterized(parameterized, objects=None):
objects = [] if objects is None else objects
objects.append(parameterized)
for p in parameterized.param.values().values():
if isinstance(p, param.Parameterized) and not any(p is o for o in objects):
recursive_parameterized(p, objects)
return objects
def abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')):
if isinstance(value, list):
vrepr = '[' + ', '.join([abbreviated_repr(v) for v in value]) + ']'
if isinstance(value, param.Parameterized):
vrepr = type(value).__name__
else:
vrepr = repr(value)
if len(vrepr) > max_length:
abbrev = vrepr[max_length//2:]
natural_break = None
for brk in natural_breaks:
if brk in abbrev:
natural_break = abbrev.index(brk) + max_length//2
break
if natural_break and natural_break < max_length:
max_length = natural_break + 1
end_char = ''
if isinstance(value, list):
end_char = ']'
elif isinstance(value, OrderedDict):
end_char = '])'
elif isinstance(value, (dict, set)):
end_char = '}'
return vrepr[:max_length+1] + '...' + end_char
return vrepr
def param_reprs(parameterized, skip=None):
cls = type(parameterized).__name__
param_reprs = []
for p, v in sorted(parameterized.param.values().items()):
default = parameterized.param[p].default
equal = v is default
if not equal:
if isinstance(v, np.ndarray):
if isinstance(default, np.ndarray):
equal = np.array_equal(v, default, equal_nan=True)
else:
equal = False
else:
try:
equal = bool(v==default)
except Exception:
equal = False
if equal: continue
elif v is None: continue
elif isinstance(v, string_types) and v == '': continue
elif isinstance(v, list) and v == []: continue
elif isinstance(v, dict) and v == {}: continue
elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue
else: v = abbreviated_repr(v)
param_reprs.append('%s=%s' % (p, v))
return param_reprs
def full_groupby(l, key=lambda x: x):
d = defaultdict(list)
for item in l:
d[key(item)].append(item)
return d.items()
def get_method_owner(meth):
if inspect.ismethod(meth):
if sys.version_info < (3,0):
return meth.im_class if meth.im_self is None else meth.im_self
else:
return meth.__self__
def is_parameterized(obj):
return (isinstance(obj, param.Parameterized) or
(isinstance(obj, type) and issubclass(obj, param.Parameterized)))
|
BSD 3-Clause New or Revised License
|
alan-turing-institute/sktime
|
sktime/datasets/_data_io.py
|
load_japanese_vowels
|
python
|
def load_japanese_vowels(split=None, return_X_y=False):
name = "JapaneseVowels"
return _load_dataset(name, split, return_X_y)
|
Load the JapaneseVowels time series classification problem.
Parameters
----------
split: None or str{"train", "test"}, optional (default=None)
Whether to load the train or test partition of the problem. By
default it loads both.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a
single dataframe with columns for features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Notes
-----
Dimensionality: multivariate, 12
Series length: 7-29
Train cases: 270
Test cases: 370
Number of classes: 9
A UCI Archive dataset. Nine Japanese male speakers were recorded saying
the vowels 'a' and 'e'. A '12-degree
linear prediction analysis' is applied to the raw recordings to
obtain time-series with 12 dimensions and series lengths between 7 and 29.
The classification task is to predict the speaker. Therefore,
each instance is a transformed utterance,
12*29 values with a single class label attached, [1...9]. The given
training set comprises 30
utterances for each speaker; however, the test set has a varied
distribution based on external factors of
timing and experimental availability, between 24 and 88 instances per
speaker. Reference: M. Kudo, J. Toyama
and M. Shimbo. (1999). "Multidimensional Curve Classification Using
Passing-Through Regions". Pattern
Recognition Letters, Vol. 20, No. 11--13, pages 1103--1111.
Dataset details: http://timeseriesclassification.com/description.php?Dataset=JapaneseVowels
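A hedged usage sketch, assuming sktime is installed and the bundled JapaneseVowels data is available (otherwise the loader tries to download it from timeseriesclassification.com); the expected shapes follow the notes above.
from sktime.datasets import load_japanese_vowels
X_train, y_train = load_japanese_vowels(split="train", return_X_y=True)
X_test, y_test = load_japanese_vowels(split="test", return_X_y=True)
print(X_train.shape)  # expected (270, 12): 270 training cases, 12 dimensions
print(len(y_test))    # expected 370 test cases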
|
https://github.com/alan-turing-institute/sktime/blob/b09d2db201f5380907088c6ffce036eab7083327/sktime/datasets/_data_io.py#L388-L435
|
import os
import shutil
import tempfile
import zipfile
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from sktime.utils.data_io import load_from_tsfile_to_dataframe
__all__ = [
"load_airline",
"load_arrow_head",
"load_gunpoint",
"load_osuleaf",
"load_italy_power_demand",
"load_basic_motions",
"load_japanese_vowels",
"load_shampoo_sales",
"load_longley",
"load_lynx",
"load_acsf1",
"load_uschange",
"load_UCR_UEA_dataset",
"load_PBS_dataset",
]
__author__ = [
"mloning",
"sajaysurya",
"big-o",
"SebasKoel",
"Emiliathewolf",
"TonyBagnall",
"yairbeer",
]
DIRNAME = "data"
MODULE = os.path.dirname(__file__)
def _download_and_extract(url, extract_path=None):
file_name = os.path.basename(url)
dl_dir = tempfile.mkdtemp()
zip_file_name = os.path.join(dl_dir, file_name)
urlretrieve(url, zip_file_name)
if extract_path is None:
extract_path = os.path.join(MODULE, "data/%s/" % file_name.split(".")[0])
else:
extract_path = os.path.join(extract_path, "%s/" % file_name.split(".")[0])
try:
if not os.path.exists(extract_path):
os.makedirs(extract_path)
zipfile.ZipFile(zip_file_name, "r").extractall(extract_path)
shutil.rmtree(dl_dir)
return extract_path
except zipfile.BadZipFile:
shutil.rmtree(dl_dir)
if os.path.exists(extract_path):
shutil.rmtree(extract_path)
raise zipfile.BadZipFile(
"Could not unzip dataset. Please make sure the URL is valid."
)
def _list_downloaded_datasets(extract_path):
if extract_path is None:
data_dir = os.path.join(MODULE, DIRNAME)
else:
data_dir = extract_path
datasets = [
path
for path in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, path))
]
return datasets
def load_UCR_UEA_dataset(name, split=None, return_X_y=False, extract_path=None):
return _load_dataset(name, split, return_X_y, extract_path)
def _load_dataset(name, split, return_X_y, extract_path=None):
if extract_path is not None:
local_module = os.path.dirname(extract_path)
local_dirname = extract_path
else:
local_module = MODULE
local_dirname = DIRNAME
if not os.path.exists(os.path.join(local_module, local_dirname)):
os.makedirs(os.path.join(local_module, local_dirname))
if name not in _list_downloaded_datasets(extract_path):
url = "http://timeseriesclassification.com/Downloads/%s.zip" % name
try:
_download_and_extract(
url,
extract_path=extract_path,
)
except zipfile.BadZipFile as e:
raise ValueError(
"Invalid dataset name. ",
extract_path,
"Please make sure the dataset "
+ "is available on http://timeseriesclassification.com/.",
) from e
if isinstance(split, str):
split = split.upper()
if split in ("TRAIN", "TEST"):
fname = name + "_" + split + ".ts"
abspath = os.path.join(local_module, local_dirname, name, fname)
X, y = load_from_tsfile_to_dataframe(abspath)
elif split is None:
X = pd.DataFrame(dtype="object")
y = pd.Series(dtype="object")
for split in ("TRAIN", "TEST"):
fname = name + "_" + split + ".ts"
abspath = os.path.join(local_module, local_dirname, name, fname)
result = load_from_tsfile_to_dataframe(abspath)
X = pd.concat([X, pd.DataFrame(result[0])])
y = pd.concat([y, pd.Series(result[1])])
else:
raise ValueError("Invalid `split` value =", split)
if return_X_y:
return X, y
else:
X["class_val"] = pd.Series(y)
return X
def load_gunpoint(split=None, return_X_y=False):
name = "GunPoint"
return _load_dataset(name, split, return_X_y)
def load_osuleaf(split=None, return_X_y=False):
name = "OSULeaf"
return _load_dataset(name, split, return_X_y)
def load_italy_power_demand(split=None, return_X_y=False):
name = "ItalyPowerDemand"
return _load_dataset(name, split, return_X_y)
def load_unit_test(split=None, return_X_y=False):
name = "UnitTest"
return _load_dataset(name, split, return_X_y)
|
BSD 3-Clause New or Revised License
|
pcattori/maps
|
maps/__init__.py
|
namedfrozen
|
python
|
def namedfrozen(typename, fields, defaults={}):
return NamedFrozenMapMeta(typename, fields, defaults)
|
Creates a new class that inherits from :class:`maps.FrozenMap` that has the
specified fields as keys. Fields are accessible via bracket-notation
(i.e. ``__getitem__``) as well as dot-notation (i.e. ``__getattr__``).
Instances of the returned class are immutable.
:param str typename: Name of the new Map class
:param iterable fields: Names of the fields
:param mapping defaults: Maps default values to fields
:raises ValueError: if the type name or field names or defaults provided are not properly formatted
:return: The newly created class
:rtype: class
Usage::
>>> import maps
>>> RGB = maps.namedfrozen('RGB', ['red', 'green', 'blue'], defaults={'green': 127, 'blue': 80})
>>> coral = RGB(255)
>>> coral['red']
255
>>> coral.green
127
|
https://github.com/pcattori/maps/blob/bccd827e1a7ed946a28c9098e53d732b4d85fdbd/maps/__init__.py#L7-L30
|
from maps.fixedkeymap import FixedKeyMap
from maps.frozenmap import FrozenMap
from maps.nameddict import NamedDict
from maps.namedfixedkeymap import NamedFixedKeyMapMeta
from maps.namedfrozenmap import NamedFrozenMapMeta
|
MIT License
|
home-assistant-libs/pytradfri
|
pytradfri/gateway.py
|
Gateway.get_groups
|
python
|
def get_groups(self):
def process_result(result):
return [self.get_group(group) for group in result]
return Command("get", [ROOT_GROUPS], process_result=process_result)
|
Return the groups linked to the gateway.
Returns a Command.
|
https://github.com/home-assistant-libs/pytradfri/blob/ee51bde64deb7c9e3f4a97d01ee7591da7732314/pytradfri/gateway.py#L92-L102
|
from datetime import datetime
from .command import Command
from .const import (
ATTR_AUTH,
ATTR_COMMISSIONING_MODE,
ATTR_CURRENT_TIME_ISO8601,
ATTR_CURRENT_TIME_UNIX,
ATTR_FIRMWARE_VERSION,
ATTR_FIRST_SETUP,
ATTR_GATEWAY_FACTORY_DEFAULTS,
ATTR_GATEWAY_ID,
ATTR_GATEWAY_INFO,
ATTR_GATEWAY_REBOOT,
ATTR_HOMEKIT_ID,
ATTR_IDENTITY,
ATTR_NTP,
ATTR_PSK,
ROOT_DEVICES,
ROOT_GATEWAY,
ROOT_GROUPS,
ROOT_MOODS,
ROOT_SMART_TASKS,
)
from .device import Device
from .group import Group
from .mood import Mood
from .smart_task import SmartTask
class Gateway:
def generate_psk(self, identity):
def process_result(result):
return result[ATTR_PSK]
return Command(
"post",
[ROOT_GATEWAY, ATTR_AUTH],
{ATTR_IDENTITY: identity},
process_result=process_result,
)
def get_endpoints(self):
def process_result(result):
return [line.split(";")[0][2:-1] for line in result.split(",")]
return Command(
"get",
[".well-known", "core"],
parse_json=False,
process_result=process_result,
)
def get_devices(self):
def process_result(result):
return [self.get_device(dev) for dev in result]
return Command("get", [ROOT_DEVICES], process_result=process_result)
def get_device(self, device_id):
def process_result(result):
return Device(result)
return Command("get", [ROOT_DEVICES, device_id], process_result=process_result)
|
MIT License
|
bruxy70/heating
|
appdaemon/apps/heating-control/heating-control.py
|
HeatingControl.daynight_changed
|
python
|
def daynight_changed(self, entity, attribute, old, new, kwargs):
self.__update_heating()
self.log("updating daynight")
for room in self.__rooms:
if room[ATTR_DAYNIGHT] == entity:
self.log(f"for sensor {room[ATTR_SENSOR]}")
self.__update_thermostats(sensor_entity=room[ATTR_SENSOR])
|
Event handler: day/night changed
|
https://github.com/bruxy70/heating/blob/b8008276400520bdb79139ab50c1504d5494d3e1/appdaemon/apps/heating-control/heating-control.py#L155-L162
|
import appdaemon.plugins.hass.hassapi as hass
from enum import Enum
import voluptuous as vol
import voluptuous_helper as vol_help
from datetime import datetime, time, timedelta
MODE_ON = "on"
MODE_OFF = "off"
MODE_AUTO = "auto"
MODE_ECO = "eco"
MODE_VACATION = "vacation"
HYSTERESIS = 1.0
MIN_TEMPERATURE = 10
LOG_LEVEL = "INFO"
HVAC_HEAT = "heat"
HVAC_OFF = "off"
ATTR_SWITCH_HEATING = "switch_heating"
ATTR_SOMEBODY_HOME = "somebody_home"
ATTR_HEATING_MODE = "heating_mode"
ATTR_TEMPERATURE_VACATION = "temperature_vacation"
ATTR_ROOMS = "rooms"
ATTR_DAYNIGHT = "day_night"
ATTR_TEMPERATURE_DAY = "temperature_day"
ATTR_TEMPERATURE_NIGHT = "temperature_night"
ATTR_SENSOR = "sensor"
ATTR_THERMOSTATS = "thermostats"
ATTR_NAME = "name"
ATTR_CURRENT_TEMP = "current_temperature"
ATTR_HVAC_MODE = "hvac_mode"
ATTR_HVAC_MODES = "hvac_modes"
ATTR_TEMPERATURE = "temperature"
ATTR_UNKNOWN = "unknown"
ATTR_UNAVAILABLE = "unavailable"
class HeatingControl(hass.Hass):
def initialize(self):
ROOM_SCHEMA = vol.Schema(
{
vol.Required(ATTR_SENSOR): vol_help.existing_entity_id(self),
vol.Required(ATTR_DAYNIGHT): vol_help.existing_entity_id(self),
vol.Required(ATTR_TEMPERATURE_DAY): vol_help.existing_entity_id(self),
vol.Required(ATTR_TEMPERATURE_NIGHT): vol_help.existing_entity_id(self),
vol.Required(ATTR_THERMOSTATS): vol.All(
vol_help.ensure_list, [vol_help.existing_entity_id(self)]
),
},
)
APP_SCHEMA = vol.Schema(
{
vol.Required("module"): str,
vol.Required("class"): str,
vol.Required(ATTR_ROOMS): vol.All(vol_help.ensure_list, [ROOM_SCHEMA]),
vol.Required(ATTR_SWITCH_HEATING): vol_help.existing_entity_id(self),
vol.Required(ATTR_SOMEBODY_HOME): vol_help.existing_entity_id(self),
vol.Required(ATTR_TEMPERATURE_VACATION): vol_help.existing_entity_id(
self
),
vol.Required(ATTR_HEATING_MODE): vol_help.existing_entity_id(self),
},
extra=vol.ALLOW_EXTRA,
)
__version__ = "0.0.2"
self.__log_level = LOG_LEVEL
try:
config = APP_SCHEMA(self.args)
except vol.Invalid as err:
self.error(f"Invalid format: {err}", level="ERROR")
return
self.__switch_heating = config.get(ATTR_SWITCH_HEATING)
self.__rooms = config.get(ATTR_ROOMS)
self.__somebody_home = config.get(ATTR_SOMEBODY_HOME)
self.__heating_mode = config.get(ATTR_HEATING_MODE)
self.__temperature_vacation = config.get(ATTR_TEMPERATURE_VACATION)
self.listen_state(self.somebody_home_changed, self.__somebody_home)
self.listen_state(self.heating_changed, self.__switch_heating)
self.listen_state(
self.vacation_temperature_changed, self.__temperature_vacation
)
self.listen_state(self.mode_changed, self.__heating_mode)
sensors = []
thermostats = []
for room in self.__rooms:
self.listen_state(self.daynight_changed, room[ATTR_DAYNIGHT])
self.listen_state(self.target_changed, room[ATTR_TEMPERATURE_DAY])
self.listen_state(self.target_changed, room[ATTR_TEMPERATURE_NIGHT])
if room[ATTR_SENSOR] not in sensors:
sensor = room[ATTR_SENSOR]
sensors.append(sensor)
self.listen_state(self.temperature_changed, sensor)
for thermostat in room[ATTR_THERMOSTATS]:
if thermostat not in thermostats:
thermostats.append(thermostat)
self.listen_state(self.thermostat_changed, thermostat)
self.__update_heating()
self.__update_thermostats()
self.log("Ready for action...")
def mode_changed(self, entity, attribute, old, new, kwargs):
heating = self.is_heating()
self.__update_heating()
if heating == self.is_heating():
self.log("Heating changed, updating thermostats")
self.__update_thermostats()
def heating_changed(self, entity, attribute, old, new, kwargs):
self.__update_thermostats()
def vacation_temperature_changed(self, entity, attribute, old, new, kwargs):
if self.get_mode() == MODE_VACATION:
self.__update_heating()
self.__update_thermostats()
def somebody_home_changed(self, entity, attribute, old, new, kwargs):
if new.lower() == "on":
self.log("Somebody came home.", level=self.__log_level)
elif new.lower() == "off":
self.log("Nobody home.", level=self.__log_level)
self.__update_heating(force=True)
self.__update_thermostats()
def thermostat_changed(self, entity, attribute, old, new, kwargs):
if new is None or new == ATTR_UNKNOWN or new == ATTR_UNAVAILABLE:
self.__update_thermostats(thermostat_entity=entity)
def temperature_changed(self, entity, attribute, old, new, kwargs):
self.__update_heating()
self.__update_thermostats(sensor_entity=entity)
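A small self-contained sketch of the voluptuous validation pattern used in initialize() above, with plain str validators standing in for the Home Assistant entity-id helpers; it assumes only that voluptuous is installed.
import voluptuous as vol
ROOM_SCHEMA = vol.Schema({
    vol.Required("sensor"): str,
    vol.Required("thermostats"): [str],
})
APP_SCHEMA = vol.Schema({
    vol.Required("rooms"): [ROOM_SCHEMA],
}, extra=vol.ALLOW_EXTRA)
try:
    config = APP_SCHEMA({"rooms": [{"sensor": "sensor.living_room", "thermostats": ["climate.living_room"]}]})
    print(config["rooms"][0]["sensor"])  # sensor.living_room
except vol.Invalid as err:
    print(f"Invalid format: {err}")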
|
MIT License
|
netflix/dispatch
|
src/dispatch/plugins/dispatch_slack/service.py
|
unarchive_conversation
|
python
|
def unarchive_conversation(client: Any, conversation_id: str):
try:
return make_call(client, "conversations.unarchive", channel=conversation_id)
except slack_sdk.errors.SlackApiError as e:
if e.response["error"] != "not_archived":
raise e
|
Unarchives an existing conversation.
|
https://github.com/netflix/dispatch/blob/e30705938e970d8ef0dfdd04246a3f3004a6a44f/src/dispatch/plugins/dispatch_slack/service.py#L287-L294
|
import time
import logging
import functools
import slack_sdk
from slack_sdk.web.async_client import AsyncWebClient
from typing import Any, Dict, List, Optional
from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt
from .config import SlackConversationConfiguration
log = logging.getLogger(__name__)
def create_slack_client(config: SlackConversationConfiguration, run_async: bool = False):
if not run_async:
return slack_sdk.WebClient(token=config.api_bot_token.get_secret_value())
return AsyncWebClient(token=config.api_bot_token.get_secret_value())
def resolve_user(client: Any, user_id: str):
if "@" in user_id:
return get_user_info_by_email(client, user_id)
return {"id": user_id}
def chunks(ids, n):
for i in range(0, len(ids), n):
yield ids[i : i + n]
def paginated(data_key):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results += response[data_key]
next_cursor = response["response_metadata"]["next_cursor"]
if not next_cursor:
break
kwargs.update({"cursor": next_cursor})
return results
return decorated_function
return decorator
def time_pagination(data_key):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results += response[data_key]
if not response["has_more"]:
break
kwargs.update({"latest": response["messages"][0]["ts"]})
return results
return decorated_function
return decorator
SLACK_GET_ENDPOINTS = [
"conversations.history",
"conversations.info",
"users.conversations",
"users.info",
"users.lookupByEmail",
"users.profile.get",
]
@retry(stop=stop_after_attempt(5), retry=retry_if_exception_type(TryAgain))
def make_call(client: Any, endpoint: str, **kwargs):
try:
if endpoint in SLACK_GET_ENDPOINTS:
response = client.api_call(endpoint, http_verb="GET", params=kwargs)
else:
response = client.api_call(endpoint, json=kwargs)
except slack_sdk.errors.SlackApiError as e:
log.error(f"SlackError. Response: {e.response} Endpoint: {endpoint} kwargs: {kwargs}")
if e.response["error"] == "channel_not_found":
raise TryAgain
if e.response["error"] == "user_not_in_channel":
raise TryAgain
if e.response["error"] == "fatal_error":
time.sleep(300)
raise TryAgain
if e.response.headers.get("Retry-After"):
wait = int(e.response.headers["Retry-After"])
log.info(f"SlackError: Rate limit hit. Waiting {wait} seconds.")
time.sleep(wait)
raise TryAgain
else:
raise e
return response
async def make_call_async(client: Any, endpoint: str, **kwargs):
try:
if endpoint in SLACK_GET_ENDPOINTS:
response = await client.api_call(endpoint, http_verb="GET", params=kwargs)
else:
response = await client.api_call(endpoint, json=kwargs)
except slack_sdk.errors.SlackApiError as e:
log.error(f"SlackError. Response: {e.response} Endpoint: {endpoint} kwargs: {kwargs}")
if e.response.headers.get("Retry-After"):
wait = int(e.response.headers["Retry-After"])
log.info(f"SlackError: Rate limit hit. Waiting {wait} seconds.")
time.sleep(wait)
raise TryAgain
else:
raise e
return response
@paginated("channels")
def list_conversations(client: Any, **kwargs):
return make_call(client, "conversations.list", types="private_channel", **kwargs)
def list_conversation_messages(client: Any, conversation_id: str, **kwargs):
return make_call(client, "conversations.history", channel=conversation_id, **kwargs)
@functools.lru_cache()
def get_user_info_by_id(client: Any, user_id: str):
return make_call(client, "users.info", user=user_id)["user"]
async def get_user_info_by_id_async(client: Any, user_id: str):
user_info = await make_call_async(client, "users.info", user=user_id)
return user_info["user"]
@functools.lru_cache()
def get_user_info_by_email(client: Any, email: str):
return make_call(client, "users.lookupByEmail", email=email)["user"]
@functools.lru_cache()
def get_user_profile_by_email(client: Any, email: str):
user = make_call(client, "users.lookupByEmail", email=email)["user"]
profile = make_call(client, "users.profile.get", user=user["id"])["profile"]
profile["tz"] = user["tz"]
return profile
def get_user_email(client: Any, user_id: str):
user_info = get_user_info_by_id(client, user_id)
return user_info["profile"]["email"]
async def get_user_email_async(client: Any, user_id: str):
user_info = await get_user_info_by_id_async(client, user_id)
return user_info["profile"]["email"]
def get_user_username(client: Any, user_id: str):
return get_user_email(client, user_id).split("@")[0]
def get_user_avatar_url(client: Any, email: str):
return get_user_info_by_email(client, email)["profile"]["image_512"]
async def get_conversations_by_user_id_async(client: Any, user_id: str):
result = await make_call_async(
client,
"users.conversations",
user=user_id,
types="public_channel",
exclude_archived="true",
)
public_conversations = [c["name"] for c in result["channels"]]
result = await make_call_async(
client,
"users.conversations",
user=user_id,
types="private_channel",
exclude_archived="true",
)
private_conversations = [c["name"] for c in result["channels"]]
return public_conversations, private_conversations
def get_conversation_by_name(client: Any, name: str):
for c in list_conversations(client):
if c["name"] == name:
return c
async def get_conversation_name_by_id_async(client: Any, conversation_id: str):
try:
return (await make_call_async(client, "conversations.info", channel=conversation_id))[
"channel"
]["name"]
except slack_sdk.errors.SlackApiError as e:
if e.response["error"] == "channel_not_found":
return None
else:
raise e
def set_conversation_topic(client: Any, conversation_id: str, topic: str):
return make_call(client, "conversations.setTopic", channel=conversation_id, topic=topic)
def create_conversation(client: Any, name: str, is_private: bool = False):
response = make_call(
client,
"conversations.create",
name=name.lower(),
is_group=is_private,
is_private=is_private,
)["channel"]
return {
"id": response["id"],
"name": response["name"],
"weblink": f"https://slack.com/app_redirect?channel={response['id']}",
}
def archive_conversation(client: Any, conversation_id: str):
return make_call(client, "conversations.archive", channel=conversation_id)
|
Apache License 2.0
|
helgeanl/gp-mpc
|
gp_mpc/mpc_class.py
|
MPC.__init__
|
python
|
def __init__(self, horizon, model, gp=None,
Q=None, P=None, R=None, S=None, lam=None, lam_state=None,
ulb=None, uub=None, xlb=None, xub=None, terminal_constraint=None,
feedback=True, percentile=None, gp_method='TA', costFunc='quad', solver_opts=None,
discrete_method='gp', inequality_constraints=None, num_con_par=0,
hybrid=None, Bd=None, Bf=None
):
build_solver_time = -time.time()
dt = model.sampling_time()
Ny, Nu, Np = model.size()
Nx = Nu + Ny
Nt = int(horizon / dt)
self.__dt = dt
self.__Nt = Nt
self.__Ny = Ny
self.__Nx = Nx
self.__Nu = Nu
self.__num_con_par = num_con_par
self.__model = model
self.__hybrid = hybrid
self.__gp = gp
self.__feedback = feedback
self.__discrete_method = discrete_method
""" Default penalty values """
if P is None:
P = np.eye(Ny)
if Q is None:
Q = np.eye(Ny)
if R is None:
R = np.eye(Nu) * 0.01
if S is None:
S = np.eye(Nu) * 0.1
if lam is None:
lam = 1000
self.__Q = Q
self.__P = P
self.__R = R
self.__S = S
self.__Bd = Bd
self.__Bf = Bf
if xub is None:
xub = np.full((Ny), np.inf)
if xlb is None:
xlb = np.full((Ny), -np.inf)
if uub is None:
uub = np.full((Nu), np.inf)
if ulb is None:
ulb = np.full((Nu), -np.inf)
""" Default percentile probability """
if percentile is None:
percentile = 0.95
quantile_x = np.ones(Ny) * norm.ppf(percentile)
quantile_u = np.ones(Nu) * norm.ppf(percentile)
Hx = ca.MX.eye(Ny)
Hu = ca.MX.eye(Nu)
""" Create parameter symbols """
mean_0_s = ca.MX.sym('mean_0', Ny)
mean_ref_s = ca.MX.sym('mean_ref', Ny)
u_0_s = ca.MX.sym('u_0', Nu)
covariance_0_s = ca.MX.sym('covariance_0', Ny * Ny)
K_s = ca.MX.sym('K', Nu * Ny)
P_s = ca.MX.sym('P', Ny * Ny)
con_par = ca.MX.sym('con_par', num_con_par)
param_s = ca.vertcat(mean_0_s, mean_ref_s, covariance_0_s,
u_0_s, K_s, P_s, con_par)
""" Select wich GP function to use """
if discrete_method == 'gp':
self.__gp.set_method(gp_method)
if solver_opts['expand'] is not False and discrete_method == 'exact':
raise TypeError("Can't use exact discrete system with expanded graph")
""" Initialize state variance with the GP noise variance """
if gp is not None:
self.__variance_0 = np.full((Ny), 1e-10)
else:
self.__variance_0 = np.full((Ny), 1e-10)
""" Define which cost function to use """
self.__set_cost_function(costFunc, mean_ref_s, P_s.reshape((Ny, Ny)))
""" Feedback function """
mean_s = ca.MX.sym('mean', Ny)
v_s = ca.MX.sym('v', Nu)
if feedback:
u_func = ca.Function('u', [mean_s, mean_ref_s, v_s, K_s],
[v_s + ca.mtimes(K_s.reshape((Nu, Ny)),
mean_s-mean_ref_s)])
else:
u_func = ca.Function('u', [mean_s, mean_ref_s, v_s, K_s], [v_s])
self.__u_func = u_func
""" Create variables struct """
var = ctools.struct_symMX([(
ctools.entry('mean', shape=(Ny,), repeat=Nt + 1),
ctools.entry('L', shape=(int((Ny**2 - Ny)/2 + Ny),), repeat=Nt + 1),
ctools.entry('v', shape=(Nu,), repeat=Nt),
ctools.entry('eps', shape=(3,), repeat=Nt + 1),
ctools.entry('eps_state', shape=(Ny,), repeat=Nt + 1),
)])
num_slack = 3
num_state_slack = Ny
self.__var = var
self.__num_var = var.size
self.__varlb = var(-np.inf)
self.__varub = var(np.inf)
""" Adjust hard boundries """
for t in range(Nt + 1):
j = Ny
k = 0
for i in range(Ny):
self.__varlb['L', t, k] = 0
k += j
j -= 1
self.__varlb['eps', t] = 0
self.__varlb['eps_state', t] = 0
if xub is not None:
self.__varub['mean', t] = xub
if xlb is not None:
self.__varlb['mean', t] = xlb
if lam_state is None:
self.__varub['eps_state'] = 0
""" Input covariance matrix """
if discrete_method == 'hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
Nz_gp = Ny_gp + Nu_gp
covar_d_sx = ca.SX.sym('cov_d', Ny_gp, Ny_gp)
K_sx = ca.SX.sym('K', Nu, Ny)
covar_u_func = ca.Function('cov_u', [covar_d_sx, K_sx],
[ca.SX(Nu, Nu)])
covar_s = ca.SX(Nz_gp, Nz_gp)
covar_s[:Ny_gp, :Ny_gp] = covar_d_sx
covar_func = ca.Function('covar', [covar_d_sx], [covar_s])
elif discrete_method == 'f_hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
Nz_gp = Ny_gp + Nu_gp
covar_d_sx = ca.SX.sym('cov_d', Ny_gp, Ny_gp)
K_sx = ca.SX.sym('K', Nu, Ny)
covar_u_func = ca.Function('cov_u', [covar_d_sx, K_sx],
[ca.SX(Nu, Nu)])
covar_s = ca.SX(Nz_gp, Nz_gp)
covar_s[:Ny_gp, :Ny_gp] = covar_d_sx
covar_func = ca.Function('covar', [covar_d_sx], [covar_s])
else:
covar_x_s = ca.MX.sym('covar_x', Ny, Ny)
covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
K_sx = ca.SX.sym('K', Nu, Ny)
covar_u_func = ca.Function('cov_u', [covar_x_sx, K_sx],
[K_sx @ covar_x_sx @ K_sx.T])
cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
[covar_x_sx @ K_sx.T])
cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
cov_u = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)
covar_func = ca.Function('covar', [covar_x_s], [covar_s])
""" Hybrid output covariance matrix """
if discrete_method == 'hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
covar_d_sx = ca.SX.sym('covar_d', Ny_gp, Ny_gp)
covar_x_sx = ca.SX.sym('covar_x', Ny, Ny)
u_s = ca.SX.sym('u', Nu)
cov_x_next_s = ca.SX(Ny, Ny)
cov_x_next_s[:Ny_gp, :Ny_gp] = covar_d_sx
covar_x_next_func = ca.Function( 'cov',
[covar_d_sx],
[cov_x_next_s])
""" f_hybrid output covariance matrix """
elif discrete_method == 'f_hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
covar_d_sx = ca.SX.sym('covar_d', Ny_gp, Ny_gp)
covar_x_sx = ca.SX.sym('covar_x', Ny, Ny)
mean_s = ca.SX.sym('mean', Ny)
u_s = ca.SX.sym('u', Nu)
cov_x_next_s = ca.SX(Ny, Ny)
cov_x_next_s[:Ny_gp, :Ny_gp] = covar_d_sx
covar_x_next_func = ca.Function( 'cov',
[mean_s, u_s, covar_d_sx, covar_x_sx],
[cov_x_next_s])
L_s = ca.SX.sym('L', ca.Sparsity.lower(Ny))
L_to_cov_func = ca.Function('cov', [L_s], [L_s @ L_s.T])
covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
cholesky = ca.Function('cholesky', [covar_x_sx], [ca.chol(covar_x_sx).T])
""" Set initial values """
obj = ca.MX(0)
con_eq = []
con_ineq = []
con_ineq_lb = []
con_ineq_ub = []
con_eq.append(var['mean', 0] - mean_0_s)
L_0_s = ca.MX(ca.Sparsity.lower(Ny), var['L', 0])
L_init = cholesky(covariance_0_s.reshape((Ny,Ny)))
con_eq.append(L_0_s.nz[:]- L_init.nz[:])
u_past = u_0_s
""" Build constraints """
for t in range(Nt):
mean_t = var['mean', t]
u_t = u_func(mean_t, mean_ref_s, var['v', t], K_s)
L_x = ca.MX(ca.Sparsity.lower(Ny), var['L', t])
covar_x_t = L_to_cov_func(L_x)
if discrete_method == 'hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
covar_t = covar_func(covar_x_t[:Ny_gp, :Ny_gp])
elif discrete_method == 'd_hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
covar_t = ca.MX(Ny_gp + Nu_gp, Ny_gp + Nu_gp)
elif discrete_method == 'gp':
covar_t = covar_func(covar_x_t)
else:
covar_t = ca.MX(Nx, Nx)
""" Select the chosen integrator """
if discrete_method == 'rk4':
mean_next_pred = model.rk4(mean_t, u_t,[])
covar_x_next_pred = ca.MX(Ny, Ny)
elif discrete_method == 'exact':
mean_next_pred = model.Integrator(x0=mean_t, p=u_t)['xf']
covar_x_next_pred = ca.MX(Ny, Ny)
elif discrete_method == 'd_hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t, covar_t)
mean_next_pred = ca.vertcat(mean_d, hybrid.rk4(mean_t[Ny_gp:],
mean_t[:Ny_gp], []))
covar_x_next_pred = ca.MX(Ny, Ny)
elif discrete_method == 'hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t, covar_t)
mean_next_pred = ca.vertcat(mean_d, hybrid.rk4(mean_t[Ny_gp:],
mean_t[:Ny_gp], []))
covar_x_next_pred = covar_x_next_func(covar_d )
elif discrete_method == 'f_hybrid':
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t, covar_t)
mean_next_pred = ca.vertcat(mean_d, hybrid.rk4(mean_t[Ny_gp:],
mean_t[:Ny_gp], []))
covar_x_next_pred = covar_x_next_func(mean_t, u_t, covar_d,
covar_x_t)
else:
mean_next_pred, covar_x_next_pred = self.__gp.predict(mean_t,
u_t, covar_t)
""" Continuity constraints """
mean_next = var['mean', t + 1]
con_eq.append(mean_next_pred - mean_next )
L_x_next = ca.MX(ca.Sparsity.lower(Ny), var['L', t + 1])
covar_x_next = L_to_cov_func(L_x_next).reshape((Ny*Ny,1))
L_x_next_pred = cholesky(covar_x_next_pred)
con_eq.append(L_x_next_pred.nz[:] - L_x_next.nz[:])
""" Chance state constraints """
cons = self.__constraint(mean_next, L_x_next, Hx, quantile_x, xub,
xlb, var['eps_state',t])
con_ineq.extend(cons['con'])
con_ineq_lb.extend(cons['con_lb'])
con_ineq_ub.extend(cons['con_ub'])
""" Input constraints """
cov_u = ca.MX(Nu, Nu)
if uub is not None:
con_ineq.append(u_t)
con_ineq_ub.extend(uub)
con_ineq_lb.append(np.full((Nu,), -ca.inf))
if ulb is not None:
con_ineq.append(u_t)
con_ineq_ub.append(np.full((Nu,), ca.inf))
con_ineq_lb.append(ulb)
""" Add extra constraints """
if inequality_constraints is not None:
cons = inequality_constraints(var['mean', t + 1],
covar_x_next,
u_t, var['eps', t], con_par)
con_ineq.extend(cons['con_ineq'])
con_ineq_lb.extend(cons['con_ineq_lb'])
con_ineq_ub.extend(cons['con_ineq_ub'])
""" Objective function """
u_delta = u_t - u_past
obj += self.__l_func(var['mean', t], covar_x_t, u_t, cov_u, u_delta) + np.full((1, num_slack),lam) @ var['eps', t]
if lam_state is not None:
obj += np.full((1,num_state_slack),lam_state) @ var['eps_state', t]
u_past = u_t
L_x = ca.MX(ca.Sparsity.lower(Ny), var['L', Nt])
covar_x_t = L_to_cov_func(L_x)
obj += self.__lf_func(var['mean', Nt], covar_x_t, P_s.reshape((Ny, Ny))) + np.full((1, num_slack),lam) @ var['eps', Nt]
if lam_state is not None:
obj += np.full((1,num_state_slack),lam_state) @ var['eps_state', Nt]
num_eq_con = ca.vertcat(*con_eq).size1()
num_ineq_con = ca.vertcat(*con_ineq).size1()
con_eq_lb = np.zeros((num_eq_con,))
con_eq_ub = np.zeros((num_eq_con,))
""" Terminal contraint """
if terminal_constraint is not None and not feedback:
con_ineq.append(var['mean', Nt] - mean_ref_s)
num_ineq_con += Ny
con_ineq_lb.append(np.full((Ny,), - terminal_constraint))
con_ineq_ub.append(np.full((Ny,), terminal_constraint))
elif terminal_constraint is not None and feedback:
con_ineq.append(self.__lf_func(var['mean', Nt],
covar_x_t, P_s.reshape((Ny, Ny))))
num_ineq_con += 1
con_ineq_lb.append(0)
con_ineq_ub.append(terminal_constraint)
con = ca.vertcat(*con_eq, *con_ineq)
self.__conlb = ca.vertcat(con_eq_lb, *con_ineq_lb)
self.__conub = ca.vertcat(con_eq_ub, *con_ineq_ub)
""" Build solver object """
nlp = dict(x=var, f=obj, g=con, p=param_s)
options = {
'ipopt.print_level' : 0,
'ipopt.mu_init' : 0.01,
'ipopt.tol' : 1e-8,
'ipopt.warm_start_init_point' : 'yes',
'ipopt.warm_start_bound_push' : 1e-9,
'ipopt.warm_start_bound_frac' : 1e-9,
'ipopt.warm_start_slack_bound_frac' : 1e-9,
'ipopt.warm_start_slack_bound_push' : 1e-9,
'ipopt.warm_start_mult_bound_push' : 1e-9,
'ipopt.mu_strategy' : 'adaptive',
'print_time' : False,
'verbose' : False,
'expand' : True
}
if solver_opts is not None:
options.update(solver_opts)
self.__solver = ca.nlpsol('mpc_solver', 'ipopt', nlp, options)
self.__var_prediction = np.zeros((Nt + 1, Ny))
self.__mean_prediction = np.zeros((Nt + 1, Ny))
self.__mean = None
build_solver_time += time.time()
print('\n________________________________________')
print('# Time to build mpc solver: %f sec' % build_solver_time)
print('# Number of variables: %d' % self.__num_var)
print('# Number of equality constraints: %d' % num_eq_con)
print('# Number of inequality constraints: %d' % num_ineq_con)
print('----------------------------------------')
|
Initialize and build the MPC solver
# Arguments:
horizon: Prediction horizon with control inputs
model: System model
# Optional Arguments:
gp: GP model
Q: State penalty matrix, default=diag(1,...,1)
P: Terminal penalty matrix, default=diag(1,...,1);
if feedback is True, then P is the solution of the DARE
and this option is discarded.
R: Input penalty matrix, default=diag(1,...,1)*0.01
S: Input rate of change penalty matrix, default=diag(1,...,1)*0.1
lam: Slack variable penalty for constraints, default=1000
lam_state: Slack variable penalty for violation of upper/lower
state boundary, default=None
ulb: Lower input boundary
uub: Upper input boundary
xlb: Lower state boundary
xub: Upper state boundary
terminal_constraint: Terminal condition on the state
* if None: No terminal constraint is used
* if zero: Terminal state is equal to zero
* if nonzero: Terminal state is bounded within +/- the constraint
* if not None and feedback is True, then the expected value of
the Lyapunov function E{x^TPx} < terminal_constraint
is used as a terminal constraint.
feedback: If true, use an LQR feedback function u= Kx + v
percentile: Measure of how strictly the chance constraints are enforced,
P(X in constrained set) > percentile,
percentile= 1 - probability of violation,
default=0.95
gp_method: Method of propagating the uncertainty
Possible options:
'TA': Second order Taylor approximation
'ME': Mean equivalent approximation
costFunc: Cost function to use in the objective
'quad': Expected value of Quadratic Cost
'sat': Expected value of Saturating cost
solver_opts: Additional options to pass to the NLP solver
e.g.: solver_opts['print_time'] = False
solver_opts['ipopt.tol'] = 1e-8
discrete_method: 'gp' - Gaussian process model
'rk4' - Runge-Kutta 4 integrator
'exact' - CVODES or IDAS (for ODEs or DAEs)
'hybrid' - GP model for dynamic equations, and RK4
for kinematic equations
'd_hybrid' - Same as above, without uncertainty
'f_hybrid' - GP estimating modelling errors, with
RK4 computing the actual model
num_con_par: Number of parameters to pass to the inequality function
inequality_constraints: Additional inequality constraints
Use a function with inputs (x, covar, u, eps) and
that returns a dictionary with inequality constraints and limits.
e.g. cons = dict(con_ineq=con_ineq_array,
con_ineq_lb=con_ineq_lb_array,
con_ineq_ub=con_ineq_ub_array
)
# NOTES:
* Differentiation of Sundials integrators is not supported with the SX graph,
meaning that the solver option 'expand' must be set to False
to use MX graph instead when using the 'exact' discrete method.
* At the moment the f_hybrid option is not fully implemented...
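A small numeric illustration of how the percentile argument maps to a constraint-tightening quantile via the inverse normal CDF, mirroring the quantile_x computation above; the bound and standard deviation values are made up, and the exact tightening lives in the __constraint helper, which is not shown.
from scipy.stats import norm
percentile = 0.95
quantile = norm.ppf(percentile)        # ~1.645 for a 95% single-sided chance constraint
xub = 2.0                              # hypothetical upper state bound
sigma = 0.1                            # hypothetical predicted standard deviation of the state
tightened_ub = xub - quantile * sigma  # the mean prediction must stay below ~1.84
print(quantile, tightened_ub)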
|
https://github.com/helgeanl/gp-mpc/blob/0d629b30eb8641221009e28e8591eb7c7f416e10/gp_mpc/mpc_class.py#L20-L526
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
import casadi as ca
import casadi.tools as ctools
from scipy.stats import norm
import scipy.linalg
class MPC:
|
MIT License
|
areed1192/ms-graph-python-client
|
ms_graph/client.py
|
MicrosoftGraphClient.groups
|
python
|
def groups(self) -> Groups:
groups_object: Groups = Groups(session=self.graph_session)
return groups_object
|
Used to access the Groups Services and metadata.
### Returns
---
Groups:
The `Groups` services Object.
|
https://github.com/areed1192/ms-graph-python-client/blob/dad30327f575b3a76cb1b0b7000b2935c16c511a/ms_graph/client.py#L382-L394
|
import json
import msal
import time
import urllib
import random
import string
import pathlib
from typing import List
from typing import Dict
from ms_graph.users import Users
from ms_graph.drives import Drives
from ms_graph.groups import Groups
from ms_graph.notes import Notes
from ms_graph.session import GraphSession
from ms_graph.drive_items import DriveItems
from ms_graph.search import Search
from ms_graph.personal_contacts import PersonalContacts
from ms_graph.mail import Mail
class MicrosoftGraphClient():
RESOURCE = 'https://graph.microsoft.com/'
AUTHORITY_URL = 'https://login.microsoftonline.com/'
AUTH_ENDPOINT = '/oauth2/v2.0/authorize?'
TOKEN_ENDPOINT = '/oauth2/v2.0/token'
OFFICE365_AUTHORITY_URL = 'https://login.live.com'
OFFICE365_AUTH_ENDPOINT = '/oauth20_authorize.srf?'
OFFICE365_TOKEN_ENDPOINT = '/oauth20_token.srf'
def __init__(self, client_id: str, client_secret: str, redirect_uri: str,
scope: List[str], account_type: str = 'consumers',
office365: bool = False, credentials: str = None):
letters = string.ascii_lowercase
self.credentials = credentials
self.token_dict = None
self.client_id = client_id
self.client_secret = client_secret
self.api_version = 'v1.0'
self.account_type = account_type
self.redirect_uri = redirect_uri
self.scope = scope
self.state = ''.join(random.choice(letters) for i in range(10))
self.access_token = None
self.refresh_token = None
self.graph_session = None
self.id_token = None
self.base_url = self.RESOURCE + self.api_version + '/'
self.office_url = self.OFFICE365_AUTHORITY_URL + self.OFFICE365_AUTH_ENDPOINT
self.graph_url = self.AUTHORITY_URL + self.account_type + self.AUTH_ENDPOINT
self.office365 = office365
self.client_app = msal.ConfidentialClientApplication(
client_id=self.client_id,
authority=self.AUTHORITY_URL + self.account_type,
client_credential=self.client_secret
)
def _state(self, action: str, token_dict: dict = None) -> bool:
does_exists = pathlib.Path(self.credentials).exists()
if does_exists and action == 'load':
with open(file=self.credentials, mode='r') as state_file:
credentials = json.load(fp=state_file)
if 'refresh_token' in credentials:
self.refresh_token = credentials['refresh_token']
self.access_token = credentials['access_token']
self.id_token = credentials['id_token']
self.token_dict = credentials
return True
else:
return False
elif action == 'save':
token_dict['expires_in'] = time.time() + int(token_dict['expires_in'])
token_dict['ext_expires_in'] = time.time() + int(token_dict['ext_expires_in'])
self.refresh_token = token_dict['refresh_token']
self.access_token = token_dict['access_token']
self.id_token = token_dict['id_token']
self.token_dict = token_dict
with open(file=self.credentials, mode='w+') as state_file:
json.dump(obj=token_dict, fp=state_file, indent=2)
def _token_seconds(self, token_type: str = 'access_token') -> int:
if token_type == 'access_token':
if not self.access_token or (time.time() + 60 >= self.token_dict['expires_in']):
return 0
token_exp = int(self.token_dict['expires_in'] - time.time() - 60)
elif token_type == 'refresh_token':
if not self.refresh_token or (time.time() + 60 >= self.token_dict['ext_expires_in']):
return 0
token_exp = int(self.token_dict['ext_expires_in'] - time.time() - 60)
return token_exp
def _token_validation(self, nseconds: int = 60):
if self._token_seconds(token_type='access_token') < nseconds:
self.grab_refresh_token()
def _silent_sso(self) -> bool:
if self._token_seconds(token_type='access_token') > 0:
return True
elif self.refresh_token and self.grab_refresh_token():
return True
return False
def login(self) -> None:
self._state(action='load')
if self._silent_sso():
self.graph_session = GraphSession(client=self)
return True
else:
url = self.authorization_url()
print('Please go to the URL provided to authorize your account: {}'.format(url))
my_response = input('Paste the full URL redirect here: ')
self._redirect_code = my_response
self.grab_access_token()
self.graph_session = GraphSession(client=self)
def authorization_url(self):
auth_url = self.client_app.get_authorization_request_url(
scopes=self.scope,
state=self.state,
redirect_uri=self.redirect_uri
)
return auth_url
def grab_access_token(self) -> Dict:
query_dict = urllib.parse.parse_qs(self._redirect_code)
code = query_dict[self.redirect_uri + "?code"]
token_dict = self.client_app.acquire_token_by_authorization_code(
code=code,
scopes=self.scope,
redirect_uri=self.redirect_uri
)
self._state(
action='save',
token_dict=token_dict
)
return token_dict
def grab_refresh_token(self) -> Dict:
token_dict = self.client_app.acquire_token_by_refresh_token(
refresh_token=self.refresh_token,
scopes=self.scope
)
if 'error' in token_dict:
print(token_dict)
raise PermissionError("Permissions not authorized, delete json file and run again.")
self._state(
action='save',
token_dict=token_dict
)
return token_dict
def users(self) -> Users:
users_object: Users = Users(session=self.graph_session)
return users_object
def drives(self) -> Drives:
drives_object: Drives = Drives(session=self.graph_session)
return drives_object
def drive_item(self) -> DriveItems:
drive_items_object: DriveItems = DriveItems(session=self.graph_session)
return drive_items_object
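A hedged usage sketch of the client above with placeholder values; it will not authenticate without a registered Azure AD application, a real client id/secret, and consented Graph scopes.
from ms_graph.client import MicrosoftGraphClient
client = MicrosoftGraphClient(
    client_id="<your-client-id>",          # placeholder
    client_secret="<your-client-secret>",  # placeholder
    redirect_uri="https://localhost/login",
    scope=["https://graph.microsoft.com/.default"],  # placeholder scope list
    credentials="ms_graph_state.json",     # local token cache used by _state()
)
client.login()                  # prompts for the redirect URL on the first run
groups_service = client.groups()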
|
MIT License
|
ibm/spacetech-kubesat
|
examples/hello_service.py
|
send_hello_message
|
python
|
async def send_hello_message(nats, shared_storage, logger):
message = nats.create_message({"message": "hello"})
await nats.send_message("public.hello", message)
print(f"SEND : {message.encode_json()}")
|
Send a hello message.
Args:
nats (NatsHandler): connection to nats used to send and receive messages
shared_storage (dict): dictionary that stores local data for the service
logger (NatsLogger): logger that can be used to communicate the state of the system
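A stand-in sketch of how the callback could be exercised outside kubesat, assuming send_hello_message from the snippet above is in scope; FakeMessage and FakeNats are illustrative stubs, not the real NatsHandler API.
import asyncio
import json
class FakeMessage:
    def __init__(self, data):
        self.data = data
    def encode_json(self):
        return json.dumps(self.data)
class FakeNats:
    def create_message(self, data):
        return FakeMessage(data)
    async def send_message(self, channel, message):
        print(f"would publish on {channel}: {message.encode_json()}")
asyncio.run(send_hello_message(FakeNats(), shared_storage={}, logger=None))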
|
https://github.com/ibm/spacetech-kubesat/blob/e64372cc4cf71d9db7fe2395ba60d93722fccff6/examples/hello_service.py#L27-L40
|
import asyncio
import time
from kubesat.base_service import BaseService
SERVICE_TYPE = 'hello'
hello = BaseService(service_type=SERVICE_TYPE,
config_path='./service.json')
@hello.schedule_callback(2)
|
Apache License 2.0
|
renaudll/maya-mock
|
src/maya_mock/base/schema.py
|
MockedSessionSchema.to_json_file
|
python
|
def to_json_file(self, path, indent=1, sort_keys=True, **kwargs):
data = self.to_dict()
with open(path, "w") as stream:
json.dump(data, stream, sort_keys=sort_keys, indent=indent, **kwargs)
|
Export the schema to a json file.
:param str path: The path to the json file to save to
:param int indent: The indent to use. Default is 1
:param bool sort_keys: Should we sort the keys? Default is True
:param kwargs: Any keyword arguments are passed to `json.dump`
|
https://github.com/renaudll/maya-mock/blob/be5754affe03b06b7960872ac21d7ff04e8c5002/src/maya_mock/base/schema.py#L293-L304
|
import copy
import logging
import json
_LOG = logging.getLogger(__name__)
def get_namespace_parent(namespace):
return namespace.rsplit(".", 1)[0] if "." in namespace else None
def get_namespace_leaf(namespace):
return namespace.rsplit(".", 1)[-1]
def iter_namespaces(namespaces):
known = set()
def _subroutine(namespace):
if namespace in known:
return
parent_namespace = get_namespace_parent(namespace)
if parent_namespace:
for yielded in _subroutine(parent_namespace):
yield yielded
known.add(namespace)
yield namespace
for namespace in namespaces:
for yielded in _subroutine(namespace):
yield yielded
class NodeTypeDef(object):
def __init__(
self, namespace, data, classification, abstract=False, parent=None
):
if parent:
for key in parent.data.keys():
if key not in data:
_LOG.debug("Cannot find %r in %r", key, parent)
continue
data.pop(key)
self.parent = parent
self.namespace = namespace
self.type = get_namespace_leaf(namespace)
self._data = data
self.classification = classification
self.abstract = abstract
def __repr__(self):
return "<NodeTypeDef %r>" % self.type
@property
def data(self):
if self.parent:
result = copy.copy(self._data)
result.update(self.parent.data)
return result
return self._data
def apply(self, session, node):
for port_name, port_data in self.data.items():
session.create_port(node, port_name, user_defined=False, **port_data)
def to_dict(self):
return {
"namespace": self.namespace,
"attributes": self._data,
"classification": self.classification,
"abstract": self.abstract,
}
@classmethod
def from_dict(cls, data):
namespace = data["namespace"]
attributes = data["attributes"]
classification = data["classification"]
abstract = data.get("abstract", False)
return cls(namespace, attributes, classification, abstract=abstract)
class MockedSessionSchema(object):
def __init__(self, nodes=None, default_state=None):
if nodes and not isinstance(nodes, dict):
raise ValueError(
"Cannot initialize a schema from %s: %r" % (type(nodes), nodes)
)
self.nodes = nodes or {}
self.default_state = default_state or {}
def register_node(self, node):
if node.type in self.nodes:
raise Exception("Node type %r is already registered!" % node.type)
self.nodes[node.type] = node
def get(self, node_type):
return self.nodes.get(node_type)
def get_node_by_namespace(self, namespace):
return next(
(
node
for namespace_, node in self.nodes.items()
if namespace == namespace_
),
None,
)
def get_known_node_types(self):
return list(self.nodes.keys())
def to_dict(self):
return {
"nodes": {
node_type: node_def.to_dict()
for node_type, node_def in self.nodes.items()
},
"default_state": self.default_state,
}
@classmethod
def from_dict(cls, data):
nodes = data.get("nodes") or {}
default_state = data.get("default_state") or {}
nodes = {
node_name: NodeTypeDef.from_dict(node_data)
for node_name, node_data in nodes.items()
}
inst = cls(nodes=nodes, default_state=default_state)
return inst
@classmethod
def from_json_file(cls, path):
with open(path) as stream:
data = json.load(stream)
return cls.from_dict(data)
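A hedged example, assuming the maya_mock package shown above is importable: it expands an arbitrary namespace string with iter_namespaces and round-trips an empty schema through JSON.
import os
import tempfile
from maya_mock.base.schema import MockedSessionSchema, iter_namespaces
print(list(iter_namespaces(["transform.shape.mesh"])))
# ['transform', 'transform.shape', 'transform.shape.mesh']
schema = MockedSessionSchema()
path = os.path.join(tempfile.mkdtemp(), "schema.json")
schema.to_json_file(path, indent=2)
restored = MockedSessionSchema.from_json_file(path)
print(restored.to_dict())  # {'nodes': {}, 'default_state': {}}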
|
MIT License
|
nrel/gdx-pandas
|
gdxpds/write_gdx.py
|
Translator.__infer_data_type
|
python
|
def __infer_data_type(self,symbol_name,df):
var_or_eqn = False
df_col_names = df.columns
var_eqn_col_names = [col_name for col_name, col_ind in GAMS_VALUE_COLS_MAP[GamsDataType.Variable]]
if len(df_col_names) >= len(var_eqn_col_names):
var_or_eqn = True
trunc_df_col_names = df_col_names[len(df_col_names) - len(var_eqn_col_names):]
for i, df_col in enumerate(trunc_df_col_names):
if df_col and (df_col.lower() != var_eqn_col_names[i].lower()):
var_or_eqn = False
break
if var_or_eqn:
num_dims = len(df_col_names) - len(var_eqn_col_names)
if symbol_name[0].upper() == symbol_name[0]:
return GamsDataType.Variable, num_dims
else:
return GamsDataType.Equation, num_dims
num_dims = len(df_col_names) - 1
if len(df.index) > 0:
if isinstance(df.loc[df.index[0],df.columns[-1]],Number):
return GamsDataType.Parameter, num_dims
return GamsDataType.Set, num_dims
|
Returns
-------
(gdxpds.GamsDataType, int)
symbol type and number of dimensions implied by df
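A pandas-only sketch of the column layouts the heuristic above distinguishes; it does not call gdxpds, and the GAMS value-column names in the comment are the usual ones (Level, Marginal, Lower, Upper, Scale) rather than a verified copy of GAMS_VALUE_COLS_MAP.
import pandas as pd
# One dimension column plus a numeric last column -> inferred as a Parameter with 1 dimension.
param_df = pd.DataFrame({"region": ["east", "west"], "value": [1.5, 2.0]})
# One dimension column plus a non-numeric last column -> inferred as a Set with 1 dimension.
set_df = pd.DataFrame({"region": ["east", "west"], "flag": ["true", "true"]})
# If the trailing columns match the GAMS variable/equation value columns, the symbol is
# inferred as a Variable (capitalized name) or Equation (lowercase name), and the
# remaining leading columns are counted as dimensions.
print(len(param_df.columns) - 1, len(set_df.columns) - 1)  # 1 dimension each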
|
https://github.com/nrel/gdx-pandas/blob/24dae4f17e53d29fbac76bbff4b9febff19dae04/gdxpds/write_gdx.py#L107-L140
|
import logging
from numbers import Number
from gdxpds.tools import Error
from gdxpds.gdx import GdxFile, GdxSymbol, GAMS_VALUE_COLS_MAP, GamsDataType
import pandas as pds
logger = logging.getLogger(__name__)
class Translator(object):
def __init__(self,dataframes,gams_dir=None):
self.dataframes = dataframes
self.__gams_dir=None
def __exit__(self, *args):
if self.__gdx is not None:
self.__gdx.__exit__(self, *args)
def __del__(self):
if self.__gdx is not None:
self.__gdx.__del__()
@property
def dataframes(self):
return self.__dataframes
@dataframes.setter
def dataframes(self,value):
err_msg = "Expecting map of name, pandas.DataFrame pairs."
try:
for symbol_name, df in value.items():
if not isinstance(symbol_name, str): raise Error(err_msg)
if not isinstance(df, pds.DataFrame): raise Error(err_msg)
except AttributeError: raise Error(err_msg)
self.__dataframes = value
self.__gdx = None
@property
def gams_dir(self):
return self.__gams_dir
@gams_dir.setter
def gams_dir(self, value):
self.__gams_dir = value
@property
def gdx(self):
if self.__gdx is None:
self.__gdx = GdxFile(gams_dir=self.__gams_dir)
for symbol_name, df in self.dataframes.items():
self.__add_symbol_to_gdx(symbol_name, df)
return self.__gdx
def save_gdx(self,path,gams_dir=None):
if gams_dir is not None:
self.__gams_dir=gams_dir
self.gdx.write(path)
def __add_symbol_to_gdx(self, symbol_name, df):
data_type, num_dims = self.__infer_data_type(symbol_name,df)
logger.info("Inferred data type of {} to be {}.".format(symbol_name,data_type.name))
self.__gdx.append(GdxSymbol(symbol_name,data_type,dims=num_dims))
self.__gdx[symbol_name].dataframe = df
return
|
BSD 3-Clause New or Revised License
|
olitheolix/aiokubernetes
|
aiokubernetes/models/v1_daemon_set_list.py
|
V1DaemonSetList.metadata
|
python
|
def metadata(self, metadata):
self._metadata = metadata
|
Sets the metadata of this V1DaemonSetList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata # noqa: E501
:param metadata: The metadata of this V1DaemonSetList. # noqa: E501
:type: V1ListMeta
|
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_daemon_set_list.py#L149-L158
|
import pprint
import re
from aiokubernetes.models.v1_daemon_set import V1DaemonSet
from aiokubernetes.models.v1_list_meta import V1ListMeta
class V1DaemonSetList(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1DaemonSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def items(self):
return self._items
@items.setter
def items(self, items):
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def metadata(self):
return self._metadata
@metadata.setter
|
Apache License 2.0
|
bigmlcom/bigmler
|
bigmler/reports.py
|
clear_reports
|
python
|
def clear_reports(output_dir):
path = check_dir(os.path.join(output_dir, REPORTS_DIR, GAZIBIT_TOKEN))
for report_file in os.listdir(path):
input_file = os.path.join(path, report_file)
output_file = tempfile.NamedTemporaryFile(
mode="w", dir=output_dir, delete=False)
try:
with open(input_file, "r") as report_template:
with output_file as report_output:
content = report_template.read()
while content.find(SECTION_START_PREFIX) > 0:
start = content.find(SECTION_START_PREFIX)
end = content.find("\n",
content.find(SECTION_END_PREFIX))
content = "%s%s" % (content[0: start], content[end:])
report_output.write(content)
os.remove(input_file)
os.rename(output_file.name, input_file)
except IOError as exc:
os.remove(output_file.name)
sys.exit("Failed to generate the output report. %s" % str(exc))
|
Clears the useless sections from the report templates.
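A simplified, self-contained sketch of the marker-stripping idea behind clear_reports (not a byte-for-byte copy of its loop); the marker strings follow the SECTION_START/SECTION_END templates shown in the module context below.
def strip_section(content, start_marker, end_marker):
    # Drop everything from start_marker up to (but not including) the newline after end_marker.
    start = content.find(start_marker)
    if start < 0:
        return content
    end = content.find("\n", content.find(end_marker) + len(end_marker))
    return content[:start] + content[end:]
report = "intro\n%%BIGML_START_MODEL%%\nunused model section\n%%BIGML_END_MODEL%%\noutro"
print(strip_section(report, "\n%%BIGML_START_MODEL%%", "\n%%BIGML_END_MODEL%%"))
# intro
# outro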
|
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/reports.py#L142-L167
|
import os
import sys
import tempfile
import shutil
import json
import re
import copy
import requests
import bigml.api
from bigmler.utils import is_shared, check_dir, get_url, log_created_resources
from bigmler.options.analyze import OPTIMIZE_OPTIONS
URL_TEMPLATE = "%%BIGML_%s%%"
SECTION_START = "\n%%BIGML_START_%s%%"
SECTION_START_PREFIX = SECTION_START[2: 15]
SECTION_END = "\n%%BIGML_END_%s%%"
SECTION_END_PREFIX = SECTION_END[2: 13]
REPORTS_DIR = "reports"
ANALYZE_PATH = "test"
ANALYZE_DIR = "analyze"
EMBEDDED_RESOURCES = ["MODEL"]
GAZIBIT = "gazibit"
BIGMLER_SCRIPT = os.path.dirname(__file__)
ANALYZE_TEMPLATE = "%s/static/analyze.html" % BIGMLER_SCRIPT
GAZIBIT_PRIVATE = "%s/static/gazibit.json" % BIGMLER_SCRIPT
GAZIBIT_SHARED = "%s/static/gazibit_shared.json" % BIGMLER_SCRIPT
GAZIBIT_TOKEN = "GAZIBIT_TOKEN"
GAZIBIT_CREATE_URL = "http://gazibit.com/api/v1/create"
GAZIBIT_HEADERS = {"X-Gazibit-API-Token": os.environ.get(GAZIBIT_TOKEN),
"Expect": "100-continue",
'content-type': 'application/x-www-form-urlencoded',
'Content-Length': 12544,
'User-Agent': ('curl/7.22.0 (x86_64-pc-linux-gnu)'
' libcurl/7.22.0 OpenSSL/1.0.1'
' zlib/1.2.3.4 libidn/1.23 librtmp/2.3')}
CROSS_VALIDATION_FILE = "evaluation.json"
MODEL_KEY = "model"
METRICS_FILE = "metrics.json"
EVALUATIONS_JSON_FILE = "evaluations_json.json"
SERVER_DIRECTORY = os.path.join("bigmler", "reports")
HOME = os.getenv("HOME") or (
os.path.join(os.getenv("HOMEDRIVE"), os.getenv("HOMEPATH")))
PREFIX = "average_"
SESSION_FILE = "bigmler_sessions"
HTTP_CREATED = 201
def check_subdir(path, subdir):
directory = os.path.join(path, subdir)
try:
os.stat(directory)
except OSError:
os.mkdir(directory)
def report(report_types_list, output_dir=None, resource=None):
shared = is_shared(resource)
for report_type in report_types_list:
REPORTS[report_type](resource, output_dir, shared)
def add_gazibit_links(resource, output_dir=None, shared=False):
try:
gazibit_tmp = GAZIBIT_SHARED if shared else GAZIBIT_PRIVATE
path = check_dir(os.path.join(output_dir,
REPORTS_DIR,
os.path.basename(gazibit_tmp)))
input_file = os.path.join(path, os.path.basename(gazibit_tmp))
output_file = tempfile.NamedTemporaryFile(
mode="w", dir=output_dir, delete=False)
if not os.path.isfile(input_file):
shutil.copyfile(gazibit_tmp, input_file)
with open(input_file, "r") as report_template:
with output_file as report_output:
content = report_template.read()
resource_type = bigml.api.get_resource_type(resource)
resource_type = resource_type.upper()
url_template = URL_TEMPLATE % resource_type
if shared and (resource_type in EMBEDDED_RESOURCES):
url = get_url(resource, embedded=True)
else:
url = get_url(resource, shared=shared)
content = content.replace(url_template, url)
section_template = SECTION_START % resource_type
content = content.replace(section_template, "")
section_template = SECTION_END % resource_type
content = content.replace(section_template, "")
report_output.write(content)
os.remove(input_file)
os.rename(output_file.name, input_file)
except IOError as exc:
os.remove(output_file.name)
sys.exit("Failed to generate the gazibit output report. %s" % str(exc))
|
Apache License 2.0
|
camel-lab/camel_tools
|
camel_tools/sentiment/__init__.py
|
SentimentAnalyzer.predict_sentence
|
python
|
def predict_sentence(self, sentence):
return self.predict([sentence])[0]
|
Predict the sentiment label of a single sentence.
Args:
sentence (:obj:`str`): Input sentence.
Returns:
:obj:`str`: The predicted sentiment label for given sentence.
|
https://github.com/camel-lab/camel_tools/blob/3f0655e382f9ef005892edccda7fe0a54a00f634/camel_tools/sentiment/__init__.py#L115-L125
|
import torch
import torch.nn.functional as torch_fun
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from camel_tools.data import DataCatalogue
_LABELS = ('positive', 'negative', 'neutral')
class SentimentDataset(Dataset):
def __init__(self, sentences, tokenizer, max_seq_length):
self.encoded_sents = tokenizer(sentences, add_special_tokens=True,
padding=True, max_length=max_seq_length,
truncation=True, return_tensors="pt")
def __getitem__(self, idx):
return {
'input_ids': self.encoded_sents.input_ids[idx],
'token_type_ids': self.encoded_sents.token_type_ids[idx],
'attention_mask': self.encoded_sents.attention_mask[idx]
}
def __len__(self):
return self.encoded_sents.input_ids.shape[0]
class SentimentAnalyzer:
def __init__(self, model_path, use_gpu=True):
self.model = BertForSequenceClassification.from_pretrained(model_path)
self.tokenizer = BertTokenizer.from_pretrained(model_path)
self.labels_map = self.model.config.id2label
self.use_gpu = use_gpu
@staticmethod
def pretrained(model_name=None, use_gpu=True):
model_info = DataCatalogue.get_dataset_info('SentimentAnalysis',
model_name)
model_path = str(model_info.path)
return SentimentAnalyzer(model_path, use_gpu)
@staticmethod
def labels():
return list(_LABELS)
|
MIT License
|
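A usage sketch for the SentimentAnalyzer record above. It assumes camel_tools and its pretrained sentiment data are installed locally (fetched via the package's data download tooling); the input sentence is a placeholder.

from camel_tools.sentiment import SentimentAnalyzer

# Loads the default pretrained BERT sentiment model from the local data catalogue.
sa = SentimentAnalyzer.pretrained()

print(SentimentAnalyzer.labels())          # ['positive', 'negative', 'neutral']

# predict_sentence wraps predict() for a single input and returns one label.
label = sa.predict_sentence("placeholder input sentence")
print(label)                               # one of the labels above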
nuitka/nuitka
|
nuitka/importing/Importing.py
|
setMainScriptDirectory
|
python
|
def setMainScriptDirectory(main_dir):
global main_path
main_path = main_dir
|
Initialize the main script directory.
We use this as part of the search path for modules.
|
https://github.com/nuitka/nuitka/blob/4c5161620ea8f0f1c93a1d6be79e7e6eda7161d4/nuitka/importing/Importing.py#L70-L78
|
from __future__ import print_function
import collections
import hashlib
import imp
import os
import sys
import zipfile
from nuitka import Options
from nuitka.containers.oset import OrderedSet
from nuitka.importing import StandardLibrary
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import recursion_logger
from nuitka.utils.AppDirs import getCacheDir
from nuitka.utils.FileOperations import listDir
from nuitka.utils.Importing import getSharedLibrarySuffixes
from nuitka.utils.ModuleNames import ModuleName
from nuitka.utils.Utils import getOS, isMacOS
from .IgnoreListing import isIgnoreListedNotExistingModule
from .PreloadedPackages import getPreloadedPackagePath, isPreloadedPackagePath
_debug_module_finding = Options.shallExplainImports()
warned_about = set()
main_path = None
|
Apache License 2.0
|
google/digitalbuildings
|
tools/validators/ontology_validator/yamlformat/validator/findings_lib.py
|
Findings.AddFindings
|
python
|
def AddFindings(self, findings):
for finding in findings:
self.AddFinding(finding)
|
Add a list of findings to the findings list.
Args:
findings: required list of Finding objects.
Raises:
TypeError: a finding is not a member of the Finding base class.
|
https://github.com/google/digitalbuildings/blob/de226cbb91ee778eec970a5685c9bd92ee69a503/tools/validators/ontology_validator/yamlformat/validator/findings_lib.py#L174-L184
|
import copy
import operator
MAX_RANK = 1000000000
MISSING_PARENT_VALIDATION_RANK = 60
def MakeFieldString(field):
field_str = ''
if field.optional:
field_str += '(opt) '
field_str += '{0}/{1}{2}'.format(field.field.namespace, field.field.field,
field.field.increment)
return field_str
def _SortFindings(findings):
return sorted(
findings,
key=operator.attrgetter('type_rank', 'category_rank', 'inner_rank',
'is_slave'))
def _DedupFindings(findings):
finding_map = {}
for finding in findings:
if finding in finding_map:
finding = _SortFindings([finding, finding_map.get(finding)])[0]
finding_map[finding] = finding
return finding_map.values()
class FileContext(object):
def __init__(self, filepath, begin_line_number=None, end_line_number=None):
self.begin_line_number = begin_line_number
self.end_line_number = end_line_number
self.raw_filepath = filepath
self.filepath = filepath
def GetLineInfo(self):
if self.begin_line_number and self.end_line_number:
return '(Lines {0} - {1})'.format(self.begin_line_number,
self.end_line_number)
if self.begin_line_number:
return '(Line {0})'.format(self.begin_line_number)
return ''
class Finding(object):
def __init__(self,
message: str,
file_context: 'FileContext',
type_rank: int = MAX_RANK,
category_rank: int = MAX_RANK,
inner_rank: int = MAX_RANK,
equality_key: str = None,
is_master: bool = False):
super(Finding, self).__init__()
if not isinstance(message, str):
raise TypeError('Argument {0} is not a string'.format(message))
if file_context is not None:
if not isinstance(file_context, FileContext):
raise TypeError(
'Argument {0} is not a FileContext object.'.format(file_context))
self.message = message
self.file_context = file_context
self.type_rank = type_rank
self.inner_rank = inner_rank
self.category_rank = category_rank
self.equality_key = equality_key
self.is_master = is_master
self.is_slave = not is_master
def __eq__(self, other):
if isinstance(other, Finding):
if self.equality_key is not None:
return self.equality_key == other.equality_key
return id(self) == id(other)
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self.equality_key is not None:
return self.equality_key.__hash__()
return hash(self.__repr__())
def __str__(self):
return self.message
class Findings(object):
def __init__(self):
self._findings_list = []
self._is_changed = False
def _GetDynamicFindings(self, filter_old_warnings):
del filter_old_warnings
return []
|
Apache License 2.0
|
googlecloudplatform/django-cloud-deploy
|
django_cloud_deploy/skeleton/source_generator.py
|
_FileGenerator.generate_new
|
python
|
def generate_new(self):
|
Generate new source files.
|
https://github.com/googlecloudplatform/django-cloud-deploy/blob/d316b1e45357761e2b124143e6e12ce34ef6f975/django_cloud_deploy/skeleton/source_generator.py#L37-L38
|
import os
import re
import shutil
import subprocess
import sys
from typing import Any, Dict, List, Optional, Set
import django
from django.core.management import utils as django_utils
from django.utils import version
from django_cloud_deploy import crash_handling
from django_cloud_deploy.skeleton import requirements_parser
import jinja2
class _FileGenerator(object):
def generate_from_existing(self):
|
Apache License 2.0
|
junma11/adam2020
|
nnUNet/nnunet/training/network_training/nnUNetTrainerV2.py
|
nnUNetTrainerV2.validate
|
python
|
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0):
ds = self.network.do_ds
self.network.do_ds = False
ret = super().validate(do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian,
overwrite, validation_folder_name, debug, all_in_gpu,
force_separate_z=force_separate_z, interpolation_order=interpolation_order,
interpolation_order_z=interpolation_order_z)
self.network.do_ds = ds
return ret
|
We need to wrap this because we must enforce self.network.do_ds = False for prediction
|
https://github.com/junma11/adam2020/blob/c1670a1883aa6e72f5eb296595bc015f63375705/nnUNet/nnunet/training/network_training/nnUNetTrainerV2.py#L204-L218
|
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, get_patch_size, default_3D_augmentation_params
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from torch.nn.utils import clip_grad_norm_
from nnunet.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
try:
from apex import amp
except ImportError:
amp = None
class nnUNetTrainerV2(nnUNetTrainer):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 1000
self.initial_lr = 1e-2
self.deep_supervision_scales = None
self.ds_loss_weights = None
self.pin_memory = True
def initialize(self, training=True, force_load_plans=False):
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
net_numpool = len(self.net_num_pool_op_kernel_sizes)
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(
self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory
)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_online_evaluation(self, output, target):
target = target[0]
output = output[0]
return super().run_online_evaluation(output, target)
|
Apache License 2.0
|
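The validate override in the record above is a save/disable/restore dance around the network's deep-supervision flag. Below is a self-contained sketch of that pattern with hypothetical class names (nnU-Net itself restores the flag unconditionally after the call; the try/finally here is a small hardening added for the sketch).

# Hypothetical names; only the flag save/restore pattern mirrors the excerpt.
class DummyNet:
    do_ds = True  # deep-supervision flag used during training

class DummyTrainer:
    def __init__(self):
        self.network = DummyNet()

    def run_without_deep_supervision(self, fn, *args, **kwargs):
        previous = self.network.do_ds
        self.network.do_ds = False         # prediction needs a single output head
        try:
            return fn(*args, **kwargs)
        finally:
            self.network.do_ds = previous  # always restore the training setting

trainer = DummyTrainer()
print(trainer.run_without_deep_supervision(lambda: "validated"))  # validated
print(trainer.network.do_ds)                                      # True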
cigroup-ol/metaopt
|
metaopt/core/stoppable/util/decorator.py
|
stopping
|
python
|
def stopping(method):
def wrapped_method(self, *args, **kwargs):
self._stopped = True
return method(self, *args, **kwargs)
return wrapped_method
|
Decorator that notes that the decorated object is stopped.
Note that it needs to be placed after any stoppable decorator.
|
https://github.com/cigroup-ol/metaopt/blob/6dfd5105d3c6eaf00f96670175cae16021069514/metaopt/core/stoppable/util/decorator.py#L23-L33
|
from __future__ import absolute_import, division, print_function, unicode_literals, with_statement
from metaopt.core.stoppable.util.exception import StoppedError
def stoppable(method):
def wrapped_method(self, *args, **kwargs):
if self.stopped:
raise StoppedError()
return method(self, *args, **kwargs)
return wrapped_method
|
BSD 3-Clause New or Revised License
|
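A sketch combining the stopping decorator above with the stoppable decorator from its surrounding module on a hypothetical worker class. The stopped property and _stopped attribute are assumptions about the protocol the decorators expect; only the two decorators and StoppedError come from the record.

from metaopt.core.stoppable.util.decorator import stoppable, stopping
from metaopt.core.stoppable.util.exception import StoppedError

class Worker(object):
    """Hypothetical class wired up for the two decorators."""

    def __init__(self):
        self._stopped = False

    @property
    def stopped(self):
        return self._stopped

    @stoppable               # rejects calls once the worker is stopped
    def run(self):
        return "running"

    @stoppable
    @stopping                # placed after (below) stoppable, as the docstring requires
    def stop(self):
        return "stopping"

w = Worker()
print(w.run())               # running
print(w.stop())              # stopping; marks the worker as stopped
try:
    w.run()
except StoppedError:
    print("further calls are rejected")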
dmlc/dgl
|
python/dgl/nn/tensorflow/glob.py
|
MaxPooling.call
|
python
|
def call(self, graph, feat):
with graph.local_scope():
graph.ndata['h'] = feat
readout = max_nodes(graph, 'h')
return readout
|
r"""Compute max pooling.
Parameters
----------
graph : DGLGraph
The graph.
feat : tf.Tensor
The input feature with shape :math:`(N, *)` where
:math:`N` is the number of nodes in the graph.
Returns
-------
tf.Tensor
The output feature with shape :math:`(B, *)`, where
:math:`B` refers to the batch size.
|
https://github.com/dmlc/dgl/blob/8341244a2dac850bd0c1153c7641c3b8a2bbfc30/python/dgl/nn/tensorflow/glob.py#L92-L112
|
import tensorflow as tf
from tensorflow.keras import layers
from ...readout import sum_nodes, mean_nodes, max_nodes, softmax_nodes, topk_nodes
__all__ = ['SumPooling', 'AvgPooling',
'MaxPooling', 'SortPooling', 'WeightAndSum', 'GlobalAttentionPooling']
class SumPooling(layers.Layer):
def __init__(self):
super(SumPooling, self).__init__()
def call(self, graph, feat):
with graph.local_scope():
graph.ndata['h'] = feat
readout = sum_nodes(graph, 'h')
return readout
class AvgPooling(layers.Layer):
def __init__(self):
super(AvgPooling, self).__init__()
def call(self, graph, feat):
with graph.local_scope():
graph.ndata['h'] = feat
readout = mean_nodes(graph, 'h')
return readout
class MaxPooling(layers.Layer):
def __init__(self):
super(MaxPooling, self).__init__()
|
Apache License 2.0
|
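The MaxPooling readout in the record above reduces node features to one vector per graph in a batch. A dgl-free, conceptual sketch of the same reduction using plain TensorFlow segment ops (the per-node graph ids stand in for DGL's internal batch bookkeeping):

import tensorflow as tf

# 5 nodes with 3 features each; nodes 0-2 belong to graph 0, nodes 3-4 to graph 1.
feat = tf.constant([[1., 0., 2.],
                    [3., 1., 0.],
                    [0., 5., 1.],
                    [2., 2., 2.],
                    [4., 0., 1.]])
graph_ids = tf.constant([0, 0, 0, 1, 1])

# Per-graph max over node features: shape (B, 3) with B = 2 graphs,
# matching the (B, *) readout described in the docstring above.
readout = tf.math.segment_max(feat, graph_ids)
print(readout.numpy())   # [[3. 5. 2.]
                         #  [4. 2. 2.]]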
jboomer/python-isobus
|
isobus/common.py
|
IBSID.GetCANID
|
python
|
def GetCANID(self):
canid = 0
if ((self.pgn >> 8) & 0xFF) < 0xEF:
canid = (((self.prio & 0x7) << 26)
| ((self.pgn & 0xFF00) << 8)
| ((self.da & 0xFF) << 8)
| (self.sa & 0xFF))
else :
canid = (((self.prio & 0x7) << 26)
| ((self.pgn & 0xFFFF) << 8)
| (self.sa & 0xFF))
return canid
|
Return the CAN ID as a 29-bit identifier
|
https://github.com/jboomer/python-isobus/blob/da09e6ecaf27a540a87d19319e4bdeb36685aea3/isobus/common.py#L45-L60
|
class IBSException(Exception):
pass
class NumericValue():
def __init__(self, value):
self.value = value
@classmethod
def FromLEBytes(cls, databytes):
value = sum([(databytes[n] << n*8) for n in range(len(databytes))])
return cls(value)
@classmethod
def FromBEBytes(cls, databytes):
value = sum([(databytes[n] << n*8) for n in reversed(range(len(databytes)))])
return cls(value)
def AsLEBytes(self, nBytes = 4):
return [(self.value >> (n * 8) & 0xFF) for n in range(nBytes)]
def AsBEBytes(self, nBytes = 4):
return [(self.value >> (n * 8) & 0xFF) for n in reversed(range(nBytes))]
def AsString(self):
return '0x{value1:08X} ({value2})'.format(value1 = self.value, value2 = self.value)
def Value(self):
return self.value
class IBSID():
def __init__(self, da, sa, pgn, prio=6):
self.da = da
self.sa = sa
self.pgn = pgn
self.prio = prio
|
MIT License
|
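A worked example for GetCANID in the record above. PGN 0xE800 has PDU format byte 0xE8, which is below the 0xEF threshold in the code, so the destination address is packed into the identifier; PGN 0xFECA has PDU format byte 0xFE, so the DA field is dropped. The concrete addresses are illustrative, not taken from the record.

from isobus.common import IBSID

# Destination-specific (PDU1) case: prio=6, PGN=0xE800, DA=0x25, SA=0x81
pdu1 = IBSID(da=0x25, sa=0x81, pgn=0xE800, prio=6)
print(hex(pdu1.GetCANID()))   # 0x18e82581
# = (6 << 26) | (0xE800 << 8) | (0x25 << 8) | 0x81

# Broadcast (PDU2) case: prio=6, PGN=0xFECA, SA=0x81; DA is not encoded
pdu2 = IBSID(da=0xFF, sa=0x81, pgn=0xFECA, prio=6)
print(hex(pdu2.GetCANID()))   # 0x18feca81
# = (6 << 26) | (0xFECA << 8) | 0x81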
myriadrf/pylms7002soapy
|
pyLMS7002Soapy/LMS7002_DCCAL.py
|
LMS7002_DCCAL.DC_RXBI
|
python
|
def DC_RXBI(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-63 <= value <= 63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBI', 'DC_RXBI<6:0>', val)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
self._writeReg('RXBI', 'DCWR_RXBI', 1)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
else:
raise ValueError("Bitfield RXBI is not supported on chip version " + str(self.chip.chipID))
|
Set the value of DC_RXBI
|
https://github.com/myriadrf/pylms7002soapy/blob/4f828eb9282c302dc6b187d91df5e77c8a6f2d61/pyLMS7002Soapy/LMS7002_DCCAL.py#L623-L636
|
from pyLMS7002Soapy.LMS7002_base import LMS7002_base
class LMS7002_DCCAL(LMS7002_base):
__slots__ = []
def __init__(self, chip):
self.chip = chip
self.channel = None
self.prefix = "DCCAL_"
@property
def DCMODE(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'DCMODE')
else:
raise ValueError("Bitfield DCMODE is not supported on chip version " + str(self.chip.chipID))
@DCMODE.setter
def DCMODE(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1, 'MANUAL', 'AUTO']:
raise ValueError("Value must be [0,1,'MANUAL','AUTO']")
if value == 0 or value == 'MANUAL':
val = 0
else:
val = 1
self._writeReg('CFG', 'DCMODE', val)
else:
raise ValueError("Bitfield DCMODE is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCDAC_RXB(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXB')
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version " + str(self.chip.chipID))
@PD_DCDAC_RXB.setter
def PD_DCDAC_RXB(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCDAC_RXA(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXA')
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version " + str(self.chip.chipID))
@PD_DCDAC_RXA.setter
def PD_DCDAC_RXA(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCDAC_TXB(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXB')
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version " + str(self.chip.chipID))
@PD_DCDAC_TXB.setter
def PD_DCDAC_TXB(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCDAC_TXA(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXA')
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version " + str(self.chip.chipID))
@PD_DCDAC_TXA.setter
def PD_DCDAC_TXA(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCCMP_RXB(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXB')
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version " + str(self.chip.chipID))
@PD_DCCMP_RXB.setter
def PD_DCCMP_RXB(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCCMP_RXA(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXA')
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version " + str(self.chip.chipID))
@PD_DCCMP_RXA.setter
def PD_DCCMP_RXA(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCCMP_TXB(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXB')
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version " + str(self.chip.chipID))
@PD_DCCMP_TXB.setter
def PD_DCCMP_TXB(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version " + str(self.chip.chipID))
@property
def PD_DCCMP_TXA(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXA')
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version " + str(self.chip.chipID))
@PD_DCCMP_TXA.setter
def PD_DCCMP_TXA(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version " + str(self.chip.chipID))
@property
def DCCAL_CALSTATUS(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CALSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version " + str(self.chip.chipID))
@DCCAL_CALSTATUS.setter
def DCCAL_CALSTATUS(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (0 <= value <= 255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CALSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version " + str(self.chip.chipID))
@property
def DCCAL_CMPSTATUS(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CMPSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version " + str(self.chip.chipID))
@DCCAL_CMPSTATUS.setter
def DCCAL_CMPSTATUS(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (0 <= value <= 255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CMPSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version " + str(self.chip.chipID))
@property
def DCCAL_CMPCFG(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_CMPCFG<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version " + str(self.chip.chipID))
@DCCAL_CMPCFG.setter
def DCCAL_CMPCFG(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (0 <= value <= 255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_CMPCFG<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version " + str(self.chip.chipID))
@property
def DCCAL_START(self):
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_START<7:0>')
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version " + str(self.chip.chipID))
@DCCAL_START.setter
def DCCAL_START(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (0 <= value <= 255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_START<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version " + str(self.chip.chipID))
def startRXBQ(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 7
self.DCCAL_START = 0
def startRXBI(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 6
self.DCCAL_START = 0
def startRXAQ(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 5
self.DCCAL_START = 0
def startRXAI(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 4
self.DCCAL_START = 0
def startTXBQ(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 3
self.DCCAL_START = 0
def startTXBI(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 2
self.DCCAL_START = 0
def startTXAQ(self):
self.DCCAL_START = 0
self.DCCAL_START = 1 << 1
self.DCCAL_START = 0
def startTXAI(self):
self.DCCAL_START = 0
self.DCCAL_START = 1
self.DCCAL_START = 0
@property
def DC_TXAI(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAI', 'DCRD_TXAI', 0)
self._writeReg('TXAI', 'DCRD_TXAI', 1)
self._writeReg('TXAI', 'DCRD_TXAI', 0)
val = self._readReg('TXAI', 'DC_TXAI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAI is not supported on chip version " + str(self.chip.chipID))
@DC_TXAI.setter
def DC_TXAI(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-1024 <= value <= 1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAI', 'DC_TXAI<10:0>', val)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
self._writeReg('TXAI', 'DCWR_TXAI', 1)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
else:
raise ValueError("Bitfield TXAI is not supported on chip version " + str(self.chip.chipID))
@property
def DC_TXAQ(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
self._writeReg('TXAQ', 'DCRD_TXAQ', 1)
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
val = self._readReg('TXAQ', 'DC_TXAQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAQ is not supported on chip version " + str(self.chip.chipID))
@DC_TXAQ.setter
def DC_TXAQ(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-1024 <= value <= 1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAQ', 'DC_TXAQ<10:0>', val)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
self._writeReg('TXAQ', 'DCWR_TXAQ', 1)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
else:
raise ValueError("Bitfield TXAQ is not supported on chip version " + str(self.chip.chipID))
@property
def DC_TXBI(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBI', 'DCRD_TXBI', 0)
self._writeReg('TXBI', 'DCRD_TXBI', 1)
self._writeReg('TXBI', 'DCRD_TXBI', 0)
val = self._readReg('TXBI', 'DC_TXBI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBI is not supported on chip version " + str(self.chip.chipID))
@DC_TXBI.setter
def DC_TXBI(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-1024 <= value <= 1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBI', 'DC_TXBI<10:0>', val)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
self._writeReg('TXBI', 'DCWR_TXBI', 1)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
else:
raise ValueError("Bitfield TXBI is not supported on chip version " + str(self.chip.chipID))
@property
def DC_TXBQ(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
self._writeReg('TXBQ', 'DCRD_TXBQ', 1)
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
val = self._readReg('TXBQ', 'DC_TXBQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBQ is not supported on chip version " + str(self.chip.chipID))
@DC_TXBQ.setter
def DC_TXBQ(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-1024 <= value <= 1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBQ', 'DC_TXBQ<10:0>', val)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
self._writeReg('TXBQ', 'DCWR_TXBQ', 1)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
else:
raise ValueError("Bitfield TXBQ is not supported on chip version " + str(self.chip.chipID))
@property
def DC_RXAI(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAI', 'DCRD_RXAI', 0)
self._writeReg('RXAI', 'DCRD_RXAI', 1)
self._writeReg('RXAI', 'DCRD_RXAI', 0)
val = self._readReg('RXAI', 'DC_RXAI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAI is not supported on chip version " + str(self.chip.chipID))
@DC_RXAI.setter
def DC_RXAI(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-63 <= value <= 63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAI', 'DC_RXAI<6:0>', val)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
self._writeReg('RXAI', 'DCWR_RXAI', 1)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
else:
raise ValueError("Bitfield RXAI is not supported on chip version " + str(self.chip.chipID))
@property
def DC_RXAQ(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
self._writeReg('RXAQ', 'DCRD_RXAQ', 1)
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
val = self._readReg('RXAQ', 'DC_RXAQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAQ is not supported on chip version " + str(self.chip.chipID))
@DC_RXAQ.setter
def DC_RXAQ(self, value):
if self.chip.chipID == self.chip.chipIDMR3:
if not (-63 <= value <= 63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAQ', 'DC_RXAQ<6:0>', val)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
self._writeReg('RXAQ', 'DCWR_RXAQ', 1)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
else:
raise ValueError("Bitfield RXAQ is not supported on chip version " + str(self.chip.chipID))
@property
def DC_RXBI(self):
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBI', 'DCRD_RXBI', 0)
self._writeReg('RXBI', 'DCRD_RXBI', 1)
self._writeReg('RXBI', 'DCRD_RXBI', 0)
val = self._readReg('RXBI', 'DC_RXBI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBI is not supported on chip version " + str(self.chip.chipID))
@DC_RXBI.setter
|
Apache License 2.0
|
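DC_RXBI above round-trips a signed correction through 7-bit sign-magnitude words via the base-class helpers intToSignMagnitude and signMagnitudeToInt, which are not shown in the record. A standalone sketch of the encoding those helpers are assumed to implement (top bit = sign, remaining bits = magnitude):

# Assumed semantics of the LMS7002_base helpers referenced above.
def int_to_sign_magnitude(value, nbits):
    sign = 1 if value < 0 else 0
    return (sign << (nbits - 1)) | (abs(value) & ((1 << (nbits - 1)) - 1))

def sign_magnitude_to_int(word, nbits):
    magnitude = word & ((1 << (nbits - 1)) - 1)
    return -magnitude if word >> (nbits - 1) else magnitude

print(hex(int_to_sign_magnitude(-5, 7)))   # 0x45: sign bit set, magnitude 5
print(sign_magnitude_to_int(0x45, 7))      # -5
print(hex(int_to_sign_magnitude(63, 7)))   # 0x3f, the +63 end of the DC_RXBI range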
iheartradio/henson
|
henson/base.py
|
Application.__repr__
|
python
|
def __repr__(self):
return '<Application: {}>'.format(self)
|
Return a representation of the application's name.
|
https://github.com/iheartradio/henson/blob/087931281fce144c50f32b7e49ae9be9f3bcbb8d/henson/base.py#L70-L72
|
import asyncio
import queue as sync_queue
from contextlib import suppress
from copy import deepcopy
import logging
import sys
import traceback
from .config import Config
from .exceptions import Abort
__all__ = ('Application',)
class Application:
def __init__(self, name, settings=None, *, consumer=None, callback=None):
self.name = name
self.settings = Config()
self.settings.from_object(settings or {})
self.settings.setdefault('DEBUG', False)
self.settings.setdefault('SLEEP_TIME', 0.1)
self.settings.setdefault('ASYNC_QUEUE', True)
self.callback = callback
self._callbacks = {
'error': [],
'message_acknowledgement': [],
'message_preprocessor': [],
'result_postprocessor': [],
'startup': [],
'teardown': [],
}
self.extensions = {}
self.consumer = consumer
self.logger = logging.getLogger(self.name)
def __str__(self):
return self.name
|
Apache License 2.0
|
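A tiny check of the record above: __repr__ formats the application through __str__, which simply returns its name. Assumes henson is installed; the application name is a placeholder.

from henson.base import Application

app = Application('file-processor')
print(str(app))    # file-processor
print(repr(app))   # <Application: file-processor>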
exopy/exopy
|
exopy/tasks/tasks/logic/loop_exceptions_tasks.py
|
ContinueTask.perform
|
python
|
def perform(self):
if self.format_and_eval_string(self.condition):
raise ContinueException()
|
If the condition evaluates to true, continue.
|
https://github.com/exopy/exopy/blob/aeda9bcfad2d2f76903c7ad2800ea2110ff689b2/exopy/tasks/tasks/logic/loop_exceptions_tasks.py#L81-L86
|
from atom.api import (Str, set_default)
from ..validators import Feval
from ..base_tasks import SimpleTask
from .loop_task import LoopTask
from .while_task import WhileTask
from .loop_exceptions import BreakException, ContinueException
class BreakTask(SimpleTask):
condition = Str().tag(pref=True, feval=Feval())
parallel = set_default({'forbidden': True})
def check(self, *args, **kwargs):
test, traceback = super(BreakTask, self).check(*args, **kwargs)
if not isinstance(self.parent, (LoopTask, WhileTask)):
test = False
mess = 'Incorrect parent type: {}, expected LoopTask or WhileTask.'
traceback[self.path + '/' + self.name + '-parent'] = mess.format(self.parent.task_id)
return test, traceback
def perform(self):
if self.format_and_eval_string(self.condition):
raise BreakException()
class ContinueTask(SimpleTask):
condition = Str().tag(pref=True, feval=Feval())
parallel = set_default({'forbidden': True})
def check(self, *args, **kwargs):
test, traceback = super(ContinueTask, self).check(*args, **kwargs)
if not isinstance(self.parent, (LoopTask, WhileTask)):
test = False
mess = 'Incorrect parent type: {}, expected LoopTask or WhileTask.'
traceback[self.path + '/' + self.name + '-parent'] = mess.format(self.parent.task_id)
return test, traceback
|
BSD 3-Clause New or Revised License
|
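BreakTask and ContinueTask above turn evaluated conditions into control-flow exceptions for their parent loop. The parent-side handling is not shown in the record, so the sketch below is a framework-free illustration of how a loop could interpret those exceptions, with local stand-in exception classes.

# Stand-in exceptions; in exopy they come from .loop_exceptions.
class BreakException(Exception):
    pass

class ContinueException(Exception):
    pass

def run_loop(children, values):
    for i in values:
        try:
            for child in children:
                child(i)
        except ContinueException:
            continue        # skip the remaining children for this iteration
        except BreakException:
            break           # abandon the loop entirely

seen = []

def skip_even(i):
    if i % 2 == 0:
        raise ContinueException()

def stop_at_five(i):
    if i == 5:
        raise BreakException()

run_loop([skip_even, stop_at_five, seen.append], range(1, 10))
print(seen)   # [1, 3]: even values skipped, loop broken at 5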
dipdup-net/dipdup-py
|
src/dipdup/codegen.py
|
DipDupCodeGenerator.create_package
|
python
|
async def create_package(self) -> None:
try:
package_path = self._config.package_path
except ImportError:
self._logger.info('Creating package `%s`', self._config.package)
package_path = join(os.getcwd(), self._config.package)
touch(join(package_path, '__init__.py'))
models_path = join(package_path, 'models.py')
if not exists(models_path):
template = load_template('models.py')
models_code = template.render()
write(models_path, models_code)
for subpackage in ('handlers', 'hooks'):
subpackage_path = join(self._config.package_path, subpackage)
touch(join(subpackage_path, '__init__.py'))
sql_path = join(self._config.package_path, 'sql')
touch(join(sql_path, '.keep'))
graphql_path = join(self._config.package_path, 'graphql')
touch(join(graphql_path, '.keep'))
|
Create the Python package skeleton if it does not exist
|
https://github.com/dipdup-net/dipdup-py/blob/49b0c1e3310c56196f6100a69c5e13213e1b6fcc/src/dipdup/codegen.py#L98-L122
|
import json
import logging
import os
import re
import subprocess
from copy import copy
from os.path import basename, dirname, exists, join, relpath, splitext
from shutil import rmtree
from typing import Any, Dict, List, cast
from jinja2 import Template
from dipdup import __version__
from dipdup.config import (
BigMapIndexConfig,
CallbackMixin,
ContractConfig,
DatasourceConfigT,
DipDupConfig,
HandlerConfig,
HeadIndexConfig,
IndexTemplateConfig,
OperationHandlerOriginationPatternConfig,
OperationHandlerTransactionPatternConfig,
OperationIndexConfig,
TzktDatasourceConfig,
default_hooks,
)
from dipdup.datasources.datasource import Datasource
from dipdup.datasources.tzkt.datasource import TzktDatasource
from dipdup.exceptions import ConfigInitializationException, ConfigurationError
from dipdup.utils import import_submodules, mkdir_p, pascal_to_snake, snake_to_pascal, touch, write
DEFAULT_DOCKER_ENV_FILE_CONTENT = dict(
POSTGRES_USER="dipdup",
POSTGRES_DB="dipdup",
POSTGRES_PASSWORD="changeme",
HASURA_GRAPHQL_DATABASE_URL="postgres://dipdup:changeme@db:5432/dipdup",
HASURA_GRAPHQL_ENABLE_CONSOLE="true",
HASURA_GRAPHQL_ADMIN_INTERNAL_ERRORS="true",
HASURA_GRAPHQL_ENABLED_LOG_TYPES="startup, http-log, webhook-log, websocket-log, query-log",
HASURA_GRAPHQL_ADMIN_SECRET="changeme",
HASURA_GRAPHQL_UNAUTHORIZED_ROLE="user",
)
DEFAULT_DOCKER_IMAGE = 'dipdup/dipdup'
DEFAULT_DOCKER_TAG = __version__
DEFAULT_DOCKER_ENV_FILE = 'dipdup.env'
_templates: Dict[str, Template] = {}
def resolve_big_maps(schema: Dict[str, Any]) -> Dict[str, Any]:
if 'properties' in schema:
return {
**schema,
'properties': {prop: resolve_big_maps(sub_schema) for prop, sub_schema in schema['properties'].items()},
}
elif schema.get('$comment') == 'big_map':
return schema['oneOf'][1]
else:
return schema
def load_template(name: str) -> Template:
if name not in _templates:
with open(join(dirname(__file__), 'templates', name + '.j2'), 'r') as f:
return Template(f.read())
return _templates[name]
class DipDupCodeGenerator:
def __init__(self, config: DipDupConfig, datasources: Dict[DatasourceConfigT, Datasource]) -> None:
self._logger = logging.getLogger('dipdup.codegen')
self._config = config
self._datasources = datasources
self._schemas: Dict[TzktDatasourceConfig, Dict[str, Dict[str, Any]]] = {}
async def init(self, overwrite_types: bool = False) -> None:
self._logger.info('Initializing project')
await self.create_package()
await self.fetch_schemas()
await self.generate_types(overwrite_types)
await self.generate_hooks()
await self.generate_handlers()
await self.cleanup()
await self.verify_package()
async def docker_init(self, image: str, tag: str, env_file: str) -> None:
self._logger.info('Initializing Docker inventory')
await self.generate_docker(image, tag, env_file)
await self.verify_package()
|
MIT License
|
kuri65536/python-for-android
|
python-modules/twisted/twisted/test/test_process.py
|
UtilityProcessProtocol.parseChunks
|
python
|
def parseChunks(self, bytes):
raise NotImplementedError()
|
Called with all bytes received on stdout when the process exits.
|
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/test/test_process.py#L374-L378
|
import gzip
import os
import sys
import signal
import StringIO
import errno
import gc
import stat
try:
import fcntl
except ImportError:
fcntl = process = None
else:
from twisted.internet import process
from zope.interface.verify import verifyObject
from twisted.python.log import msg
from twisted.internet import reactor, protocol, error, interfaces, defer
from twisted.trial import unittest
from twisted.python import util, runtime, procutils
from twisted.python.compat import set
class StubProcessProtocol(protocol.ProcessProtocol):
def outReceived(self, data):
raise NotImplementedError()
def errReceived(self, data):
raise NotImplementedError()
def inConnectionLost(self):
raise NotImplementedError()
def outConnectionLost(self):
raise NotImplementedError()
def errConnectionLost(self):
raise NotImplementedError()
class ProcessProtocolTests(unittest.TestCase):
def test_interface(self):
verifyObject(interfaces.IProcessProtocol, protocol.ProcessProtocol())
def test_outReceived(self):
received = []
class OutProtocol(StubProcessProtocol):
def outReceived(self, data):
received.append(data)
bytes = "bytes"
p = OutProtocol()
p.childDataReceived(1, bytes)
self.assertEqual(received, [bytes])
def test_errReceived(self):
received = []
class ErrProtocol(StubProcessProtocol):
def errReceived(self, data):
received.append(data)
bytes = "bytes"
p = ErrProtocol()
p.childDataReceived(2, bytes)
self.assertEqual(received, [bytes])
def test_inConnectionLost(self):
lost = []
class InLostProtocol(StubProcessProtocol):
def inConnectionLost(self):
lost.append(None)
p = InLostProtocol()
p.childConnectionLost(0)
self.assertEqual(lost, [None])
def test_outConnectionLost(self):
lost = []
class OutLostProtocol(StubProcessProtocol):
def outConnectionLost(self):
lost.append(None)
p = OutLostProtocol()
p.childConnectionLost(1)
self.assertEqual(lost, [None])
def test_errConnectionLost(self):
lost = []
class ErrLostProtocol(StubProcessProtocol):
def errConnectionLost(self):
lost.append(None)
p = ErrLostProtocol()
p.childConnectionLost(2)
self.assertEqual(lost, [None])
class TrivialProcessProtocol(protocol.ProcessProtocol):
def __init__(self, d):
self.deferred = d
self.outData = []
self.errData = []
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
def outReceived(self, data):
self.outData.append(data)
def errReceived(self, data):
self.errData.append(data)
class TestProcessProtocol(protocol.ProcessProtocol):
def connectionMade(self):
self.stages = [1]
self.data = ''
self.err = ''
self.transport.write("abcd")
def childDataReceived(self, childFD, data):
if childFD == 1:
self.data += data
elif childFD == 2:
self.err += data
def childConnectionLost(self, childFD):
if childFD == 1:
self.stages.append(2)
if self.data != "abcd":
raise RuntimeError
self.transport.write("1234")
elif childFD == 2:
self.stages.append(3)
if self.err != "1234":
print 'err != 1234: ' + repr(self.err)
raise RuntimeError()
self.transport.write("abcd")
self.stages.append(4)
elif childFD == 0:
self.stages.append(5)
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
class EchoProtocol(protocol.ProcessProtocol):
s = "1234567" * 1001
n = 10
finished = 0
failure = None
def __init__(self, onEnded):
self.onEnded = onEnded
self.count = 0
def connectionMade(self):
assert self.n > 2
for i in range(self.n - 2):
self.transport.write(self.s)
self.transport.writeSequence([self.s, self.s])
self.buffer = self.s * self.n
def outReceived(self, data):
if buffer(self.buffer, self.count, len(data)) != buffer(data):
self.failure = ("wrong bytes received", data, self.count)
self.transport.closeStdin()
else:
self.count += len(data)
if self.count == len(self.buffer):
self.transport.closeStdin()
def processEnded(self, reason):
self.finished = 1
if not reason.check(error.ProcessDone):
self.failure = "process didn't terminate normally: " + str(reason)
self.onEnded.callback(self)
class SignalProtocol(protocol.ProcessProtocol):
def __init__(self, deferred, sig):
self.deferred = deferred
self.signal = sig
self.signaled = False
def outReceived(self, data):
msg("Received %r from child stdout" % (data,))
if not self.signaled:
self.signaled = True
self.transport.signalProcess(self.signal)
def errReceived(self, data):
msg("Received %r from child stderr" % (data,))
def processEnded(self, reason):
msg("Child exited: %r" % (reason.getTraceback(),))
if not reason.check(error.ProcessTerminated):
return self.deferred.errback(
ValueError("wrong termination: %s" % (reason,)))
v = reason.value
if isinstance(self.signal, str):
signalValue = getattr(signal, 'SIG' + self.signal)
else:
signalValue = self.signal
if v.exitCode is not None:
return self.deferred.errback(
ValueError("SIG%s: exitCode is %s, not None" %
(self.signal, v.exitCode)))
if v.signal != signalValue:
return self.deferred.errback(
ValueError("SIG%s: .signal was %s, wanted %s" %
(self.signal, v.signal, signalValue)))
if os.WTERMSIG(v.status) != signalValue:
return self.deferred.errback(
ValueError('SIG%s: %s' % (self.signal, os.WTERMSIG(v.status))))
self.deferred.callback(None)
class TestManyProcessProtocol(TestProcessProtocol):
def __init__(self):
self.deferred = defer.Deferred()
def processEnded(self, reason):
self.reason = reason
if reason.check(error.ProcessDone):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
class UtilityProcessProtocol(protocol.ProcessProtocol):
program = None
def run(cls, reactor, argv, env):
exe = sys.executable
self = cls()
reactor.spawnProcess(
self, exe, [exe, "-c", self.program] + argv, env=env)
return self
run = classmethod(run)
def __init__(self):
self.bytes = []
self.requests = []
|
Apache License 2.0
|
livenson/vcdm
|
src/vcdm/backends/datastore/couchdb_store.py
|
CouchDBStore.find_by_path
|
python
|
def find_by_path(self, path, object_type=None, fields=None):
return self.find_by_property('fullpath', path, object_type, fields)
|
Find an object at a given path.
- object_type - optional filter by the type of an object (e.g. blob, container, ...)
- fields - fields to retrieve from the database. By default only the UID of an object is retrieved
|
https://github.com/livenson/vcdm/blob/cb53e2f6d0e7d37f72c3c11f3c8369c698ebedbe/src/vcdm/backends/datastore/couchdb_store.py#L168-L173
|
import socket
import time
from uuid import uuid4
import sys
from distutils.version import StrictVersion
import couchdb
from vcdm.config import get_config
from vcdm.errors import InternalError
config = get_config()
class CouchDBStore(object):
db = None
def __init__(self):
server = couchdb.Server(config.get('couchdb', 'datastore.endpoint'))
try:
version = server.version()
assert StrictVersion(version) > '1.0'
except socket.error as e:
print "Failed to connect to a CouchDB instance at %s" % config.get('couchdb', 'datastore.endpoint')
print "[%s] %s" % (e.errno, e.strerror)
sys.exit(-1)
except AssertionError:
print "Couchdb server version '%s' is not supported. At least version 1.0 is required." % version
sys.exit(-1)
if 'meta' not in server:
self.db = server.create('meta')
else:
self.db = server['meta']
if self.find_by_path('/', 'container')[0] is None:
self.write({
'object': 'container',
'fullpath': '/',
'name': '/',
'parent_container': '/',
'children': {},
'metadata': {},
'owner': 'system',
'ctime': str(time.time()),
'mtime': str(time.time())}, None)
def read(self, docid):
return self.db[docid]
def write(self, data, docid=None):
if docid is None:
docid = uuid4().hex
if docid in self.db:
doc = self.db[docid]
doc.update(data)
self.db.save(doc)
else:
data['_id'] = docid
self.db.save(data)
return docid
def exists(self, docid):
return (docid in self.db)
def delete(self, docid):
del self.db[docid]
def find_uid_match(self, pattern):
dirn_fun = '''
function(doc) {
if (doc.fullpath.match(/^%s/)) {
emit(doc.id, doc.fullpath);
}
}
''' % pattern.replace("/", "\\/")
return list(self.db.query(dirn_fun))
def get_total_blob_size(self, start_time, end_time, avatar='Anonymous'):
dirn_fun = '''
function(doc) {
if (doc.ctime > %s && doc.ctime < %s && doc.object == 'blob' && doc.owner == '%s') {
emit(doc.size, null);
}
}
''' % (start_time, end_time, avatar)
return sum([x.key for x in self.db.query(dirn_fun)])
def get_all_avatars(self):
dirn_fun = '''
function(doc) {
if (doc.object == 'blob' || doc.object == 'container') {
emit(doc.owner, 1);
}
}
'''
reducer = '''
function(keys, values) {
var a = [], l = keys.length;
for(var i=0; i<l; i++) {
for(var j=i+1; j<l; j++)
if (keys[i][0] === keys[j][0]) j = ++i;
a.push(keys[i][0]);
}
return a;
}
'''
res = list(self.db.query(dirn_fun, reduce_fun=reducer, options='group=true'))
return res[0].value if res[0].value is not None else []
def find_by_property(self, property_name, property_value, object_type=None, fields=None):
comparision_string = 'true'
if object_type is not None:
comparision_string = "doc.object == '%s'" % object_type
if fields is not None:
fields = '{' + ','.join([f + ': doc.' + f for f in fields]) + '}'
else:
fields = 'null'
fnm_fun = '''function(doc) {
if (doc.%s == '%s' && %s ) {
emit(doc.id, %s);
}
}
''' % (property_name, property_value, comparision_string, fields)
res = self.db.query(fnm_fun)
if len(res) == 0:
return (None, {})
elif len(res) > 1:
raise InternalError("Namespace collision: more than one UID corresponds to an object.")
else:
tmp_res = list(res)[0]
return (tmp_res.id, tmp_res.value)
|
BSD 3-Clause New or Revised License
|
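A usage sketch for find_by_path above. It assumes a running CouchDB instance behind the endpoint configured for the store (the codebase itself targets Python 2); the path and field names are examples. find_by_path simply delegates to find_by_property on the fullpath field.

from vcdm.backends.datastore.couchdb_store import CouchDBStore

store = CouchDBStore()   # connects to the configured CouchDB endpoint

# UID plus selected fields of the root container, which the store creates on
# first start-up; returns (None, {}) when no object matches the path.
uid, info = store.find_by_path('/', object_type='container',
                               fields=['fullpath', 'owner'])
print(uid, info)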
home-assistant-libs/zwave-js-server-python
|
zwave_js_server/model/command_class.py
|
CommandClassInfo.is_secure
|
python
|
def is_secure(self) -> bool:
return self.data["isSecure"]
|
Return whether the CommandClass is used securely on this node/endpoint.
|
https://github.com/home-assistant-libs/zwave-js-server-python/blob/47c007b5337c4272713ba0b8bce6ca644ddf1489/zwave_js_server/model/command_class.py#L44-L46
|
from typing import TypedDict
from ..const import CommandClass
class CommandClassInfoDataType(TypedDict):
id: int
name: str
version: int
isSecure: bool
class CommandClassInfo:
def __init__(self, data: CommandClassInfoDataType) -> None:
self.data = data
@property
def id(self) -> CommandClass:
return CommandClass(self.data["id"])
@property
def name(self) -> str:
return self.data["name"]
@property
def version(self) -> int:
return self.data["version"]
@property
|
Apache License 2.0
|
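A small sketch exercising the record above. The raw dict mirrors CommandClassInfoDataType; the concrete values (command class 38, Multilevel Switch) are illustrative.

from zwave_js_server.model.command_class import CommandClassInfo

info = CommandClassInfo({
    "id": 38,                    # COMMAND_CLASS_SWITCH_MULTILEVEL
    "name": "Multilevel Switch",
    "version": 4,
    "isSecure": False,
})

# id is resolved to the CommandClass enum; is_secure reads the raw flag.
print(info.id, info.name, info.version, info.is_secure)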
zzzeek/sqlalchemy
|
lib/sqlalchemy/orm/relationships.py
|
foreign
|
python
|
def foreign(expr):
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True}
)
|
Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
|
https://github.com/zzzeek/sqlalchemy/blob/979ea6b21f71605314dc0ac1231dd385eced98c4/lib/sqlalchemy/orm/relationships.py#L72-L89
|
from __future__ import absolute_import
import collections
import re
import weakref
from . import attributes
from .base import _is_mapped_class
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
def remote(expr):
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True}
)
|
MIT License
|
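A hedged sketch of how foreign() (together with its counterpart remote(), defined alongside it above) is typically used to annotate a custom primaryjoin; the self-referential model below is hypothetical and mirrors documented usage rather than code from the record.

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base, relationship, foreign, remote
from sqlalchemy.orm import configure_mappers

Base = declarative_base()

class HostEntry(Base):
    __tablename__ = 'host_entry'

    id = Column(Integer, primary_key=True)
    ip_address = Column(String(50))
    content = Column(String(50))

    # No ForeignKey on these columns, so the join sides are annotated
    # explicitly: content is the "foreign" side, ip_address the "remote" one.
    parent_host = relationship(
        'HostEntry',
        primaryjoin=remote(ip_address) == foreign(content),
    )

configure_mappers()   # forces relationship configuration to prove the join resolves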
yandex-cloud/python-sdk
|
yandex/cloud/compute/v1/instancegroup/instance_group_service_pb2_grpc.py
|
InstanceGroupServiceServicer.Update
|
python
|
def Update(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
Updates the specified instance group.
This method starts an operation that can be cancelled by another operation.
|
https://github.com/yandex-cloud/python-sdk/blob/6ddaaaf0ad01d8fc36cb72957f70a6e7943a5ce7/yandex/cloud/compute/v1/instancegroup/instance_group_service_pb2_grpc.py#L154-L160
|
import grpc
from yandex.cloud.access import access_pb2 as yandex_dot_cloud_dot_access_dot_access__pb2
from yandex.cloud.compute.v1.instancegroup import instance_group_pb2 as yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__pb2
from yandex.cloud.compute.v1.instancegroup import instance_group_service_pb2 as yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class InstanceGroupServiceStub(object):
def __init__(self, channel):
self.Get = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Get',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.GetInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__pb2.InstanceGroup.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/List',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupsResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Create',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.CreateInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.CreateFromYaml = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/CreateFromYaml',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.CreateInstanceGroupFromYamlRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Update',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.UpdateInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateFromYaml = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/UpdateFromYaml',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.UpdateInstanceGroupFromYamlRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Stop = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Stop',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.StopInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Start = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Start',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.StartInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/Delete',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.DeleteInstanceGroupRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListInstances = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/ListInstances',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupInstancesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupInstancesResponse.FromString,
)
self.DeleteInstances = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/DeleteInstances',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.DeleteInstancesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.StopInstances = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/StopInstances',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.StopInstancesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/ListOperations',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupOperationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupOperationsResponse.FromString,
)
self.ListLogRecords = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/ListLogRecords',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupLogRecordsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ListInstanceGroupLogRecordsResponse.FromString,
)
self.ListAccessBindings = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/ListAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.FromString,
)
self.SetAccessBindings = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/SetAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateAccessBindings = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/UpdateAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ResumeProcesses = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/ResumeProcesses',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.ResumeInstanceGroupProcessesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.PauseProcesses = channel.unary_unary(
'/yandex.cloud.compute.v1.instancegroup.InstanceGroupService/PauseProcesses',
request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_instancegroup_dot_instance__group__service__pb2.PauseInstanceGroupProcessesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class InstanceGroupServiceServicer(object):
def Get(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateFromYaml(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
MIT License
|
pandaxcentric/game_apis
|
game_apis/rest/pubg.py
|
Pubg.get_players
|
python
|
def get_players(self, filter, players):
return self._get('/players?filter[{}]={}'.format(filter, ",".join(players)))
|
filter: either 'playerIds' or 'playerNames'
players: a list of strings; pass either ids or names depending on the filter
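A minimal usage sketch for the call above (hedged: the config path, region and player names are placeholders, and a valid PUBG API key is assumed to be supplied via the config):
from game_apis.rest.pubg import Pubg

# Placeholder config path and region; a real config must provide the API key.
pubg = Pubg('config.yaml', region='pc-na')
# Look up players by name; swap the filter to 'playerIds' to pass account ids instead.
resp = pubg.get_players('playerNames', ['player_one', 'player_two'])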
|
https://github.com/pandaxcentric/game_apis/blob/fb7dfc35f89334e83e15e518fa776b4a1ca87476/game_apis/rest/pubg.py#L62-L67
|
import requests
from game_apis.rest.api import API
from game_apis.log import get_logger
LOG = get_logger('rest', 'rest.log')
class Pubg(API):
ID = 'PUBG'
LIMIT = 6
def __init__(self, config, region=None, sandbox=False, local_config=False, ignore_limiter=False):
super().__init__(config, sandbox, local_config, ignore_limiter)
        if region is None:
region = 'pc-na'
self.rest_api = "https://api.pubg.com/shards/{}".format(region)
def _get(self, command: str, options = None):
headers = {
'Authorization': 'Bearer {}'.format(self.key_id),
'Accept': 'application/vnd.api+json'
}
if options is None:
options = {}
base_url = "{}{}".format(self.rest_api, command)
for key, val in options.items():
if "?" not in base_url:
base_url = "{}?{}={}".format(base_url, key, val)
continue
base_url = "{}&{}={}".format(base_url, key, val)
self.check_limiter()
resp = requests.get(base_url, headers = headers)
self.reset_limiter()
if resp.status_code != 200:
LOG.error("%s: Status code %d", self.ID, resp.status_code)
LOG.error("%s: Headers: %s", self.ID, resp.headers)
LOG.error("%s: Resp: %s", self.ID, resp.text)
resp.raise_for_status()
return resp.json()
def samples(self, parameters = None):
return self._get('/samples', parameters)
|
MIT License
|
flyte/mqtt-io
|
mqtt_io/server.py
|
MqttIo.interrupt_callback
|
python
|
def interrupt_callback(
self,
module: GenericGPIO,
pin: PinType,
*args: Any,
**kwargs: Any,
) -> None:
pin_name = module.pin_configs[pin]["name"]
if not self.running.is_set():
_LOG.warning(
"Ignored interrupt from pin %r as we're not fully initialised", pin_name
)
return
interrupt_lock = self.interrupt_locks[pin_name]
if not interrupt_lock.acquire(blocking=False):
_LOG.warning(
(
"Ignoring interrupt on pin '%s' because we're already busy "
"processing one."
),
pin_name,
)
return
remote_interrupt_for_pin_names: List[str] = []
try:
_LOG.info("Handling interrupt callback on pin '%s'", pin_name)
remote_interrupt_for_pin_names = module.remote_interrupt_for(pin)
if remote_interrupt_for_pin_names:
_LOG.debug("Interrupt on '%s' triggered remote interrupt.", pin_name)
self.handle_remote_interrupt(
remote_interrupt_for_pin_names, interrupt_lock
)
return
_LOG.debug("Interrupt is for the '%s' pin itself", pin_name)
value = module.get_interrupt_value(pin, *args, **kwargs)
self.event_bus.fire(DigitalInputChangedEvent(pin_name, None, value))
finally:
if not remote_interrupt_for_pin_names:
interrupt_lock.release()
|
This function is passed in to any GPIO library that provides software callbacks
called on interrupt. It's passed to the GPIO library's interrupt setup function
with its 'module' and 'pin' parameters already filled in by partial(), so that
any *args and **kwargs supplied by the GPIO library get passed directly back to
our GPIO module's get_interrupt_value() method.
If the pin is configured as a remote interrupt for another pin or pins, then
execution, along with the interrupt lock, is handed off to
self.handle_remote_interrupt() instead of getting the pin value, firing the
DigitalInputChangedEvent and releasing the interrupt lock.
This can potentially be called from any thread.
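A standalone sketch of the partial() binding pattern described above (this is not the mqtt-io wiring itself; the module name and pin number are placeholders):
from functools import partial

def interrupt_callback(module, pin, *args, **kwargs):
    # 'module' and 'pin' are pre-bound; *args/**kwargs arrive from the GPIO library.
    print(module, pin, args, kwargs)

# Bind the first two parameters up front, as the server does for each input pin.
callback = partial(interrupt_callback, 'gpio_module_placeholder', 17)
# A GPIO library would later invoke the callback with its own arguments:
callback('rising_edge_payload')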
|
https://github.com/flyte/mqtt-io/blob/6297a22f1200a35f890c2487428ee84ab2c671cd/mqtt_io/server.py#L765-L825
|
import asyncio
import logging
import re
import signal as signals
import threading
from asyncio.queues import QueueEmpty
from functools import partial
from hashlib import sha1
from importlib import import_module
from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload
import backoff
from typing_extensions import Literal
from .config import (
get_main_schema_section,
validate_and_normalise_config,
validate_and_normalise_digital_input_config,
validate_and_normalise_digital_output_config,
validate_and_normalise_sensor_input_config,
)
from .constants import (
INPUT_TOPIC,
MODULE_CLASS_NAMES,
MODULE_IMPORT_PATH,
MQTT_ANNOUNCE_PRIORITY,
MQTT_PUB_PRIORITY,
MQTT_SUB_PRIORITY,
OUTPUT_TOPIC,
SEND_SUFFIX,
SENSOR_TOPIC,
SET_OFF_MS_SUFFIX,
SET_ON_MS_SUFFIX,
SET_SUFFIX,
STREAM_TOPIC,
)
from .events import (
DigitalInputChangedEvent,
DigitalOutputChangedEvent,
EventBus,
SensorReadEvent,
StreamDataReadEvent,
StreamDataSentEvent,
)
from .home_assistant import (
hass_announce_digital_input,
hass_announce_digital_output,
hass_announce_sensor_input,
)
from .modules import install_missing_module_requirements
from .modules.gpio import GenericGPIO, InterruptEdge, InterruptSupport, PinDirection
from .modules.sensor import GenericSensor
from .modules.stream import GenericStream
from .mqtt import (
AbstractMQTTClient,
MQTTClientOptions,
MQTTException,
MQTTMessageSend,
MQTTTLSOptions,
MQTTWill,
)
from .types import ConfigType, PinType, SensorValueType
from .utils import PriorityCoro, create_unawaited_task_threadsafe
_LOG = logging.getLogger(__name__)
@overload
def _init_module(
module_config: Dict[str, Dict[str, Any]],
module_type: Literal["gpio"],
install_requirements: bool,
) -> GenericGPIO:
...
@overload
def _init_module(
module_config: Dict[str, Dict[str, Any]],
module_type: Literal["sensor"],
install_requirements: bool,
) -> GenericSensor:
...
@overload
def _init_module(
module_config: Dict[str, Dict[str, Any]],
module_type: Literal["stream"],
install_requirements: bool,
) -> GenericStream:
...
def _init_module(
module_config: Dict[str, Dict[str, Any]], module_type: str, install_requirements: bool
) -> Union[GenericGPIO, GenericSensor, GenericStream]:
module = import_module(
"%s.%s.%s" % (MODULE_IMPORT_PATH, module_type, module_config["module"])
)
module_schema = get_main_schema_section(f"{module_type}_modules")
module_schema.update(getattr(module, "CONFIG_SCHEMA", {}))
module_config = validate_and_normalise_config(module_config, module_schema)
if install_requirements:
install_missing_module_requirements(module)
module_class: Type[Union[GenericGPIO, GenericSensor, GenericStream]] = getattr(
module, MODULE_CLASS_NAMES[module_type]
)
return module_class(module_config)
def output_name_from_topic(topic: str, prefix: str, topic_type: str) -> str:
match = re.match(f"^{prefix}/{topic_type}/(.+?)/.+$", topic)
if match is None:
raise ValueError("Topic %r does not adhere to expected structure" % topic)
return match.group(1)
class MqttIo:
def __init__(
self, config: Dict[str, Any], loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
self.config = config
self._init_mqtt_config()
self.running: threading.Event = threading.Event()
self.gpio_configs: Dict[str, ConfigType] = {}
self.digital_input_configs: Dict[str, ConfigType] = {}
self.digital_output_configs: Dict[str, ConfigType] = {}
self.gpio_modules: Dict[str, GenericGPIO] = {}
self.sensor_configs: Dict[str, ConfigType] = {}
self.sensor_input_configs: Dict[str, ConfigType] = {}
self.sensor_modules: Dict[str, GenericSensor] = {}
self.stream_configs: Dict[str, ConfigType] = {}
self.stream_modules: Dict[str, GenericStream] = {}
self.stream_output_queues = {}
        self.gpio_output_queues = {}
self.loop = loop or asyncio.get_event_loop()
self._main_task: Optional["asyncio.Task[None]"] = None
self.critical_tasks: List["asyncio.Task[Any]"] = []
self.transient_tasks: List["asyncio.Task[Any]"] = []
self.event_bus = EventBus(self.loop, self.transient_tasks)
self.mqtt: Optional[AbstractMQTTClient] = None
self.interrupt_locks: Dict[str, threading.Lock] = {}
self.mqtt_task_queue: "asyncio.PriorityQueue[PriorityCoro]"
self.mqtt_connected: asyncio.Event
async def create_loop_resources() -> None:
self.mqtt_task_queue = asyncio.PriorityQueue()
self.mqtt_connected = asyncio.Event()
self.loop.run_until_complete(create_loop_resources())
def _init_mqtt_config(self) -> None:
config: ConfigType = self.config["mqtt"]
topic_prefix: str = config["topic_prefix"]
client_id: Optional[str] = config["client_id"]
if not client_id:
client_id = "mqtt-io-%s" % sha1(topic_prefix.encode("utf8")).hexdigest()
tls_enabled: bool = config.get("tls", {}).get("enabled")
tls_options = None
if tls_enabled:
tls_options = MQTTTLSOptions(
ca_certs=config["tls"].get("ca_certs"),
certfile=config["tls"].get("certfile"),
keyfile=config["tls"].get("keyfile"),
ciphers=config["tls"].get("ciphers"),
)
self.mqtt_client_options = MQTTClientOptions(
hostname=config["host"],
port=config["port"],
username=config["user"],
password=config["password"],
client_id=client_id,
keepalive=config["keepalive"],
clean_session=config["clean_session"],
tls_options=tls_options,
will=MQTTWill(
topic="/".join((topic_prefix, config["status_topic"])),
payload=config["status_payload_dead"].encode("utf8"),
qos=1,
retain=True,
),
)
def _init_gpio_modules(self) -> None:
self.gpio_configs = {x["name"]: x for x in self.config["gpio_modules"]}
self.gpio_modules = {}
for gpio_config in self.config["gpio_modules"]:
self.gpio_modules[gpio_config["name"]] = _init_module(
gpio_config, "gpio", self.config["options"]["install_requirements"]
)
def _init_sensor_modules(self) -> None:
self.sensor_configs = {x["name"]: x for x in self.config["sensor_modules"]}
self.sensor_modules = {}
for sens_config in self.config["sensor_modules"]:
self.sensor_modules[sens_config["name"]] = _init_module(
sens_config, "sensor", self.config["options"]["install_requirements"]
)
def _init_stream_modules(self) -> None:
async def publish_stream_data_callback(event: StreamDataReadEvent) -> None:
stream_conf = self.stream_configs[event.stream_name]
self.mqtt_task_queue.put_nowait(
PriorityCoro(
self._mqtt_publish(
MQTTMessageSend(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
STREAM_TOPIC,
stream_conf["name"],
)
),
event.data,
retain=stream_conf["retain"],
)
),
MQTT_PUB_PRIORITY,
)
)
self.event_bus.subscribe(StreamDataReadEvent, publish_stream_data_callback)
self.stream_configs = {x["name"]: x for x in self.config["stream_modules"]}
self.stream_modules = {}
sub_topics: List[str] = []
for stream_conf in self.config["stream_modules"]:
stream_module = _init_module(
stream_conf, "stream", self.config["options"]["install_requirements"]
)
self.stream_modules[stream_conf["name"]] = stream_module
self.transient_tasks.append(
self.loop.create_task(self.stream_poller(stream_module, stream_conf))
)
async def create_stream_output_queue(
stream_conf: ConfigType = stream_conf,
) -> None:
queue = asyncio.Queue()
self.stream_output_queues[stream_conf["name"]] = queue
self.loop.run_until_complete(create_stream_output_queue())
self.transient_tasks.append(
self.loop.create_task(
partial(
self.stream_output_loop,
stream_module,
stream_conf,
self.stream_output_queues[stream_conf["name"]],
)()
)
)
sub_topics.append(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
STREAM_TOPIC,
stream_conf["name"],
SEND_SUFFIX,
)
)
)
if sub_topics:
self.mqtt_task_queue.put_nowait(
PriorityCoro(self._mqtt_subscribe(sub_topics), MQTT_SUB_PRIORITY)
)
def _init_digital_inputs(self) -> None:
async def publish_callback(event: DigitalInputChangedEvent) -> None:
in_conf = self.digital_input_configs[event.input_name]
value = event.to_value != in_conf["inverted"]
val = in_conf["on_payload"] if value else in_conf["off_payload"]
self.mqtt_task_queue.put_nowait(
PriorityCoro(
self._mqtt_publish(
MQTTMessageSend(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
INPUT_TOPIC,
event.input_name,
)
),
val.encode("utf8"),
retain=in_conf["retain"],
)
),
MQTT_PUB_PRIORITY,
)
)
self.event_bus.subscribe(DigitalInputChangedEvent, publish_callback)
for in_conf in self.config["digital_inputs"]:
gpio_module = self.gpio_modules[in_conf["module"]]
in_conf = validate_and_normalise_digital_input_config(in_conf, gpio_module)
self.digital_input_configs[in_conf["name"]] = in_conf
gpio_module.setup_pin_internal(PinDirection.INPUT, in_conf)
interrupt = in_conf.get("interrupt")
interrupt_for = in_conf.get("interrupt_for")
if interrupt is None or (
interrupt_for and in_conf["poll_when_interrupt_for"]
):
self.transient_tasks.append(
self.loop.create_task(
partial(self.digital_input_poller, gpio_module, in_conf)()
)
)
if interrupt:
edge = {
"rising": InterruptEdge.RISING,
"falling": InterruptEdge.FALLING,
"both": InterruptEdge.BOTH,
}[interrupt]
callback = None
if gpio_module.INTERRUPT_SUPPORT & InterruptSupport.SOFTWARE_CALLBACK:
self.interrupt_locks[in_conf["name"]] = threading.Lock()
callback = partial(
self.interrupt_callback, gpio_module, in_conf["pin"]
)
gpio_module.setup_interrupt_internal(
in_conf["pin"], edge, in_conf, callback=callback
)
def _init_digital_outputs(self) -> None:
async def publish_callback(event: DigitalOutputChangedEvent) -> None:
out_conf = self.digital_output_configs[event.output_name]
val = out_conf["on_payload"] if event.to_value else out_conf["off_payload"]
self.mqtt_task_queue.put_nowait(
PriorityCoro(
self._mqtt_publish(
MQTTMessageSend(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
"output",
event.output_name,
)
),
val.encode("utf8"),
retain=out_conf["retain"],
)
),
MQTT_PUB_PRIORITY,
)
)
self.event_bus.subscribe(DigitalOutputChangedEvent, publish_callback)
for out_conf in self.config["digital_outputs"]:
gpio_module = self.gpio_modules[out_conf["module"]]
out_conf = validate_and_normalise_digital_output_config(out_conf, gpio_module)
self.digital_output_configs[out_conf["name"]] = out_conf
gpio_module.setup_pin_internal(PinDirection.OUTPUT, out_conf)
if out_conf["module"] not in self.gpio_output_queues:
async def create_digital_output_queue(
out_conf: ConfigType = out_conf,
) -> None:
queue = asyncio.Queue()
self.gpio_output_queues[out_conf["module"]] = queue
self.loop.run_until_complete(create_digital_output_queue())
self.transient_tasks.append(
self.loop.create_task(
partial(
self.digital_output_loop,
gpio_module,
self.gpio_output_queues[out_conf["module"]],
)()
)
)
topics = []
for suffix in (SET_SUFFIX, SET_ON_MS_SUFFIX, SET_OFF_MS_SUFFIX):
topics.append(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
OUTPUT_TOPIC,
out_conf["name"],
suffix,
)
)
)
self.mqtt_task_queue.put_nowait(
PriorityCoro(self._mqtt_subscribe(topics), MQTT_SUB_PRIORITY)
)
if out_conf["publish_initial"]:
self.event_bus.fire(
DigitalOutputChangedEvent(
out_conf["name"],
out_conf["initial"]
== ("low" if out_conf["inverted"] else "high"),
)
)
def _init_sensor_inputs(self) -> None:
async def publish_sensor_callback(event: SensorReadEvent) -> None:
sens_conf = self.sensor_input_configs[event.sensor_name]
digits: int = sens_conf["digits"]
self.mqtt_task_queue.put_nowait(
PriorityCoro(
self._mqtt_publish(
MQTTMessageSend(
"/".join(
(
self.config["mqtt"]["topic_prefix"],
SENSOR_TOPIC,
event.sensor_name,
)
),
f"{event.value:.{digits}f}".encode("utf8"),
retain=sens_conf["retain"],
)
),
MQTT_PUB_PRIORITY,
)
)
self.event_bus.subscribe(SensorReadEvent, publish_sensor_callback)
for sens_conf in self.config["sensor_inputs"]:
sensor_module = self.sensor_modules[sens_conf["module"]]
sens_conf = validate_and_normalise_sensor_input_config(
sens_conf, sensor_module
)
self.sensor_input_configs[sens_conf["name"]] = sens_conf
sensor_module.setup_sensor(sens_conf)
async def poll_sensor(
sensor_module: GenericSensor = sensor_module,
sens_conf: ConfigType = sens_conf,
) -> None:
@backoff.on_exception(
backoff.expo, Exception, max_time=sens_conf["interval"]
)
@backoff.on_predicate(
backoff.expo, lambda x: x is None, max_time=sens_conf["interval"]
)
async def get_sensor_value(
sensor_module: GenericSensor = sensor_module,
sens_conf: ConfigType = sens_conf,
) -> SensorValueType:
return await sensor_module.async_get_value(sens_conf)
while True:
value = None
try:
value = await get_sensor_value()
except Exception:
_LOG.exception(
"Exception when retrieving value from sensor %r:",
sens_conf["name"],
)
if value is not None:
value = round(value, sens_conf["digits"])
_LOG.info(
"Read sensor '%s' value of %s", sens_conf["name"], value
)
self.event_bus.fire(SensorReadEvent(sens_conf["name"], value))
await asyncio.sleep(sens_conf["interval"])
self.transient_tasks.append(self.loop.create_task(poll_sensor()))
async def _connect_mqtt(self) -> None:
config: ConfigType = self.config["mqtt"]
topic_prefix: str = config["topic_prefix"]
self.mqtt = AbstractMQTTClient.get_implementation(config["client_module"])(
self.mqtt_client_options
)
_LOG.info("Connecting to MQTT...")
await self.mqtt.connect()
_LOG.info("Connected to MQTT")
self.mqtt_task_queue.put_nowait(
PriorityCoro(
self._mqtt_publish(
MQTTMessageSend(
"/".join((topic_prefix, config["status_topic"])),
config["status_payload_running"].encode("utf8"),
qos=1,
retain=True,
)
),
MQTT_PUB_PRIORITY,
)
)
self.mqtt_connected.set()
def _ha_discovery_announce(self) -> None:
messages: List[MQTTMessageSend] = []
mqtt_config: ConfigType = self.config["mqtt"]
for in_conf in self.digital_input_configs.values():
messages.append(
hass_announce_digital_input(
in_conf, mqtt_config, self.mqtt_client_options
)
)
for out_conf in self.digital_output_configs.values():
messages.append(
hass_announce_digital_output(
out_conf, mqtt_config, self.mqtt_client_options
)
)
for sens_conf in self.sensor_input_configs.values():
messages.append(
hass_announce_sensor_input(
sens_conf, mqtt_config, self.mqtt_client_options
)
)
for msg in messages:
self.mqtt_task_queue.put_nowait(
PriorityCoro(self._mqtt_publish(msg), MQTT_ANNOUNCE_PRIORITY)
)
async def _mqtt_subscribe(self, topics: List[str]) -> None:
if not self.mqtt_connected.is_set():
_LOG.debug("_mqtt_subscribe awaiting MQTT connection")
await self.mqtt_connected.wait()
_LOG.debug("_mqtt_subscribe unblocked after MQTT connection")
if self.mqtt is None:
raise RuntimeError("MQTT client was None when trying to subscribe.")
await self.mqtt.subscribe([(topic, 1) for topic in topics])
for topic in topics:
_LOG.info("Subscribed to topic: %r", topic)
async def _mqtt_publish(self, msg: MQTTMessageSend, wait: bool = True) -> None:
if not self.mqtt_connected.is_set():
if wait:
_LOG.debug("_mqtt_publish awaiting MQTT connection")
await self.mqtt_connected.wait()
_LOG.debug("_mqtt_publish unblocked after MQTT connection")
if self.mqtt is None:
raise RuntimeError("MQTT client was None when trying to publish.")
if msg.payload is None:
_LOG.debug("Publishing MQTT message on topic %r with no payload", msg.topic)
else:
try:
payload_str = msg.payload.decode("utf8")
except UnicodeDecodeError:
_LOG.debug(
"Publishing MQTT message on topic %r with non-unicode payload",
msg.topic,
)
else:
_LOG.debug(
"Publishing MQTT message on topic %r: %r", msg.topic, payload_str
)
await self.mqtt.publish(msg)
async def _handle_digital_input_value(
self,
in_conf: ConfigType,
value: bool,
last_value: Optional[bool],
) -> None:
if value != last_value:
_LOG.info("Digital input '%s' value changed to %s", in_conf["name"], value)
self.event_bus.fire(
DigitalInputChangedEvent(in_conf["name"], last_value, value)
)
interrupt = in_conf.get("interrupt")
interrupt_for = in_conf.get("interrupt_for")
if not interrupt or not interrupt_for:
return
if not any(
(
interrupt == "rising" and value,
interrupt == "falling" and not value,
)
):
return
interrupt_lock = self.interrupt_locks[in_conf["name"]]
if not interrupt_lock.acquire(blocking=False):
_LOG.debug(
(
"Polled an interrupt value on pin '%s', but we're "
"not triggering the remote interrupt because we're "
"already handling it."
),
in_conf["name"],
)
return
_LOG.debug(
"Polled value of %s on '%s' triggered remote interrupt",
value,
in_conf["name"],
)
self.handle_remote_interrupt(interrupt_for, interrupt_lock)
async def digital_input_poller(
self, module: GenericGPIO, in_conf: ConfigType
) -> None:
last_value: Optional[bool] = None
while True:
value = await module.async_get_pin(in_conf["pin"])
await self._handle_digital_input_value(in_conf, value, last_value)
last_value = value
await asyncio.sleep(in_conf["poll_interval"])
async def stream_poller(self, module: GenericStream, stream_conf: ConfigType) -> None:
while True:
try:
data = await module.async_read()
except Exception:
_LOG.exception(
"Exception while polling stream '%s':", stream_conf["name"]
)
else:
if data is not None:
self.event_bus.fire(StreamDataReadEvent(stream_conf["name"], data))
await asyncio.sleep(stream_conf["read_interval"])
|
MIT License
|
opendoor-labs/rets
|
rets/http/client.py
|
RetsHttpClient.cookie_dict
|
python
|
def cookie_dict(self) -> dict:
cookie_d = {}
for k, v in self._session.cookies.iteritems():
cookie_d[k] = v
return cookie_d
|
Keeps the last value in case of duplicate keys.
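A standalone sketch of the duplicate-key behaviour noted above, using a bare requests session rather than RetsHttpClient (cookie names and domains are made up):
import requests

session = requests.Session()
session.cookies.set('RETS-Session-ID', 'first', domain='a.example.com')
session.cookies.set('RETS-Session-ID', 'second', domain='b.example.com')
# Iterating instead of calling dict(...) avoids CookieConflictError;
# later values overwrite earlier ones, so whichever duplicate is iterated
# last wins (here typically 'second').
cookie_d = {k: v for k, v in session.cookies.iteritems()}
print(cookie_d)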
|
https://github.com/opendoor-labs/rets/blob/8933a2b4fb015644105dc078a3118f8e47f694f0/rets/http/client.py#L93-L98
|
from hashlib import md5
from typing import Any, Mapping, Sequence, Union
from urllib.parse import urljoin, urlsplit, urlunsplit, urlencode
import requests
from requests import Response
from requests.auth import AuthBase, HTTPBasicAuth, HTTPDigestAuth
from rets.http.parsers import (
parse_capability_urls,
parse_metadata,
parse_object,
parse_search,
parse_system,
)
from rets.http.data import Object, Metadata, SearchResult, SystemMetadata
from rets.errors import RetsApiError, RetsClientError
class RetsHttpClient:
def __init__(self,
login_url: str,
username: str = None,
password: str = None,
auth_type: str = 'digest',
user_agent: str = 'rets-python/0.3',
user_agent_password: str = '',
rets_version: str = '1.7.2',
capability_urls: str = None,
cookie_dict: dict = None,
use_get_method: bool = False,
send_rets_ua_authorization: bool = True,
):
self._user_agent = user_agent
self._user_agent_password = user_agent_password
self._rets_version = rets_version
self._use_get_method = use_get_method
self._send_rets_ua_authorization = send_rets_ua_authorization
splits = urlsplit(login_url)
self._base_url = urlunsplit((splits.scheme, splits.netloc, '', '', ''))
self._capabilities = capability_urls or {
'Login': splits.path,
}
if username and password:
self._http_auth = _get_http_auth(username, password, auth_type)
else:
self._http_auth = None
self._session = requests.Session()
if cookie_dict:
for name, value in cookie_dict.items():
self._session.cookies.set(name, value=value)
self._rets_session_id = ''
@property
def user_agent(self) -> str:
return self._user_agent
@property
def rets_version(self) -> str:
return 'RETS/' + self._rets_version
@property
def capability_urls(self) -> dict:
return self._capabilities
@property
|
MIT License
|
hustvl/bmaskr-cnn
|
detectron2/utils/visualizer.py
|
Visualizer.draw_binary_mask
|
python
|
def draw_binary_mask(
self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0
):
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
has_valid_segment = False
binary_mask = binary_mask.astype("uint8")
mask = GenericMask(binary_mask, self.output.height, self.output.width)
shape2d = (binary_mask.shape[0], binary_mask.shape[1])
if not mask.has_holes:
for segment in mask.polygons:
area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
if area < (area_threshold or 0):
continue
has_valid_segment = True
segment = segment.reshape(-1, 2)
self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
else:
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
has_valid_segment = True
self.output.ax.imshow(rgba)
if text is not None and has_valid_segment:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
largest_component_id = np.argmax(stats[1:, -1]) + 1
for cid in range(1, _num_cc):
if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
self.draw_text(text, center, color=lighter_color)
return self.output
|
Args:
binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
W is the image width. Each value in the array is either a 0 or 1 value of uint8
type.
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted.
    text (str): if not None, will be drawn at the object's center of mass.
    alpha (float): blending coefficient. Smaller values lead to more transparent masks.
    area_threshold (float): a connected component smaller than this area will not be shown.
Returns:
output (VisImage): image object with mask drawn.
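A hedged usage sketch for the method above (a random image and a hand-made rectangular mask stand in for real model output):
import numpy as np
from detectron2.utils.visualizer import Visualizer

img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
mask = np.zeros((480, 640), dtype=np.uint8)
mask[100:200, 150:300] = 1  # a single rectangular segment
vis = Visualizer(img)
out = vis.draw_binary_mask(mask, color='blue', text='example', alpha=0.5)
out.save('mask_example.png')  # or out.get_image() for an RGB ndarray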
|
https://github.com/hustvl/bmaskr-cnn/blob/c74b0bd3ed47bf4aaa5c211c4e31eddc78fdc636/detectron2/utils/visualizer.py#L974-L1031
|
import colorsys
import logging
import math
import numpy as np
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import pycocotools.mask as mask_util
import torch
from fvcore.common.file_io import PathManager
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from detectron2.data import MetadataCatalog
from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
IMAGE = 0
SEGMENTATION = 1
IMAGE_BW = 2
class GenericMask:
def __init__(self, mask_or_polygons, height, width):
self._mask = self._polygons = self._has_holes = None
self.height = height
self.width = width
m = mask_or_polygons
if isinstance(m, dict):
assert "counts" in m and "size" in m
if isinstance(m["counts"], list):
h, w = m["size"]
assert h == height and w == width
m = mask_util.frPyObjects(m, h, w)
self._mask = mask_util.decode(m)[:, :]
return
if isinstance(m, list):
self._polygons = [np.asarray(x).reshape(-1) for x in m]
return
if isinstance(m, np.ndarray):
assert m.shape[1] != 2, m.shape
assert m.shape == (height, width), m.shape
self._mask = m.astype("uint8")
return
raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
@property
def mask(self):
if self._mask is None:
self._mask = self.polygons_to_mask(self._polygons)
return self._mask
@property
def polygons(self):
if self._polygons is None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
return self._polygons
@property
def has_holes(self):
if self._has_holes is None:
if self._mask is not None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
else:
self._has_holes = False
return self._has_holes
def mask_to_polygons(self, mask):
mask = np.ascontiguousarray(mask)
res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
hierarchy = res[-1]
if hierarchy is None:
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
res = [x.flatten() for x in res]
res = [x for x in res if len(x) >= 6]
return res, has_holes
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(self):
return self.mask.sum()
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
class _PanopticPrediction:
def __init__(self, panoptic_seg, segments_info):
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info}
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names):
labels = None
if classes is not None and class_names is not None and len(class_names) > 1:
labels = [class_names[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
return labels
class VisImage:
def __init__(self, img, scale=1.0):
self.img = img
self.scale = scale
self.width, self.height = img.shape[1], img.shape[0]
self._setup_figure(img)
def _setup_figure(self, img):
fig = mplfigure.Figure(frameon=False)
self.dpi = fig.get_dpi()
fig.set_size_inches(
(self.width * self.scale + 1e-2) / self.dpi,
(self.height * self.scale + 1e-2) / self.dpi,
)
self.canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
ax.set_xlim(0.0, self.width)
ax.set_ylim(self.height)
self.fig = fig
self.ax = ax
def save(self, filepath):
if filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"):
cv2.imwrite(filepath, self.get_image()[:, :, ::-1])
else:
self.ax.imshow(self.img, interpolation="nearest")
self.fig.savefig(filepath)
def get_image(self):
canvas = self.canvas
s, (width, height) = canvas.print_to_buffer()
if (self.width, self.height) != (width, height):
img = cv2.resize(self.img, (width, height))
else:
img = self.img
buffer = np.frombuffer(s, dtype="uint8")
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
try:
import numexpr as ne
visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
except ImportError:
alpha = alpha.astype("float32") / 255.0
visualized_image = img * (1 - alpha) + rgb * alpha
visualized_image = visualized_image.astype("uint8")
return visualized_image
class Visualizer:
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
if metadata is None:
metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
def draw_instance_predictions(self, predictions):
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
)
alpha = 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
def draw_panoptic_seg_predictions(
self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7
):
pred = _PanopticPrediction(panoptic_seg, segments_info)
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(pred.non_empty_mask())
for mask, sinfo in pred.semantic_masks():
category_idx = sinfo["category_id"]
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
except AttributeError:
mask_color = None
text = self.metadata.stuff_classes[category_idx]
self.draw_binary_mask(
mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
all_instances = list(pred.instance_masks())
if len(all_instances) == 0:
return self.output
masks, sinfo = list(zip(*all_instances))
category_ids = [x["category_id"] for x in sinfo]
try:
scores = [x["score"] for x in sinfo]
except KeyError:
scores = None
labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)
try:
colors = [random_color(rgb=True, maximum=1) for k in category_ids]
except AttributeError:
colors = None
self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
return self.output
def draw_dataset_dict(self, dic):
annos = dic.get("annotations", None)
if annos:
if "segmentation" in annos[0]:
masks = [x["segmentation"] for x in annos]
else:
masks = None
if "keypoints" in annos[0]:
keypts = [x["keypoints"] for x in annos]
keypts = np.array(keypts).reshape(len(annos), -1, 3)
else:
keypts = None
boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos]
labels = [x["category_id"] for x in annos]
colors = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in labels
]
names = self.metadata.get("thing_classes", None)
if names:
labels = [names[i] for i in labels]
labels = [
"{}".format(i) + ("|crowd" if a.get("iscrowd", 0) else "")
for i, a in zip(labels, annos)
]
self.overlay_instances(
labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
)
sem_seg = dic.get("sem_seg", None)
if sem_seg is None and "sem_seg_file_name" in dic:
with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
sem_seg = Image.open(f)
sem_seg = np.asarray(sem_seg, dtype="uint8")
if sem_seg is not None:
self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
return self.output
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5
):
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0)
horiz_align = "left"
elif masks is not None:
x0, y0, x1, y1 = masks[i].bbox()
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
def draw_and_connect_keypoints(self, keypoints):
visible = {}
keypoint_names = self.metadata.get("keypoint_names")
for idx, keypoint in enumerate(keypoints):
x, y, prob = keypoint
if prob > _KEYPOINT_THRESHOLD:
self.draw_circle((x, y), color=_RED)
if keypoint_names:
keypoint_name = keypoint_names[idx]
visible[keypoint_name] = (x, y)
if self.metadata.get("keypoint_connection_rules"):
for kp0, kp1, color in self.metadata.keypoint_connection_rules:
if kp0 in visible and kp1 in visible:
x0, y0 = visible[kp0]
x1, y1 = visible[kp1]
color = tuple(x / 255.0 for x in color)
self.draw_line([x0, x1], [y0, y1], color=color)
try:
ls_x, ls_y = visible["left_shoulder"]
rs_x, rs_y = visible["right_shoulder"]
mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
except KeyError:
pass
else:
nose_x, nose_y = visible.get("nose", (None, None))
if nose_x is not None:
self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
try:
lh_x, lh_y = visible["left_hip"]
rh_x, rh_y = visible["right_hip"]
except KeyError:
pass
else:
mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0
):
if not font_size:
font_size = self._default_font_size
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
def draw_rotated_box_with_label(
self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
):
cnt_x, cnt_y, w, h, angle = rotated_box
area = w * h
linewidth = self._default_font_size / (
6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
)
theta = angle * math.pi / 180.0
c = math.cos(theta)
s = math.sin(theta)
rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
for k in range(4):
j = (k + 1) % 4
self.draw_line(
[rotated_rect[k][0], rotated_rect[j][0]],
[rotated_rect[k][1], rotated_rect[j][1]],
color=edge_color,
linestyle="--" if k == 1 else line_style,
linewidth=linewidth,
)
if label is not None:
text_pos = rotated_rect[1]
height_ratio = h / np.sqrt(self.output.height * self.output.width)
label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
)
self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
return self.output
def draw_circle(self, circle_coord, color, radius=3):
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
|
Apache License 2.0
|
cupy/cupy
|
cupyx/scipy/sparse/compressed.py
|
_compressed_sparse_matrix._insert_many
|
python
|
def _insert_many(self, i, j, x):
order = cupy.argsort(i)
i = i.take(order)
j = j.take(order)
x = x.take(order)
idx_dtype = sputils.get_index_dtype(
(self.indices, self.indptr), maxval=(
self.nnz + x.size))
self.indptr = self.indptr.astype(idx_dtype)
self.indices = self.indices.astype(idx_dtype)
self.data = self.data.astype(self.dtype)
indptr_inserts, indices_inserts, data_inserts = _index._select_last_indices(i, j, x, idx_dtype)
rows, ui_indptr = cupy.unique(indptr_inserts, return_index=True)
to_add = cupy.empty(ui_indptr.size+1, ui_indptr.dtype)
to_add[-1] = j.size
to_add[:-1] = ui_indptr
ui_indptr = to_add
row_counts = cupy.zeros(ui_indptr.size-1, dtype=idx_dtype)
cupyx.scatter_add(
row_counts, cupy.searchsorted(rows, indptr_inserts), 1)
self._perform_insert(indices_inserts, data_inserts,
rows, row_counts, idx_dtype)
|
Inserts a new nonzero at each (i, j) with value x.
Here (i, j) index the major and minor axes respectively.
i, j and x must be non-empty, 1-d arrays.
Inserts one major group (e.g. all entries of a row) at a time.
Maintains the has_sorted_indices property.
Modifies i, j and x in place.
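A sketch of the code path that reaches _insert_many: assigning to positions that hold no stored entry changes the sparsity structure (assumes a CUDA device is available; a warning about the cost of changing sparsity is expected):
import cupy
import cupyx.scipy.sparse as sparse

m = sparse.csr_matrix(cupy.zeros((4, 4), dtype=cupy.float64))
# These slots hold no stored entries yet, so new nonzeros must be inserted.
m[0, 1] = 2.0
m[3, 2] = 5.0
print(m.nnz)  # 2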
|
https://github.com/cupy/cupy/blob/a466b03ef0afd7c1ce1615e3f48da64ae38c1320/cupyx/scipy/sparse/compressed.py#L607-L647
|
import string
import warnings
import numpy
try:
import scipy.sparse
scipy_available = True
except ImportError:
scipy_available = False
import cupy
import cupyx
from cupy import _core
from cupy._core import _scalar
from cupy._creation import basic
from cupy import cusparse
from cupyx.scipy.sparse import base
from cupyx.scipy.sparse import coo
from cupyx.scipy.sparse import data as sparse_data
from cupyx.scipy.sparse import sputils
from cupyx.scipy.sparse import _util
from cupyx.scipy.sparse import _index
class _compressed_sparse_matrix(sparse_data._data_matrix,
sparse_data._minmax_mixin,
_index.IndexMixin):
_max_min_reduction_code = r'''
extern "C" __global__
void ${func}(double* data, int* x, int* y, int length,
double* z) {
// Get the index of the block
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate the block length
int block_length = y[tid] - x[tid];
// Select initial value based on the block density
double running_value = 0;
if (${cond}){
running_value = data[x[tid]];
} else {
running_value = 0;
}
// Iterate over the block and update
for (int entry = x[tid]; entry < y[tid]; entry++){
if (data[entry] != data[entry]){
// Check for NaN
running_value = nan("");
break;
} else {
// Check for a value update
if (data[entry] ${op} running_value){
running_value = data[entry];
}
}
}
// Store in the return function
z[tid] = running_value;
}'''
_max_reduction_kern = _core.RawKernel(
string.Template(_max_min_reduction_code).substitute(
func='max_reduction', op='>', cond='block_length == length'),
'max_reduction')
_max_nonzero_reduction_kern = _core.RawKernel(
string.Template(_max_min_reduction_code).substitute(
func='max_nonzero_reduction', op='>', cond='block_length > 0'),
'max_nonzero_reduction')
_min_reduction_kern = _core.RawKernel(
string.Template(_max_min_reduction_code).substitute(
func='min_reduction', op='<', cond='block_length == length'),
'min_reduction')
_min_nonzero_reduction_kern = _core.RawKernel(
string.Template(_max_min_reduction_code).substitute(
func='min_nonzero_reduction', op='<', cond='block_length > 0'),
'min_nonzero_reduction')
_argmax_argmin_code = r'''
template<typename T1, typename T2> __global__ void
${func}_arg_reduction(T1* data, int* indices, int* x, int* y,
int length, T2* z) {
// Get the index of the block
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate the block length
int block_length = y[tid] - x[tid];
// Select initial value based on the block density
int data_index = 0;
double data_value = 0;
if (block_length == length){
// Block is dense. Fill the first value
data_value = data[x[tid]];
data_index = indices[x[tid]];
} else if (block_length > 0) {
// Block has at least one zero. Assign first occurrence as the
// starting reference
data_value = 0;
for (data_index = 0; data_index < length; data_index++){
if (data_index != indices[x[tid] + data_index] ||
x[tid] + data_index >= y[tid]){
break;
}
}
} else {
// Zero valued array
data_value = 0;
data_index = 0;
}
// Iterate over the section of the sparse matrix
for (int entry = x[tid]; entry < y[tid]; entry++){
if (data[entry] != data[entry]){
// Check for NaN
data_value = nan("");
data_index = 0;
break;
} else {
// Check for a value update
if (data[entry] ${op} data_value){
data_index = indices[entry];
data_value = data[entry];
}
}
}
// Store in the return function
z[tid] = data_index;
}'''
_max_arg_reduction_mod = _core.RawModule(
code=string.Template(_argmax_argmin_code).substitute(
func='max', op='>'),
options=('-std=c++11',),
name_expressions=['max_arg_reduction<float, int>',
'max_arg_reduction<float, long long>',
'max_arg_reduction<double, int>',
'max_arg_reduction<double, long long>'])
_min_arg_reduction_mod = _core.RawModule(
code=string.Template(_argmax_argmin_code).substitute(
func='min', op='<'),
options=('-std=c++11',),
name_expressions=['min_arg_reduction<float, int>',
'min_arg_reduction<float, long long>',
'min_arg_reduction<double, int>',
'min_arg_reduction<double, long long>'])
_has_sorted_indices_kern = _core.ElementwiseKernel(
'raw T indptr, raw T indices',
'bool diff',
'''
bool diff_out = true;
for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {
if (indices[jj] > indices[jj+1]){
diff_out = false;
}
}
diff = diff_out;
''', 'cupyx_scipy_sparse_has_sorted_indices')
_has_canonical_format_kern = _core.ElementwiseKernel(
'raw T indptr, raw T indices',
'bool diff',
'''
bool diff_out = true;
if (indptr[i] > indptr[i+1]) {
diff = false;
return;
}
for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {
if (indices[jj] >= indices[jj+1]) {
diff_out = false;
}
}
diff = diff_out;
''', 'cupyx_scipy_sparse_has_canonical_format')
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if shape is not None:
if not _util.isshape(shape):
raise ValueError('invalid shape (must be a 2-tuple of int)')
shape = int(shape[0]), int(shape[1])
if base.issparse(arg1):
x = arg1.asformat(self.format)
data = x.data
indices = x.indices
indptr = x.indptr
if arg1.format != self.format:
copy = False
if shape is None:
shape = arg1.shape
elif _util.isshape(arg1):
m, n = arg1
m, n = int(m), int(n)
data = basic.zeros(0, dtype if dtype else 'd')
indices = basic.zeros(0, 'i')
indptr = basic.zeros(self._swap(m, n)[0] + 1, dtype='i')
shape = (m, n)
copy = False
elif scipy_available and scipy.sparse.issparse(arg1):
x = arg1.asformat(self.format)
data = cupy.array(x.data)
indices = cupy.array(x.indices, dtype='i')
indptr = cupy.array(x.indptr, dtype='i')
copy = False
if shape is None:
shape = arg1.shape
elif isinstance(arg1, tuple) and len(arg1) == 2:
sp_coo = coo.coo_matrix(arg1, shape=shape, dtype=dtype, copy=copy)
sp_compressed = sp_coo.asformat(self.format)
data = sp_compressed.data
indices = sp_compressed.indices
indptr = sp_compressed.indptr
elif isinstance(arg1, tuple) and len(arg1) == 3:
data, indices, indptr = arg1
if not (base.isdense(data) and data.ndim == 1 and
base.isdense(indices) and indices.ndim == 1 and
base.isdense(indptr) and indptr.ndim == 1):
raise ValueError(
'data, indices, and indptr should be 1-D')
if len(data) != len(indices):
raise ValueError('indices and data should have the same size')
elif base.isdense(arg1):
if arg1.ndim > 2:
raise TypeError('expected dimension <= 2 array or matrix')
elif arg1.ndim == 1:
arg1 = arg1[None]
elif arg1.ndim == 0:
arg1 = arg1[None, None]
data, indices, indptr = self._convert_dense(arg1)
copy = False
if shape is None:
shape = arg1.shape
else:
raise ValueError(
'Unsupported initializer format')
if dtype is None:
dtype = data.dtype
else:
dtype = numpy.dtype(dtype)
if dtype.char not in '?fdFD':
raise ValueError(
'Only bool, float32, float64, complex64 and complex128 '
'are supported')
data = data.astype(dtype, copy=copy)
sparse_data._data_matrix.__init__(self, data)
self.indices = indices.astype('i', copy=copy)
self.indptr = indptr.astype('i', copy=copy)
if shape is None:
shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)
major, minor = self._swap(*shape)
if len(indptr) != major + 1:
raise ValueError('index pointer size (%d) should be (%d)'
% (len(indptr), major + 1))
self._descr = cusparse.MatDescriptor.create()
self._shape = shape
def _with_data(self, data, copy=True):
if copy:
return self.__class__(
(data, self.indices.copy(), self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__(
(data, self.indices, self.indptr),
shape=self.shape,
dtype=data.dtype)
def _convert_dense(self, x):
raise NotImplementedError
def _swap(self, x, y):
raise NotImplementedError
def _add_sparse(self, other, alpha, beta):
raise NotImplementedError
def _add(self, other, lhs_negative, rhs_negative):
if cupy.isscalar(other):
if other == 0:
if lhs_negative:
return -self
else:
return self.copy()
else:
raise NotImplementedError(
'adding a nonzero scalar to a sparse matrix is not '
'supported')
elif base.isspmatrix(other):
alpha = -1 if lhs_negative else 1
beta = -1 if rhs_negative else 1
return self._add_sparse(other, alpha, beta)
elif base.isdense(other):
if lhs_negative:
if rhs_negative:
return -self.todense() - other
else:
return other - self.todense()
else:
if rhs_negative:
return self.todense() - other
else:
return self.todense() + other
else:
return NotImplemented
def __add__(self, other):
return self._add(other, False, False)
def __radd__(self, other):
return self._add(other, False, False)
def __sub__(self, other):
return self._add(other, False, True)
def __rsub__(self, other):
return self._add(other, True, False)
def _get_intXint(self, row, col):
major, minor = self._swap(row, col)
data, indices, _ = _index._get_csr_submatrix_major_axis(
self.data, self.indices, self.indptr, major, major + 1)
dtype = data.dtype
res = cupy.zeros((), dtype=dtype)
if dtype.kind == 'c':
_index._compress_getitem_complex_kern(
data.real, data.imag, indices, minor, res.real, res.imag)
else:
_index._compress_getitem_kern(data, indices, minor, res)
return res
def _get_sliceXslice(self, row, col):
major, minor = self._swap(row, col)
copy = major.step in (1, None)
return self._major_slice(major)._minor_slice(minor, copy=copy)
def _get_arrayXarray(self, row, col, not_found_val=0):
idx_dtype = self.indices.dtype
M, N = self._swap(*self.shape)
major, minor = self._swap(row, col)
major = major.astype(idx_dtype, copy=False)
minor = minor.astype(idx_dtype, copy=False)
val = _index._csr_sample_values(
M, N, self.indptr, self.indices, self.data,
major.ravel(), minor.ravel(),
not_found_val)
if major.ndim == 1:
return cupy.expand_dims(val, 0)
return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
major, minor = self._swap(row, col)
return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
_, N = self._swap(*self.shape)
M = idx.size
new_shape = self._swap(M, N)
if self.nnz == 0 or M == 0:
return self.__class__(new_shape)
return self.__class__(
_index._csr_row_index(self.data, self.indices, self.indptr, idx),
shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
M, _ = self._swap(*self.shape)
N = idx.size
new_shape = self._swap(M, N)
if self.nnz == 0 or N == 0:
return self.__class__(new_shape)
if idx.size * M < self.nnz:
pass
return self._tocsx()._major_index_fancy(idx)._tocsx()
def _major_slice(self, idx, copy=False):
M, N = self._swap(*self.shape)
start, stop, step = idx.indices(M)
if start == 0 and stop == M and step == 1:
return self.copy() if copy else self
M = len(range(start, stop, step))
new_shape = self._swap(M, N)
if step == 1:
if M == 0 or self.nnz == 0:
return self.__class__(new_shape, dtype=self.dtype)
return self.__class__(
_index._get_csr_submatrix_major_axis(
self.data, self.indices, self.indptr, start, stop),
shape=new_shape, copy=copy)
rows = cupy.arange(start, stop, step, dtype=self.indptr.dtype)
return self._major_index_fancy(rows)
def _minor_slice(self, idx, copy=False):
M, N = self._swap(*self.shape)
start, stop, step = idx.indices(N)
if start == 0 and stop == N and step == 1:
return self.copy() if copy else self
N = len(range(start, stop, step))
new_shape = self._swap(M, N)
if N == 0 or self.nnz == 0:
return self.__class__(new_shape)
if step == 1:
return self.__class__(
_index._get_csr_submatrix_minor_axis(
self.data, self.indices, self.indptr, start, stop),
shape=new_shape, copy=False)
cols = cupy.arange(start, stop, step, dtype=self.indices.dtype)
return self._minor_index_fancy(cols)
def _set_intXint(self, row, col, x):
i, j = self._swap(row, col)
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap(row, col)
self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
self._zero_many(*self._swap(row, col))
M, N = row.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = cupy.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = cupy.repeat(cupy.arange(M), r.size)
c = cupy.tile(c, M)
x = cupy.tile(x, M)
if broadcast_col:
r = cupy.repeat(r, N)
c = cupy.tile(cupy.arange(N), c.size)
x = cupy.repeat(x, N)
i, j = self._swap(row[r, c], col[r, c])
self._set_many(i, j, x)
def _prepare_indices(self, i, j):
M, N = self._swap(*self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = cupy.array(i, dtype=self.indptr.dtype,
copy=True, ndmin=1).ravel()
j = cupy.array(j, dtype=self.indices.dtype,
copy=True, ndmin=1).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
def _set_many(self, i, j, x):
i, j, M, N = self._prepare_indices(i, j)
x = cupy.array(x, dtype=self.dtype, copy=True, ndmin=1).ravel()
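        # Build a helper CSR matrix whose stored values are the flat positions
        # 0..nnz-1 of the existing entries; sampling it at (i, j) below returns,
        # for each target, its offset into self.data, or -1 if it is not stored yet.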
new_sp = cupyx.scipy.sparse.csr_matrix(
(cupy.arange(self.nnz, dtype=cupy.float32),
self.indices, self.indptr), shape=(M, N))
offsets = new_sp._get_arrayXarray(
i, j, not_found_val=-1).astype(cupy.int32).ravel()
if -1 not in offsets:
self.data[offsets] = x
return
else:
warnings.warn('Changing the sparsity structure of a '
'{}_matrix is expensive.'
' lil_matrix is more efficient.'.format(self.format))
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
i, j, M, N = self._prepare_indices(i, j)
new_sp = cupyx.scipy.sparse.csr_matrix(
(cupy.arange(self.nnz, dtype=cupy.float32),
self.indices, self.indptr), shape=(M, N))
offsets = new_sp._get_arrayXarray(
i, j, not_found_val=-1).astype(cupy.int32).ravel()
self.data[offsets[offsets > -1]] = 0
def _perform_insert(self, indices_inserts, data_inserts,
rows, row_counts, idx_dtype):
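        # Grow each affected row's entry count by the number of values inserted
        # into it, rebuild indptr via a cumulative sum, then merge the old and the
        # inserted entries row by row into freshly allocated indices/data arrays.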
indptr_diff = cupy.diff(self.indptr)
indptr_diff[rows] += row_counts
new_indptr = cupy.empty(self.indptr.shape, dtype=idx_dtype)
new_indptr[0] = idx_dtype(0)
new_indptr[1:] = indptr_diff
cupy.cumsum(new_indptr, out=new_indptr)
out_nnz = int(new_indptr[-1])
new_indices = cupy.empty(out_nnz, dtype=idx_dtype)
new_data = cupy.empty(out_nnz, dtype=self.data.dtype)
new_indptr_lookup = cupy.zeros(new_indptr.size, dtype=idx_dtype)
new_indptr_lookup[1:][rows] = row_counts
cupy.cumsum(new_indptr_lookup, out=new_indptr_lookup)
_index._insert_many_populate_arrays(
indices_inserts, data_inserts, new_indptr_lookup,
self.indptr, self.indices, self.data, new_indptr, new_indices,
new_data, size=self.indptr.size-1)
self.indptr = new_indptr
self.indices = new_indices
self.data = new_data
|
MIT License
|
openstack/cinder
|
cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py
|
FJDXCommon._delete_volume
|
python
|
def _delete_volume(self, vol_instance):
LOG.debug('_delete_volume, volume name: %s.',
vol_instance['ElementName'])
volumename = vol_instance['ElementName']
configservice = self._find_eternus_service(CONSTANTS.STOR_CONF)
if configservice is None:
msg = (_('_delete_volume, volumename: %(volumename)s, '
'Storage Configuration Service not found.')
% {'volumename': volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('_delete_volume, volumename: %(volumename)s, '
'vol_instance: %(vol_instance)s, '
'Method: ReturnToStoragePool.',
{'volumename': volumename,
'vol_instance': vol_instance.path})
rc, errordesc, job = self._exec_eternus_service(
'ReturnToStoragePool',
configservice,
TheElement=vol_instance.path)
if rc != 0:
msg = (_('_delete_volume, volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.')
% {'volumename': volumename,
'rc': rc,
'errordesc': errordesc})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('_delete_volume, volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.',
{'volumename': volumename,
'rc': rc,
'errordesc': errordesc})
|
Delete volume on ETERNUS.
|
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py#L467-L509
|
import ast
import base64
import time
from lxml import etree as ET
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils.secretutils import md5
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.fujitsu.eternus_dx import constants as CONSTANTS
from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_cli
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
FJ_ETERNUS_DX_OPT_opts = [
cfg.StrOpt('cinder_eternus_config_file',
default='/etc/cinder/cinder_fujitsu_eternus_dx.xml',
help='Config file for cinder eternus_dx volume driver.'),
]
CONF.register_opts(FJ_ETERNUS_DX_OPT_opts, group=conf.SHARED_CONF_GROUP)
class FJDXCommon(object):
VERSION = "1.3.0"
stats = {
'driver_version': VERSION,
'storage_protocol': None,
'vendor_name': 'FUJITSU',
'QoS_support': False,
'volume_backend_name': None,
}
def __init__(self, prtcl, configuration=None):
self.pywbemAvailable = pywbemAvailable
self.protocol = prtcl
self.configuration = configuration
self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts)
if prtcl == 'iSCSI':
self.configuration.iscsi_ip_address = (
self._get_drvcfg('EternusISCSIIP'))
self.conn = None
self.fjdxcli = {}
self._check_user()
@staticmethod
def get_driver_options():
return FJ_ETERNUS_DX_OPT_opts
def create_volume(self, volume):
LOG.debug('create_volume, '
'volume id: %(vid)s, volume size: %(vsize)s.',
{'vid': volume['id'], 'vsize': volume['size']})
self.conn = self._get_eternus_connection()
volumesize = int(volume['size']) * units.Gi
volumename = self._create_volume_name(volume['id'])
LOG.debug('create_volume, volumename: %(volumename)s, '
'volumesize: %(volumesize)u.',
{'volumename': volumename,
'volumesize': volumesize})
eternus_pool = volume_utils.extract_host(volume['host'], 'pool')
pool = self._find_pool(eternus_pool)
if 'RSP' in pool['InstanceID']:
pooltype = CONSTANTS.RAIDGROUP
else:
pooltype = CONSTANTS.TPPOOL
configservice = self._find_eternus_service(CONSTANTS.STOR_CONF)
if configservice is None:
msg = (_('create_volume, volume: %(volume)s, '
'volumename: %(volumename)s, '
'eternus_pool: %(eternus_pool)s, '
'Storage Configuration Service not found.')
% {'volume': volume,
'volumename': volumename,
'eternus_pool': eternus_pool})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('create_volume, '
'CreateOrModifyElementFromStoragePool, '
'ConfigService: %(service)s, '
'ElementName: %(volumename)s, '
'InPool: %(eternus_pool)s, '
'ElementType: %(pooltype)u, '
'Size: %(volumesize)u.',
{'service': configservice,
'volumename': volumename,
'eternus_pool': eternus_pool,
'pooltype': pooltype,
'volumesize': volumesize})
rc, errordesc, job = self._exec_eternus_service(
'CreateOrModifyElementFromStoragePool',
configservice,
ElementName=volumename,
InPool=pool,
ElementType=self._pywbem_uint(pooltype, '16'),
Size=self._pywbem_uint(volumesize, '64'))
if rc == CONSTANTS.VOLUMENAME_IN_USE:
LOG.warning('create_volume, '
'volumename: %(volumename)s, '
'Element Name is in use.',
{'volumename': volumename})
vol_instance = self._find_lun(volume)
element = vol_instance
elif rc != 0:
msg = (_('create_volume, '
'volumename: %(volumename)s, '
'poolname: %(eternus_pool)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.')
% {'volumename': volumename,
'eternus_pool': eternus_pool,
'rc': rc,
'errordesc': errordesc})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
element = job['TheElement']
try:
systemnamelist = (
self._enum_eternus_instances('FUJITSU_StorageProduct'))
except Exception:
msg = (_('create_volume, '
'volume: %(volume)s, '
'EnumerateInstances, '
'cannot connect to ETERNUS.')
% {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('create_volume, '
'volumename: %(volumename)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s, '
'Backend: %(backend)s, '
'Pool Name: %(eternus_pool)s, '
'Pool Type: %(pooltype)s.',
{'volumename': volumename,
'rc': rc,
'errordesc': errordesc,
'backend': systemnamelist[0]['IdentifyingNumber'],
'eternus_pool': eternus_pool,
'pooltype': CONSTANTS.POOL_TYPE_dic[pooltype]})
element_path = {
'classname': element.classname,
'keybindings': {
'CreationClassName': element['CreationClassName'],
'SystemName': element['SystemName'],
'DeviceID': element['DeviceID'],
'SystemCreationClassName': element['SystemCreationClassName']
}
}
volume_no = "0x" + element['DeviceID'][24:28]
metadata = {'FJ_Backend': systemnamelist[0]['IdentifyingNumber'],
'FJ_Volume_Name': volumename,
'FJ_Volume_No': volume_no,
'FJ_Pool_Name': eternus_pool,
'FJ_Pool_Type': CONSTANTS.POOL_TYPE_dic[pooltype]}
return (element_path, metadata)
def create_pool_info(self, pool_instance, volume_count, pool_type):
LOG.debug('create_pool_info, pool_instance: %(pool)s, '
'volume_count: %(volcount)s, pool_type: %(ptype)s.',
{'pool': pool_instance,
'volcount': volume_count, 'ptype': pool_type})
if pool_type not in CONSTANTS.POOL_TYPE_list:
msg = (_('Invalid pool type was specified : %s.') % pool_type)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
total_gb = pool_instance['TotalManagedSpace'] / units.Gi
free_gb = pool_instance['RemainingManagedSpace'] / units.Gi
if hasattr(pool_instance, 'provisioned_capacity_gb'):
prov_gb = pool_instance.provisioned_capacity_gb
else:
prov_gb = total_gb - free_gb
if pool_type == 'RAID':
useable_gb = free_gb
else:
max_capacity = total_gb * float(
self.configuration.max_over_subscription_ratio)
useable_gb = max_capacity - prov_gb
pool = {
'name': pool_instance['ElementName'],
'path': pool_instance.path,
'total_capacity_gb': total_gb,
'free_capacity_gb': free_gb,
'type': pool_type,
'volume_count': volume_count,
'provisioned_capacity_gb': prov_gb,
'useable_capacity_gb': useable_gb
}
LOG.debug('create_pool_info, pool: %s.', pool)
return pool
def create_volume_from_snapshot(self, volume, snapshot):
LOG.debug('create_volume_from_snapshot, '
'volume id: %(vid)s, volume size: %(vsize)s, '
'snapshot id: %(sid)s.',
{'vid': volume['id'], 'vsize': volume['size'],
'sid': snapshot['id']})
self.conn = self._get_eternus_connection()
source_volume_instance = self._find_lun(snapshot)
if source_volume_instance is None:
msg = _('create_volume_from_snapshot, '
'Source Volume does not exist in ETERNUS.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
(element_path, metadata) = self.create_volume(volume)
target_volume_instancename = self._create_eternus_instance_name(
element_path['classname'], element_path['keybindings'])
try:
target_volume_instance = (
self._get_eternus_instance(target_volume_instancename))
except Exception:
msg = (_('create_volume_from_snapshot, '
'target volume instancename: %(volume_instancename)s, '
'Get Instance Failed.')
% {'volume_instancename': target_volume_instancename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
self._create_local_cloned_volume(target_volume_instance,
source_volume_instance)
return (element_path, metadata)
def create_cloned_volume(self, volume, src_vref):
LOG.debug('create_cloned_volume, '
'tgt: (%(tid)s, %(tsize)s), src: (%(sid)s, %(ssize)s).',
{'tid': volume['id'], 'tsize': volume['size'],
'sid': src_vref['id'], 'ssize': src_vref['size']})
self.conn = self._get_eternus_connection()
source_volume_instance = self._find_lun(src_vref)
if source_volume_instance is None:
msg = _('create_cloned_volume, '
'Source Volume does not exist in ETERNUS.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
(element_path, metadata) = self.create_volume(volume)
target_volume_instancename = self._create_eternus_instance_name(
element_path['classname'], element_path['keybindings'])
try:
target_volume_instance = (
self._get_eternus_instance(target_volume_instancename))
except Exception:
msg = (_('create_cloned_volume, '
'target volume instancename: %(volume_instancename)s, '
'Get Instance Failed.')
% {'volume_instancename': target_volume_instancename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
self._create_local_cloned_volume(target_volume_instance,
source_volume_instance)
return (element_path, metadata)
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def _create_local_cloned_volume(self, tgt_vol_instance, src_vol_instance):
s_volumename = src_vol_instance['ElementName']
t_volumename = tgt_vol_instance['ElementName']
LOG.debug('_create_local_cloned_volume, '
'tgt volume name: %(t_volumename)s, '
'src volume name: %(s_volumename)s, ',
{'t_volumename': t_volumename,
's_volumename': s_volumename})
repservice = self._find_eternus_service(CONSTANTS.REPL)
if repservice is None:
msg = _('_create_local_cloned_volume, '
'Replication Service not found.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
rc, errordesc, job = self._exec_eternus_service(
'CreateElementReplica',
repservice,
SyncType=self._pywbem_uint(8, '16'),
SourceElement=src_vol_instance.path,
TargetElement=tgt_vol_instance.path)
if rc != 0:
msg = (_('_create_local_cloned_volume, '
'volumename: %(volumename)s, '
'sourcevolumename: %(sourcevolumename)s, '
'source volume instance: %(source_volume)s, '
'target volume instance: %(target_volume)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s.')
% {'volumename': t_volumename,
'sourcevolumename': s_volumename,
'source_volume': src_vol_instance.path,
'target_volume': tgt_vol_instance.path,
'rc': rc,
'errordesc': errordesc})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('_create_local_cloned_volume, out: %(rc)s, %(job)s.',
{'rc': rc, 'job': job})
def delete_volume(self, volume):
LOG.debug('delete_volume, volume id: %s.', volume['id'])
self.conn = self._get_eternus_connection()
vol_exist = self._delete_volume_setting(volume)
if not vol_exist:
LOG.debug('delete_volume, volume not found in 1st check.')
return False
vol_instance = self._find_lun(volume)
if vol_instance is None:
LOG.debug('delete_volume, volume not found in 2nd check, '
'but no problem.')
return True
self._delete_volume(vol_instance)
return True
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
def _delete_volume_setting(self, volume):
LOG.debug('_delete_volume_setting, volume id: %s.', volume['id'])
volumename = self._create_volume_name(volume['id'])
vol_instance = self._find_lun(volume)
if vol_instance is None:
LOG.info('_delete_volume_setting, volumename:%(volumename)s, '
'volume not found on ETERNUS.',
{'volumename': volumename})
return False
self._unmap_lun(volume, None, force=True)
cpsessionlist = self._find_copysession(vol_instance)
delete_copysession_list = []
wait_copysession_list = []
for cpsession in cpsessionlist:
LOG.debug('_delete_volume_setting, '
'volumename: %(volumename)s, '
'cpsession: %(cpsession)s.',
{'volumename': volumename,
'cpsession': cpsession})
if cpsession['SyncedElement'] == vol_instance.path:
delete_copysession_list.append(cpsession)
elif cpsession['SystemElement'] == vol_instance.path:
wait_copysession_list.append(cpsession)
LOG.debug('_delete_volume_setting, '
'wait_cpsession: %(wait_cpsession)s, '
'delete_cpsession: %(delete_cpsession)s.',
{'wait_cpsession': wait_copysession_list,
'delete_cpsession': delete_copysession_list})
for cpsession in wait_copysession_list:
self._wait_for_copy_complete(cpsession)
for cpsession in delete_copysession_list:
self._delete_copysession(cpsession)
LOG.debug('_delete_volume_setting, '
'wait_cpsession: %(wait_cpsession)s, '
'delete_cpsession: %(delete_cpsession)s, complete.',
{'wait_cpsession': wait_copysession_list,
'delete_cpsession': delete_copysession_list})
return True
@lockutils.synchronized('ETERNUS-vol', 'cinder-', True)
|
Apache License 2.0
|
gmr/queries
|
queries/pool.py
|
Pool.get
|
python
|
def get(self, session):
idle = self.idle_connections
if idle:
connection = idle.pop(0)
connection.lock(session)
if self.idle_start:
with self._lock:
self.idle_start = None
return connection.handle
raise NoIdleConnectionsError(self.id)
|
Return an idle connection and assign the session to the connection
:param queries.Session session: The session to assign
:rtype: psycopg2.extensions.connection
:raises: NoIdleConnectionsError
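A minimal, hedged usage sketch for this method. FakeConn and FakeSession are hypothetical stand-ins for a psycopg2 connection handle and a queries.Session; the idle_connections property that get() consults lives in the same queries/pool.py module, although it is not part of this excerpt.
from queries import pool

class FakeConn(object):
    # Hypothetical stand-in for a psycopg2 connection handle.
    closed = 0
    def isexecuting(self):
        return False              # Connection.busy probes this
    def close(self):
        self.closed = 1

class FakeSession(object):
    # Hypothetical stand-in for a queries.Session (only needs to be weakref-able).
    pass

p = pool.Pool('example-pool', max_size=1)
handle = FakeConn()
p.add(handle)                     # wrapped in a pool.Connection internally

session = FakeSession()
try:
    conn = p.get(session)         # locks an idle connection to the session
except pool.NoIdleConnectionsError:
    conn = None
if conn is not None:
    # ... run queries on the raw handle, then return it to the idle set ...
    p.free(conn)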
|
https://github.com/gmr/queries/blob/bcd4236fb536da7c705ee9a7972f373c12e35c7a/queries/pool.py#L256-L272
|
import datetime
import logging
import os
import threading
import time
import weakref
import psycopg2
LOGGER = logging.getLogger(__name__)
DEFAULT_IDLE_TTL = 60
DEFAULT_MAX_SIZE = int(os.environ.get('QUERIES_MAX_POOL_SIZE', 1))
class Connection(object):
_lock = threading.Lock()
def __init__(self, handle):
self.handle = handle
self.used_by = None
self.executions = 0
self.exceptions = 0
def close(self):
LOGGER.debug('Connection %s closing', self.id)
if self.busy and not self.closed:
raise ConnectionBusyError(self)
with self._lock:
if not self.handle.closed:
try:
self.handle.close()
except psycopg2.InterfaceError as error:
LOGGER.error('Error closing socket: %s', error)
@property
def closed(self):
return self.handle.closed != 0
@property
def busy(self):
if self.handle.isexecuting():
return True
elif self.used_by is None:
return False
return not self.used_by() is None
@property
def executing(self):
return self.handle.isexecuting()
def free(self):
LOGGER.debug('Connection %s freeing', self.id)
if self.handle.isexecuting():
raise ConnectionBusyError(self)
with self._lock:
self.used_by = None
LOGGER.debug('Connection %s freed', self.id)
@property
def id(self):
return id(self.handle)
def lock(self, session):
if self.busy:
raise ConnectionBusyError(self)
with self._lock:
self.used_by = weakref.ref(session)
LOGGER.debug('Connection %s locked', self.id)
@property
def locked(self):
return self.used_by is not None
class Pool(object):
_lock = threading.Lock()
idle_start = None
idle_ttl = DEFAULT_IDLE_TTL
max_size = DEFAULT_MAX_SIZE
def __init__(self,
pool_id,
idle_ttl=DEFAULT_IDLE_TTL,
max_size=DEFAULT_MAX_SIZE,
time_method=None):
self.connections = {}
self._id = pool_id
self.idle_ttl = idle_ttl
self.max_size = max_size
self.time_method = time_method or time.time
def __contains__(self, connection):
return id(connection) in self.connections
def __len__(self):
return len(self.connections)
def add(self, connection):
if id(connection) in self.connections:
raise ValueError('Connection already exists in pool')
if len(self.connections) == self.max_size:
LOGGER.warning('Race condition found when adding new connection')
try:
connection.close()
except (psycopg2.Error, psycopg2.Warning) as error:
LOGGER.error('Error closing the conn that cant be used: %s',
error)
raise PoolFullError(self)
with self._lock:
self.connections[id(connection)] = Connection(connection)
LOGGER.debug('Pool %s added connection %s', self.id, id(connection))
@property
def busy_connections(self):
return [c for c in self.connections.values()
if c.busy and not c.closed]
def clean(self):
LOGGER.debug('Cleaning the pool')
for connection in [self.connections[k] for k in self.connections if
self.connections[k].closed]:
LOGGER.debug('Removing %s', connection.id)
self.remove(connection.handle)
if self.idle_duration > self.idle_ttl:
self.close()
LOGGER.debug('Pool %s cleaned', self.id)
def close(self):
for cid in list(self.connections.keys()):
self.remove(self.connections[cid].handle)
LOGGER.debug('Pool %s closed', self.id)
@property
def closed_connections(self):
return [c for c in self.connections.values() if c.closed]
def connection_handle(self, connection):
return self.connections[id(connection)]
@property
def executing_connections(self):
return [c for c in self.connections.values() if c.executing]
def free(self, connection):
LOGGER.debug('Pool %s freeing connection %s', self.id, id(connection))
try:
self.connection_handle(connection).free()
except KeyError:
raise ConnectionNotFoundError(self.id, id(connection))
if self.idle_connections == list(self.connections.values()):
with self._lock:
self.idle_start = self.time_method()
LOGGER.debug('Pool %s freed connection %s', self.id, id(connection))
|
BSD 3-Clause New or Revised License
|
quantopian/zipline
|
zipline/pipeline/domain.py
|
IDomain.data_query_cutoff_for_sessions
|
python
|
def data_query_cutoff_for_sessions(self, sessions):
|
Compute the data query cutoff time for the given sessions.
Parameters
----------
sessions : pd.DatetimeIndex
The sessions to get the data query cutoff times for. This index
will contain all midnight UTC values.
Returns
-------
data_query_cutoff : pd.DatetimeIndex
Timestamp of the last minute for which data should be considered
"available" on each session.
|
https://github.com/quantopian/zipline/blob/014f1fc339dc8b7671d29be2d85ce57d3daec343/zipline/pipeline/domain.py#L60-L74
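To illustrate the contract documented above, a hedged, pandas-only sketch of one possible implementation; real Zipline domains derive the cutoff from their trading calendar, and the fixed 08:45 UTC offset here is purely an assumption for the example.
import pandas as pd

def toy_data_query_cutoff_for_sessions(sessions):
    # sessions carry midnight-UTC labels; pretend the last usable data point
    # falls 45 minutes before a notional 09:30 UTC open (illustrative offset only).
    return sessions + pd.Timedelta(hours=8, minutes=45)

sessions = pd.date_range('2018-01-02', periods=3, tz='UTC')
print(toy_data_query_cutoff_for_sessions(sessions))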
|
import datetime
from textwrap import dedent
from interface import default, implements, Interface
import numpy as np
import pandas as pd
import pytz
from trading_calendars import get_calendar
from zipline.country import CountryCode
from zipline.utils.formatting import bulleted_list
from zipline.utils.input_validation import expect_types, optional
from zipline.utils.memoize import lazyval
from zipline.utils.pandas_utils import days_at_time
class IDomain(Interface):
def all_sessions(self):
@property
def country_code(self):
|
Apache License 2.0
|
jeeftor/alfredtoday
|
src/lib/pyexchange/base/calendar.py
|
BaseExchangeCalendarEvent.validate
|
python
|
def validate(self):
if not self.start:
raise ValueError("Event has no start date")
if not self.end:
raise ValueError("Event has no end date")
if self.end < self.start:
raise ValueError("Start date is after end date")
if self.reminder_minutes_before_start and not isinstance(self.reminder_minutes_before_start, int):
raise TypeError("reminder_minutes_before_start must be of type int")
if self.is_all_day and not isinstance(self.is_all_day, bool):
raise TypeError("is_all_day must be of type bool")
|
Validates that all required fields are present
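A hedged sketch of exercising these checks without a live Exchange service (the normal constructor expects one); it bypasses __init__ and relies on the class-level defaults shown in the context below, and the import path simply mirrors the module path of this entry.
from datetime import datetime
from pyexchange.base.calendar import BaseExchangeCalendarEvent

event = BaseExchangeCalendarEvent.__new__(BaseExchangeCalendarEvent)
event.start = datetime(2024, 1, 1, 10, 0)
event.end = datetime(2024, 1, 1, 9, 0)    # ends before it starts
try:
    event.validate()
except ValueError as err:
    print(err)                            # "Start date is after end date"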
|
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/pyexchange/base/calendar.py#L307-L322
|
from collections import namedtuple
ExchangeEventOrganizer = namedtuple('ExchangeEventOrganizer', ['name', 'email'])
ExchangeEventAttendee = namedtuple('ExchangeEventAttendee', ['name', 'email', 'required'])
ExchangeEventResponse = namedtuple('ExchangeEventResponse', ['name', 'email', 'response', 'last_response', 'required'])
RESPONSE_ACCEPTED = u'Accept'
RESPONSE_DECLINED = u'Decline'
RESPONSE_TENTATIVE = u'Tentative'
RESPONSE_UNKNOWN = u'Unknown'
RESPONSES = [RESPONSE_ACCEPTED, RESPONSE_DECLINED, RESPONSE_TENTATIVE, RESPONSE_UNKNOWN]
class BaseExchangeCalendarService(object):
def __init__(self, service, calendar_id):
self.service = service
self.calendar_id = calendar_id
def event(self, id, *args, **kwargs):
raise NotImplementedError
def get_event(self, id):
raise NotImplementedError
def new_event(self, **properties):
raise NotImplementedError
class BaseExchangeCalendarEvent(object):
_id = None
_change_key = None
service = None
calendar_id = None
subject = u''
start = None
end = None
location = None
html_body = None
text_body = None
attachments = None
organizer = None
reminder_minutes_before_start = None
is_all_day = None
is_online_meeting = None
recurrence = None
recurrence_end_date = None
recurrence_days = None
recurrence_interval = None
_type = None
_attendees = {}
_resources = {}
_conflicting_event_ids = []
_track_dirty_attributes = False
_dirty_attributes = set()
DATA_ATTRIBUTES = [
u'_id', u'subject', u'start', u'end', u'location', u'is_online_meeting', u'html_body', u'text_body', u'organizer',
u'_attendees', u'_resources', u'reminder_minutes_before_start', u'is_all_day',
'recurrence', 'recurrence_interval', 'recurrence_days', 'recurrence_day',
]
RECURRENCE_ATTRIBUTES = [
'recurrence', 'recurrence_end_date', 'recurrence_days', 'recurrence_interval',
]
WEEKLY_DAYS = [u'Sunday', u'Monday', u'Tuesday', u'Wednesday', u'Thursday', u'Friday', u'Saturday']
def __init__(self, service, id=None, calendar_id=u'calendar', xml=None, **kwargs):
self.service = service
self.calendar_id = calendar_id
if xml is not None:
self._init_from_xml(xml)
elif id is None:
self._update_properties(kwargs)
else:
self._init_from_service(id)
self._track_dirty_attributes = True
def _init_from_service(self, id):
raise NotImplementedError
def _init_from_xml(self, xml):
raise NotImplementedError
@property
def id(self):
return self._id
@property
def conflicting_event_ids(self):
return self._conflicting_event_ids
@property
def change_key(self):
return self._change_key
@property
def body(self):
return self.html_body or self.text_body or None
@property
def type(self):
return self._type
@property
def attendees(self):
return [attendee for attendee in self._attendees.values()]
@attendees.setter
def attendees(self, attendees):
self._attendees = self._build_resource_dictionary(attendees)
self._dirty_attributes.add(u'attendees')
@property
def required_attendees(self):
return [attendee for attendee in self._attendees.values() if attendee.required]
@required_attendees.setter
def required_attendees(self, attendees):
required = self._build_resource_dictionary(attendees, required=True)
for attendee in self.required_attendees:
if attendee.email not in required.keys():
del self._attendees[attendee.email]
for email in required:
self._attendees[email] = required[email]
self._dirty_attributes.add(u'attendees')
@property
def optional_attendees(self):
return [attendee for attendee in self._attendees.values() if not attendee.required]
@optional_attendees.setter
def optional_attendees(self, attendees):
optional = self._build_resource_dictionary(attendees, required=False)
for attendee in self.optional_attendees:
if attendee.email not in optional.keys():
del self._attendees[attendee.email]
for email in optional:
self._attendees[email] = optional[email]
self._dirty_attributes.add(u'attendees')
def add_attendees(self, attendees, required=True):
new_attendees = self._build_resource_dictionary(attendees, required=required)
for email in new_attendees:
self._attendees[email] = new_attendees[email]
self._dirty_attributes.add(u'attendees')
def remove_attendees(self, attendees):
attendees_to_delete = self._build_resource_dictionary(attendees)
for email in attendees_to_delete.keys():
if email in self._attendees:
del self._attendees[email]
self._dirty_attributes.add(u'attendees')
@property
def resources(self):
return [resource for resource in self._resources.values()]
@resources.setter
def resources(self, resources):
self._resources = self._build_resource_dictionary(resources)
self._dirty_attributes.add(u'resources')
def add_resources(self, resources):
new_resources = self._build_resource_dictionary(resources)
for key in new_resources:
self._resources[key] = new_resources[key]
self._dirty_attributes.add(u'resources')
def remove_resources(self, resources):
resources_to_delete = self._build_resource_dictionary(resources)
for email in resources_to_delete.keys():
if email in self._resources:
del self._resources[email]
self._dirty_attributes.add(u'resources')
@property
def conference_room(self):
if self.resources and len(self.resources) == 1:
return self.resources[0]
|
MIT License
|
enteee/pdml2flow
|
pdml2flow/plugin.py
|
Plugin2.flow_new
|
python
|
def flow_new(self, flow, frame):
pass
|
Called every time a new flow is opened.
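A minimal, hedged sketch of a plugin that hooks this callback; how plugins are packaged and registered with pdml2flow is outside this excerpt.
from pdml2flow.plugin import Plugin2

class FlowCounter(Plugin2):
    # Counts how many flows were opened during a run.
    def __init__(self, *args):
        self.count = 0

    def flow_new(self, flow, frame):
        self.count += 1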
|
https://github.com/enteee/pdml2flow/blob/2e5da6f03bc799f0e8ef77dd987031b969d4a5df/pdml2flow/plugin.py#L19-L21
|
class Plugin2(object):
@staticmethod
def help():
pass
def __init__(self, *args):
pass
def __deinit__(self):
pass
|
Apache License 2.0
|
neuralensemble/python-neo
|
neo/core/analogsignal.py
|
AnalogSignal.t_start
|
python
|
def t_start(self):
return self._t_start
|
Time when signal begins.
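A small, hedged usage sketch (signal values and units are arbitrary) showing where this property comes from:
import numpy as np
import quantities as pq
from neo.core import AnalogSignal

sig = AnalogSignal(np.random.rand(1000, 1), units='mV',
                   sampling_rate=1 * pq.kHz, t_start=0.5 * pq.s)
print(sig.t_start)           # 0.5 s
print(sig.sampling_period)   # 1 / sampling_rate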
|
https://github.com/neuralensemble/python-neo/blob/889060c022a56b9c3122afee68cbd5d83e4abe78/neo/core/analogsignal.py#L349-L353
|
import logging
try:
import scipy.signal
except ImportError as err:
HAVE_SCIPY = False
else:
HAVE_SCIPY = True
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations, intersect_annotations
from neo.core.dataobject import DataObject
from copy import copy, deepcopy
from neo.core.basesignal import BaseSignal
logger = logging.getLogger("Neo")
def _get_sampling_rate(sampling_rate, sampling_period):
if sampling_period is None:
if sampling_rate is None:
raise ValueError("You must provide either the sampling rate or " + "sampling period")
elif sampling_rate is None:
sampling_rate = 1.0 / sampling_period
elif sampling_period != 1.0 / sampling_rate:
raise ValueError('The sampling_rate has to be 1/sampling_period')
if not hasattr(sampling_rate, 'units'):
raise TypeError("Sampling rate/sampling period must have units")
return sampling_rate
def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, annotations=None,
segment=None):
obj = cls(signal=signal, units=units, dtype=dtype, copy=copy,
t_start=t_start, sampling_rate=sampling_rate,
sampling_period=sampling_period, name=name,
file_origin=file_origin, description=description,
array_annotations=array_annotations, **annotations)
obj.segment = segment
return obj
class AnalogSignal(BaseSignal):
_parent_objects = ('Segment',)
_parent_attrs = ('segment',)
_quantity_attr = 'signal'
_necessary_attrs = (('signal', pq.Quantity, 2),
('sampling_rate', pq.Quantity, 0),
('t_start', pq.Quantity, 0))
_recommended_attrs = BaseNeo._recommended_attrs
def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, **annotations):
signal = cls._rescale(signal, units=units)
obj = pq.Quantity(signal, units=units, dtype=dtype, copy=copy).view(cls)
if obj.ndim == 1:
obj.shape = (-1, 1)
if t_start is None:
raise ValueError('t_start cannot be None')
obj._t_start = t_start
obj._sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
obj.segment = None
return obj
def __init__(self, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, **annotations):
DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
array_annotations=array_annotations, **annotations)
def __reduce__(self):
return _new_AnalogSignalArray, (self.__class__, np.array(self), self.units, self.dtype,
True, self.t_start, self.sampling_rate,
self.sampling_period, self.name, self.file_origin,
self.description, self.array_annotations,
self.annotations, self.segment)
def _array_finalize_spec(self, obj):
self._t_start = getattr(obj, '_t_start', 0 * pq.s)
self._sampling_rate = getattr(obj, '_sampling_rate', None)
return obj
def __repr__(self):
return ('<%s(%s, [%s, %s], sampling rate: %s)>' % (self.__class__.__name__,
super().__repr__(),
self.t_start, self.t_stop,
self.sampling_rate))
def __getitem__(self, i):
if isinstance(i, (int, np.integer)):
obj = super().__getitem__(i)
obj = pq.Quantity(obj.magnitude, units=obj.units)
elif isinstance(i, tuple):
obj = super().__getitem__(i)
j, k = i
if isinstance(j, (int, np.integer)):
obj = pq.Quantity(obj.magnitude, units=obj.units)
else:
if isinstance(j, slice):
if j.start:
obj.t_start = (self.t_start + j.start * self.sampling_period)
if j.step:
obj.sampling_period *= j.step
elif isinstance(j, np.ndarray):
raise NotImplementedError(
"Arrays not yet supported")
else:
raise TypeError("%s not supported" % type(j))
if isinstance(k, (int, np.integer)):
obj = obj.reshape(-1, 1)
obj.array_annotate(**deepcopy(self.array_annotations_at_index(k)))
elif isinstance(i, slice):
obj = super().__getitem__(i)
if i.start:
obj.t_start = self.t_start + i.start * self.sampling_period
obj.array_annotations = deepcopy(self.array_annotations)
elif isinstance(i, np.ndarray):
new_time_dims = np.sum(i, axis=0)
if len(new_time_dims) and all(new_time_dims == new_time_dims[0]):
obj = np.asarray(self).T.__getitem__(i.T)
obj = obj.T.reshape(self.shape[1], -1).T
obj = pq.Quantity(obj, units=self.units)
else:
raise IndexError("indexing of an AnalogSignals needs to keep the same number of "
"sample for each trace contained")
else:
raise IndexError("index should be an integer, tuple, slice or boolean numpy array")
return obj
def __setitem__(self, i, value):
if isinstance(i, int):
i = slice(i, i + 1)
elif isinstance(i, tuple):
j, k = i
if isinstance(k, int):
i = (j, slice(k, k + 1))
return super().__setitem__(i, value)
@property
def sampling_rate(self):
return self._sampling_rate
@sampling_rate.setter
def sampling_rate(self, rate):
if rate is None:
raise ValueError('sampling_rate cannot be None')
elif not hasattr(rate, 'units'):
raise ValueError('sampling_rate must have units')
self._sampling_rate = rate
@property
def sampling_period(self):
return 1. / self.sampling_rate
@sampling_period.setter
def sampling_period(self, period):
if period is None:
raise ValueError('sampling_period cannot be None')
elif not hasattr(period, 'units'):
raise ValueError('sampling_period must have units')
self.sampling_rate = 1. / period
@property
|
BSD 3-Clause New or Revised License
|
nervanasystems/ngraph-python
|
ngraph/frontends/onnx/onnx_importer/ops_bridge.py
|
Reshape
|
python
|
def Reshape(onnx_node, ng_inputs):
data = ng_inputs[0]
shape = onnx_node.get_attribute_value('shape', data.axes.lengths)
"""
new_axes = ng.make_axes([ng.make_axis(length=length) for length in shape])
x = ng.flatten(data)
x = ng.cast_axes(x, new_axes.flatten())
x = ng.unflatten(x)
return cast_to_pos_axes(x)
"""
return reshape_workaround(data, shape)
|
Reshape the input tensor similar to numpy.reshape.
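Since the op mirrors numpy semantics, a hedged numpy-only sketch of the expected behaviour (the actual graph construction goes through reshape_workaround, as shown above):
import numpy as np

data = np.arange(12, dtype=np.float32).reshape(3, 4)
shape = (2, 6)                    # stands in for the ONNX node's 'shape' attribute
print(np.reshape(data, shape))    # same elements, new axis lengths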
|
https://github.com/nervanasystems/ngraph-python/blob/ac032c83c7152b615a9ad129d54d350f9d6a2986/ngraph/frontends/onnx/onnx_importer/ops_bridge.py#L428-L442
|
from __future__ import print_function
from __future__ import division
import logging
from string import ascii_letters
from functools import reduce
import ngraph as ng
from ngraph.frontends.onnx.onnx_importer.utils.axes import reorder_axes, reshape_workaround, rename_axes
from ngraph.frontends.onnx.onnx_importer.utils.misc import split_pads_into_pairs
from ngraph.frontends.onnx.onnx_importer.utils.pool import make_pooling_op, make_global_pooling_op
from ngraph.frontends.onnx.onnx_importer.utils.reduction import make_reduction_op
from ngraph.frontends.onnx.onnx_importer.utils.binary import cast_axes_for_binary_broadcast, cast_axes_for_matmul
from ngraph.frontends.onnx.onnx_importer.utils.conv import make_convolution_op
from ngraph.frontends.tensorflow.tf_importer.utils_pos_axes import cast_to_pos_axes
logger = logging.getLogger(__name__)
def make_ng_nodes(onnx_node):
op_type = onnx_node.op_type
try:
ng_node_factory = globals()[op_type]
except KeyError:
raise NotImplementedError('Unknown operation: %s', op_type)
ng_inputs = onnx_node.get_ng_inputs()
ng_outputs = ng_node_factory(onnx_node, ng_inputs)
if type(ng_outputs) != tuple:
ng_outputs = (ng_outputs,)
return ng_outputs
def Abs(onnx_node, ng_inputs):
return ng.absolute(ng_inputs[0])
def Ceil(onnx_node, ng_inputs):
return -ng.floordivide(-ng_inputs[0], 1)
def Exp(onnx_node, ng_inputs):
return ng.exp(ng_inputs[0])
def Floor(onnx_node, ng_inputs):
return ng.floordivide(ng_inputs[0], 1)
def Log(onnx_node, ng_inputs):
return ng.log(ng_inputs[0])
def Neg(onnx_node, ng_inputs):
return ng.negative(ng_inputs[0])
def Reciprocal(onnx_node, ng_inputs):
return ng.reciprocal(ng_inputs[0])
def Sqrt(onnx_node, ng_inputs):
return ng.sqrt(ng_inputs[0])
def Sigmoid(onnx_node, ng_inputs):
return ng.sigmoid(ng_inputs[0])
def Tanh(onnx_node, ng_inputs):
return ng.tanh(ng_inputs[0])
def Relu(onnx_node, ng_inputs):
return ng.maximum(ng_inputs[0], 0.)
def LeakyRelu(onnx_node, ng_inputs):
alpha = onnx_node.get_attribute_value('alpha', 0.01)
if not 0 <= alpha <= 1:
logger.warning('LeakyRelu node (%s): alpha value should be in range (0,1), but is: %s',
onnx_node.name, alpha)
return ng.maximum(alpha * ng_inputs[0], ng_inputs[0])
def PRelu(onnx_node, ng_inputs):
x, slope = ng_inputs
x = ng.broadcast(x, x.axes + slope.axes)
slope = ng.broadcast(slope, axes=x.axes)
return ng.maximum(slope * x, x)
def Selu(onnx_node, ng_inputs):
x = ng_inputs[0]
alpha = onnx_node.get_attribute_value('alpha', 1.6732)
gamma = onnx_node.get_attribute_value('gamma', 1.0507)
return gamma * (ng.maximum(x, 0) + alpha * (ng.exp(-ng.maximum(-x, 0)) - 1))
def Elu(onnx_node, ng_inputs):
x = ng_inputs[0]
alpha = onnx_node.get_attribute_value('alpha', 1)
    if not alpha > 0:
logger.warning('Elu node (%s): alpha value should be positive, but is: %s',
onnx_node.name, alpha)
return ng.maximum(x, 0) + alpha * (ng.exp(-ng.maximum(-x, 0)) - 1)
def Softplus(onnx_node, ng_inputs):
return ng.log((ng.exp(ng_inputs[0]) + 1))
def ReduceSum(onnx_node, ng_inputs):
return make_reduction_op(ng.sum, onnx_node, ng_inputs[0])
def ReduceMax(onnx_node, ng_inputs):
return make_reduction_op(ng.max, onnx_node, ng_inputs[0])
def ReduceMin(onnx_node, ng_inputs):
return make_reduction_op(ng.min, onnx_node, ng_inputs[0])
def ReduceLogSumExp(onnx_node, ng_inputs):
op = ng.exp(ng_inputs[0])
op = make_reduction_op(ng.sum, onnx_node, op)
op = ng.log(op)
return op
def ReduceMean(onnx_node, ng_inputs):
return make_reduction_op(ng.mean, onnx_node, ng_inputs[0])
def ReduceProd(onnx_node, ng_inputs):
return make_reduction_op(ng.prod, onnx_node, ng_inputs[0])
def ArgMin(onnx_node, ng_inputs):
return make_reduction_op(ng.argmin, onnx_node, ng_inputs[0])
def ArgMax(onnx_node, ng_inputs):
return make_reduction_op(ng.argmax, onnx_node, ng_inputs[0])
def Add(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.add(left, right)
def Sub(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.subtract(left, right)
def Mul(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.multiply(left, right)
def Div(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.divide(left, right)
def Equal(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.equal(left, right)
def Less(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.less(left, right)
def Greater(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
return ng.greater(left, right)
def And(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
left = ng.not_equal(left, 0)
right = ng.not_equal(right, 0)
return left * right
def Or(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
left = ng.not_equal(left, 0)
right = ng.not_equal(right, 0)
return (left + right) > 0
def Xor(onnx_node, ng_inputs):
left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
left = ng.not_equal(left, 0)
right = ng.not_equal(right, 0)
return (left + right) % 2
def Not(onnx_node, ng_inputs):
return ng.equal(ng_inputs[0] + 1, 1)
def Sum(onnx_node, ng_inputs):
return reduce(ng.add, ng_inputs)
def Min(onnx_node, ng_inputs):
return reduce(ng.minimum, ng_inputs)
def Max(onnx_node, ng_inputs):
return reduce(ng.maximum, ng_inputs)
def Mean(onnx_node, ng_inputs):
return reduce(ng.add, ng_inputs) / len(ng_inputs)
def Dot(onnx_node, ng_inputs):
logger.warning('Dot node (%s): Dot operation is deprecated, use MatMul.', onnx_node.name)
return MatMul(onnx_node, ng_inputs)
def MatMul(onnx_node, ng_inputs):
left, right = cast_axes_for_matmul(*ng_inputs)
return cast_to_pos_axes(ng.dot(left, right))
def Gemm(onnx_node, ng_inputs):
input_a, input_b, input_c = ng_inputs
alpha = onnx_node.get_attribute_value('alpha', 1)
beta = onnx_node.get_attribute_value('beta', 1)
broadcast = onnx_node.get_attribute_value('broadcast', 1)
trans_a = onnx_node.get_attribute_value('transA', False)
trans_b = onnx_node.get_attribute_value('transB', False)
if not broadcast:
logger.warning('Gemm node (%s): import does not support broadcast value %s',
onnx_node.name, broadcast)
if trans_a:
input_a = ng.Transpose(input_a)
if trans_b:
input_b = ng.Transpose(input_b)
input_a, input_b = cast_axes_for_matmul(input_a, input_b)
a_dot_b = ng.dot(input_a, input_b)
a_dot_b = cast_to_pos_axes(a_dot_b)
return alpha * a_dot_b + beta * input_c
def Conv(onnx_node, ng_inputs):
return cast_to_pos_axes(make_convolution_op(onnx_node, ng_inputs))
def ConvTranspose(onnx_node, ng_inputs):
return cast_to_pos_axes(make_convolution_op(onnx_node, ng_inputs, transpose=True))
def Pad(onnx_node, ng_inputs):
pads = onnx_node.get_attribute_value('pads')
constant = 'constant'
mode = onnx_node.get_attribute_value('mode', constant)
value = onnx_node.get_attribute_value('value', 0)
if mode != constant or value != 0:
raise NotImplementedError('Pad node (%s): only constant padding with value=0 '
'is supported.', onnx_node.name)
pads = [pad for pad in split_pads_into_pairs(pads)]
return cast_to_pos_axes(ng.pad(ng_inputs[0], pads))
def AveragePool(onnx_node, ng_inputs):
return cast_to_pos_axes(make_pooling_op(onnx_node, ng_inputs))
def MaxPool(onnx_node, ng_inputs):
return cast_to_pos_axes(make_pooling_op(onnx_node, ng_inputs))
def GlobalMaxPool(onnx_node, ng_inputs):
return cast_to_pos_axes(make_global_pooling_op(onnx_node, ng_inputs))
def GlobalAveragePool(onnx_node, ng_inputs):
return cast_to_pos_axes(make_global_pooling_op(onnx_node, ng_inputs))
def Flatten(onnx_node, ng_inputs):
data = ng_inputs[0]
axis = onnx_node.get_attribute_value('axis', 1)
if axis < 0 or axis > len(data.axes):
raise ValueError('Flatten node (%s): %d is not a valid value for `axis`.',
onnx_node.name, axis)
return cast_to_pos_axes(ng.flatten_at(data, axis))
def Transpose(onnx_node, ng_inputs):
data = ng_inputs[0]
permute_axes = onnx_node.get_attribute_value('perm')
if permute_axes:
input_template = ''.join([ascii_letters[i] for i in range(len(data.axes))])
output_template = ''.join([ascii_letters[i] for i in permute_axes])
ng_op = reorder_axes(data, input_template, output_template)
else:
ng_op = ng.Transpose(data)
return cast_to_pos_axes(ng_op)
def Slice(onnx_node, ng_inputs):
x = ng_inputs[0]
starts = onnx_node.get_attribute_value('starts')
ends = onnx_node.get_attribute_value('ends')
if not (starts and ends and len(starts) == len(ends)):
raise ValueError('Slice node (%s): attributes `starts` and `ends` must be set '
'and of equal length.', onnx_node.name)
axes = onnx_node.get_attribute_value('axes', list(range(len(starts))))
slices_count = max(len(axes), *starts)
if slices_count > len(x.axes):
raise ValueError('Slice node (%s): specifies %d slices, there are only %d input axes.',
onnx_node.name, slices_count, len(x.axes))
slices = [slice(starts[axes.index(axis_number)], ends[axes.index(axis_number)])
if (axis_number in axes) else slice(None) for axis_number in range(len(x.axes))]
return cast_to_pos_axes(ng.tensor_slice(x, slices))
def Concat(onnx_node, ng_inputs):
axis = onnx_node.get_attribute_value('axis', 0)
if len(ng_inputs) < 2:
raise ValueError('Concat node (%s): requires at least 2 inputs, %d given.',
onnx_node.name, len(ng_inputs))
unique_input_ranks = {len(node.axes) for node in ng_inputs}
if len(unique_input_ranks) != 1:
raise ValueError('Concat node (%s): input tensors must be of equal rank.', onnx_node.name)
if axis >= unique_input_ranks.pop():
raise ValueError('Concat node (%s): `axis` attribute is out of range.', onnx_node.name)
ng_axis = ng_inputs[0].axes[axis]
return ng.concat_along_axis(ng_inputs, ng_axis)
def Squeeze(onnx_node, ng_inputs):
data = ng_inputs[0]
axes_to_squeeze = onnx_node.get_attribute_value('axes')
if max(axes_to_squeeze) >= len(data.axes):
raise ValueError('Squeeze node (%s): `axes` attribute value %d is out of range.',
onnx_node.name, max(axes_to_squeeze))
slices = [0 if index in axes_to_squeeze else
slice(None) for index, axis in enumerate(data.axes)]
return ng.tensor_slice(data, slices)
|
Apache License 2.0
|
mars-project/mars
|
mars/tensor/random/multivariate_normal.py
|
multivariate_normal
|
python
|
def multivariate_normal(
random_state,
mean,
cov,
size=None,
check_valid=None,
tol=None,
chunk_size=None,
gpu=None,
dtype=None,
):
mean = np.asarray(mean)
cov = np.asarray(cov)
if mean.ndim != 1:
raise ValueError("mean must be 1 dimensional")
if cov.ndim != 2:
        raise ValueError("cov must be 2 dimensional")
if len(set(mean.shape + cov.shape)) != 1:
raise ValueError("mean and cov must have same length")
if dtype is None:
small_kw = {}
if check_valid:
small_kw["check_valid"] = check_valid
if tol:
small_kw["tol"] = tol
dtype = np.random.multivariate_normal(mean, cov, size=(0,), **small_kw).dtype
size = random_state._handle_size(size)
seed = gen_random_seeds(1, random_state.to_numpy())[0]
op = TensorMultivariateNormal(
mean=mean,
cov=cov,
size=size,
check_valid=check_valid,
tol=tol,
seed=seed,
gpu=gpu,
dtype=dtype,
)
return op(chunk_size=chunk_size)
|
Draw random samples from a multivariate normal distribution.
The multivariate normal, multinormal or Gaussian distribution is a
generalization of the one-dimensional normal distribution to higher
dimensions. Such a distribution is specified by its mean and
covariance matrix. These parameters are analogous to the mean
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
Parameters
----------
mean : 1-D array_like, of length N
Mean of the N-dimensional distribution.
cov : 2-D array_like, of shape (N, N)
Covariance matrix of the distribution. It must be symmetric and
positive-semidefinite for proper sampling.
size : int or tuple of ints, optional
Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
If no shape is specified, a single (`N`-D) sample is returned.
check_valid : { 'warn', 'raise', 'ignore' }, optional
Behavior when the covariance matrix is not positive semidefinite.
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
gpu : bool, optional
Allocate the tensor on GPU if True, False as default
dtype : data-type, optional
Data-type of the returned tensor.
Returns
-------
out : Tensor
The drawn samples, of shape *size*, if that was provided. If not,
the shape is ``(N,)``.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Notes
-----
The mean is a coordinate in N-dimensional space, which represents the
location where samples are most likely to be generated. This is
analogous to the peak of the bell curve for the one-dimensional or
univariate normal distribution.
Covariance indicates the level to which two variables vary together.
From the multivariate normal distribution, we draw N-dimensional
samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
"spread").
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements, and only on
the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0, 0]
>>> cov = [[1, 0], [0, 100]] # diagonal covariance
Diagonal covariance means that points are oriented along x or y-axis:
>>> import matplotlib.pyplot as plt
>>> import mars.tensor as mt
>>> x, y = mt.random.multivariate_normal(mean, cov, 5000).T
>>> plt.plot(x.execute(), y.execute(), 'x')
>>> plt.axis('equal')
>>> plt.show()
Note that the covariance matrix must be positive semidefinite (a.k.a.
nonnegative-definite). Otherwise, the behavior of this method is
undefined and backwards compatibility is not guaranteed.
References
----------
.. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
Processes," 3rd ed., New York: McGraw-Hill, 1991.
.. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
Classification," 2nd ed., New York: Wiley, 2001.
Examples
--------
>>> mean = (1, 2)
>>> cov = [[1, 0], [0, 1]]
>>> x = mt.random.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> list(((x[0,0,:] - mean) < 0.6).execute())
[True, True]
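A hedged, numpy-only sketch of checking the positive-semidefinite requirement described above before handing cov to this function; the tolerance value is an arbitrary example.
>>> import numpy as np
>>> cov = np.array([[1.0, 0.2], [0.2, 2.0]])
>>> tol = 1e-8
>>> symmetric = np.allclose(cov, cov.T)
>>> bool(symmetric and np.all(np.linalg.eigvalsh(cov) >= -tol))
True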
|
https://github.com/mars-project/mars/blob/d50d9f8d8e966756e8b9dc80aca53a3e4607e7e0/mars/tensor/random/multivariate_normal.py#L161-L305
|
import itertools
import numpy as np
from ... import opcodes as OperandDef
from ...serialization.serializables import NDArrayField, StringField, Float64Field
from ...config import options
from ..utils import decide_chunk_sizes, gen_random_seeds
from ..array_utils import array_module, device
from .core import TensorRandomOperandMixin, TensorDistribution, TENSOR_CHUNK_TYPE
class TensorMultivariateNormal(TensorDistribution, TensorRandomOperandMixin):
_op_type_ = OperandDef.RAND_MULTIVARIATE_NORMAL
_fields_ = "_mean", "_cov", "_size", "_check_valid", "_tol"
_mean = NDArrayField("mean")
_cov = NDArrayField("cov")
_check_valid = StringField("check_valid")
_tol = Float64Field("tol")
_func_name = "multivariate_normal"
def __init__(
self,
mean=None,
cov=None,
size=None,
check_valid=None,
tol=None,
dtype=None,
**kw
):
dtype = np.dtype(dtype) if dtype is not None else dtype
super().__init__(
_mean=mean,
_cov=cov,
_size=size,
_check_valid=check_valid,
_tol=tol,
dtype=dtype,
**kw
)
@property
def mean(self):
return self._mean
@property
def cov(self):
return self._cov
@property
def check_valid(self):
return self._check_valid
@property
def tol(self):
return self._tol
def __call__(self, chunk_size=None):
N = self._mean.size
if self._size is None:
shape = (N,)
else:
try:
shape = tuple(self._size) + (N,)
except TypeError:
shape = (self._size, N)
return self.new_tensor(None, shape, raw_chunk_size=chunk_size)
@classmethod
def tile(cls, op):
tensor = op.outputs[0]
chunk_size = tensor.extra_params.raw_chunk_size or options.chunk_size
nsplits = decide_chunk_sizes(
tensor.shape[:-1], chunk_size, tensor.dtype.itemsize
) + ((tensor.shape[-1],),)
mean_chunk = op.mean.chunks[0] if hasattr(op.mean, "chunks") else op.mean
cov_chunk = op.cov.chunks[0] if hasattr(op.cov, "chunks") else op.cov
idxes = list(itertools.product(*[range(len(s)) for s in nsplits]))
seeds = gen_random_seeds(len(idxes), np.random.RandomState(op.seed))
out_chunks = []
for seed, out_idx, shape in zip(seeds, idxes, itertools.product(*nsplits)):
chunk_op = op.copy().reset_key()
chunk_op._state = None
chunk_op._seed = seed
chunk_op._size = shape[:-1]
out_chunk = chunk_op.new_chunk(
[mean_chunk, cov_chunk], shape=shape, index=out_idx
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors(
op.inputs, tensor.shape, chunks=out_chunks, nsplits=nsplits
)
@classmethod
def execute(cls, ctx, op):
xp = array_module(op.gpu)
if xp is np:
device_id = -1
else:
device_id = op.device or 0
with device(device_id):
rs = xp.random.RandomState(op.seed)
args = []
for k in op.args:
val = getattr(op, k, None)
if isinstance(val, TENSOR_CHUNK_TYPE):
args.append(ctx[val.key])
else:
args.append(val)
mean, cov = args[:2]
kw = {}
if args[2] is not None:
kw["size"] = args[2]
if args[3] is not None:
kw["check_valid"] = args[3]
if args[4] is not None:
kw["tol"] = args[4]
try:
res = rs.multivariate_normal(mean, cov, **kw)
if xp is not np:
ctx[op.outputs[0].key] = xp.asarray(res)
else:
ctx[op.outputs[0].key] = res
except AttributeError:
if xp is not np:
rs = np.random.RandomState(op.seed)
res = rs.multivariate_normal(mean, cov, **kw)
ctx[op.outputs[0].key] = xp.asarray(res)
else:
raise
|
Apache License 2.0
|
wikimedia/pywikibot
|
tests/flow_edit_tests.py
|
TestFlowReply.setUpClass
|
python
|
def setUpClass(cls):
super().setUpClass()
cls._topic_title = 'Topic:Stf56oxx0sd4dkj1'
|
Set up class.
|
https://github.com/wikimedia/pywikibot/blob/5097f5b9a7ef9d39f35f17edd11faf3086a01d1d/tests/flow_edit_tests.py#L52-L55
|
import unittest
from contextlib import contextmanager, suppress
from pywikibot.exceptions import LockedPageError
from pywikibot.flow import Board, Post, Topic
from tests.aspects import TestCase
MODERATION_REASON = 'Pywikibot test'
class TestFlowCreateTopic(TestCase):
family = 'wikipedia'
code = 'test'
login = True
write = True
def test_create_topic(self):
content = 'If you can read this, the Flow code in Pywikibot works!'
board = Board(self.site, 'Talk:Pywikibot test')
topic = board.new_topic(MODERATION_REASON, content, 'wikitext')
first_post = topic.replies()[0]
wikitext = first_post.get(content_format='wikitext')
self.assertIn('wikitext', first_post._content)
self.assertNotIn('html', first_post._content)
self.assertIsInstance(wikitext, str)
self.assertEqual(wikitext, content)
class TestFlowReply(TestCase):
family = 'wikipedia'
code = 'test'
login = True
write = True
@classmethod
|
MIT License
|
rookiepeng/radarsimpy
|
radar.py
|
Radar.cal_noise
|
python
|
def cal_noise(self):
noise_amp = np.zeros([
self.channel_size,
self.transmitter.pulses,
self.samples_per_pulse,
])
Boltzmann_const = 1.38064852e-23
Ts = 290
input_noise_dbm = 10 * np.log10(Boltzmann_const * Ts * 1000)
receiver_noise_dbm = (input_noise_dbm + self.receiver.rf_gain +
self.receiver.noise_figure +
10 * np.log10(self.receiver.noise_bandwidth) +
self.receiver.baseband_gain)
receiver_noise_watts = 1e-3 * 10**(receiver_noise_dbm / 10
)
noise_amplitude_mixer = np.sqrt(receiver_noise_watts *
self.receiver.load_resistor)
noise_amplitude_peak = np.sqrt(2) * noise_amplitude_mixer + noise_amp
return noise_amplitude_peak
|
Calculate noise amplitudes
:return:
Peak to peak amplitude of noise.
    ``[channels/frames, pulses, samples]``
:rtype: numpy.3darray
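A hedged, standalone rework of the same arithmetic with example receiver numbers; the 20 dB RF gain, 10 dB noise figure, 20 MHz noise bandwidth, 30 dB baseband gain and 500 ohm load below are assumptions for illustration only.
import numpy as np

k, Ts = 1.38064852e-23, 290                        # Boltzmann constant, noise temperature (K)
rf_gain, noise_figure, baseband_gain = 20, 10, 30  # dB
noise_bandwidth, load_resistor = 20e6, 500         # Hz, ohm

input_noise_dbm = 10 * np.log10(k * Ts * 1000)     # about -174 dBm/Hz
rx_noise_dbm = (input_noise_dbm + rf_gain + noise_figure
                + 10 * np.log10(noise_bandwidth) + baseband_gain)
rx_noise_watts = 1e-3 * 10 ** (rx_noise_dbm / 10)
noise_amp_mixer = np.sqrt(rx_noise_watts * load_resistor)
noise_amp_peak = np.sqrt(2) * noise_amp_mixer      # peak noise amplitude (V)
print(noise_amp_peak)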
|
https://github.com/rookiepeng/radarsimpy/blob/e26cc8eb1b913630dfdc9d3443279b4ae54c0109/radar.py#L806-L834
|
import numpy as np
import scipy.constants as const
from scipy.interpolate import interp1d
from .util import cal_phase_noise
class Transmitter:
def __init__(self,
f,
t,
f_offset=None,
tx_power=0,
prp=None,
pulses=1,
pn_f=None,
pn_power=None,
channels=[dict(location=(0, 0, 0))]):
self.tx_power = tx_power
self.pulses = pulses
self.channels = channels
if isinstance(f, (list, tuple, np.ndarray)):
self.f = np.array(f)
else:
self.f = np.array([f, f])
if isinstance(t, (list, tuple, np.ndarray)):
self.t = np.array(t)
self.t = self.t - self.t[0]
else:
self.t = np.array([0, t])
if len(self.f) != len(self.t):
raise ValueError(
'Length of `f`, and `t` should be the same')
if f_offset is not None:
if isinstance(f_offset, (list, tuple, np.ndarray)):
self.f_offset = np.array(f_offset)
else:
self.f_offset = f_offset+np.zeros(pulses)
else:
self.f_offset = np.zeros(pulses)
self.bandwidth = np.max(self.f) - np.min(self.f)
self.pulse_length = self.t[-1]-self.t[0]
self.fc_0 = (np.min(self.f)+np.max(self.f))/2
self.fc_vect = (np.min(self.f)+np.max(self.f))/2+self.f_offset
self.fc_frame = (np.min(self.fc_vect)+np.max(self.fc_vect))/2
self.pn_f = pn_f
self.pn_power = pn_power
if prp is None:
self.prp = self.pulse_length + np.zeros(pulses)
else:
if isinstance(prp, (list, tuple, np.ndarray)):
if len(prp) != pulses:
raise ValueError(
'Length of `prp` should equal to the \
length of `pulses`.')
else:
self.prp = prp
else:
self.prp = prp + np.zeros(pulses)
        if np.min(self.prp) < self.pulse_length:
raise ValueError(
'`prp` should be larger than `pulse_length`.')
self.chirp_start_time = np.cumsum(
self.prp)-self.prp[0]
self.max_code_length = 0
self.channel_size = len(self.channels)
self.locations = np.zeros((self.channel_size, 3))
self.mod = []
self.pulse_mod = np.ones(
(self.channel_size, self.pulses), dtype=complex)
self.antenna = []
self.az_patterns = []
self.az_angles = []
self.el_patterns = []
self.el_angles = []
self.az_func = []
self.el_func = []
self.pulse_phs = []
self.chip_length = []
self.polarization = np.zeros((self.channel_size, 3))
self.antenna_gains = np.zeros((self.channel_size))
self.grid = []
self.delay = np.zeros(self.channel_size)
for tx_idx, tx_element in enumerate(self.channels):
self.delay[tx_idx] = self.channels[tx_idx].get('delay', 0)
mod_enabled = True
amp = self.channels[tx_idx].get('amp', None)
if amp is not None:
if isinstance(amp, (list, tuple, np.ndarray)):
amp = np.array(amp)
else:
amp = np.array([amp, amp])
else:
mod_enabled = False
phs = self.channels[tx_idx].get('phs', None)
if phs is not None:
if isinstance(phs, (list, tuple, np.ndarray)):
phs = np.array(phs)
else:
phs = np.array([phs, phs])
else:
mod_enabled = False
if phs is not None and amp is None:
amp = np.ones_like(phs)
mod_enabled = True
elif phs is None and amp is not None:
phs = np.zeros_like(amp)
mod_enabled = True
t_mod = self.channels[tx_idx].get('t_mod', None)
if t_mod is not None:
if isinstance(t_mod, (list, tuple, np.ndarray)):
t_mod = np.array(t_mod)
else:
t_mod = np.array([0, t_mod])
else:
mod_enabled = False
if mod_enabled:
mod_var = amp*np.exp(1j*phs/180*np.pi)
else:
mod_var = None
self.mod.append({
'enabled': mod_enabled,
'var': mod_var,
't': t_mod
})
self.pulse_mod[tx_idx, :] = self.channels[tx_idx].get(
'pulse_amp', np.ones((self.pulses))) * np.exp(1j * self.channels[tx_idx].get(
'pulse_phs', np.zeros((self.pulses))) / 180 * np.pi)
self.locations[tx_idx, :] = np.array(
tx_element.get('location'))
self.polarization[tx_idx, :] = np.array(
tx_element.get('polarization', np.array([0, 0, 1])))
self.az_angles.append(
np.array(self.channels[tx_idx].get('azimuth_angle',
np.arange(-90, 91, 1))))
self.az_patterns.append(
np.array(self.channels[tx_idx].get('azimuth_pattern',
np.zeros(181))))
self.antenna_gains[tx_idx] = np.max(self.az_patterns[-1])
self.az_patterns[-1] = self.az_patterns[-1] - np.max(self.az_patterns[-1])
self.az_func.append(
interp1d(self.az_angles[-1], self.az_patterns[-1],
kind='linear', bounds_error=False, fill_value=-10000)
)
self.el_angles.append(
np.array(self.channels[tx_idx].get('elevation_angle',
np.arange(-90, 91, 1))))
self.el_patterns.append(
np.array(self.channels[tx_idx].get('elevation_pattern',
np.zeros(181))))
self.el_patterns[-1] = self.el_patterns[-1] - np.max(self.el_patterns[-1])
self.el_func.append(
interp1d(
self.el_angles[-1],
self.el_patterns[-1]-np.max(self.el_patterns[-1]),
kind='linear', bounds_error=False, fill_value=-10000)
)
self.grid.append(self.channels[tx_idx].get('grid', 1))
self.box_min = np.min(self.locations, axis=0)
self.box_max = np.max(self.locations, axis=0)
class Receiver:
def __init__(self, fs,
noise_figure=10,
rf_gain=0,
load_resistor=500,
baseband_gain=0,
channels=[dict(location=(0, 0, 0))]):
self.fs = fs
self.noise_figure = noise_figure
self.rf_gain = rf_gain
self.load_resistor = load_resistor
self.baseband_gain = baseband_gain
self.noise_bandwidth = self.fs / 2
self.channels = channels
self.channel_size = len(self.channels)
self.locations = np.zeros((self.channel_size, 3))
self.az_patterns = []
self.az_angles = []
self.az_func = []
self.el_patterns = []
self.el_angles = []
self.antenna_gains = np.zeros((self.channel_size))
self.el_func = []
for rx_idx, rx_element in enumerate(self.channels):
self.locations[rx_idx, :] = np.array(
rx_element.get('location'))
self.az_angles.append(
np.array(self.channels[rx_idx].get('azimuth_angle',
np.arange(-90, 91, 1))))
self.az_patterns.append(
np.array(self.channels[rx_idx].get('azimuth_pattern',
np.zeros(181))))
self.antenna_gains[rx_idx] = np.max(self.az_patterns[-1])
self.az_patterns[-1] = self.az_patterns[-1] - np.max(self.az_patterns[-1])
self.az_func.append(
interp1d(self.az_angles[-1], self.az_patterns[-1],
kind='linear', bounds_error=False, fill_value=-10000)
)
self.el_angles.append(
np.array(self.channels[rx_idx].get('elevation_angle',
np.arange(-90, 91, 1))))
self.el_patterns.append(
np.array(self.channels[rx_idx].get('elevation_pattern',
np.zeros(181))))
self.el_patterns[-1] = self.el_patterns[-1] - np.max(self.el_patterns[-1])
self.el_func.append(
interp1d(
self.el_angles[-1],
self.el_patterns[-1]-np.max(self.el_patterns[-1]),
kind='linear', bounds_error=False, fill_value=-10000)
)
self.box_min = np.min(self.locations, axis=0)
self.box_max = np.max(self.locations, axis=0)
class Radar:
def __init__(self,
transmitter,
receiver,
time=0,
seed=None,
**kwargs):
self.validation = kwargs.get('validation', False)
self.transmitter = transmitter
self.receiver = receiver
self.samples_per_pulse = int(self.transmitter.pulse_length *
self.receiver.fs)
self.t_offset = np.array(time)
self.frames = np.size(time)
if self.transmitter.bandwidth > 0:
self.max_range = (const.c * self.receiver.fs *
self.transmitter.pulse_length /
self.transmitter.bandwidth / 2)
self.unambiguous_speed = const.c / self.transmitter.prp[0] / self.transmitter.fc_0 / 2
self.range_resolution = const.c / 2 / self.transmitter.bandwidth
else:
self.max_range = 0
self.unambiguous_speed = 0
self.range_resolution = 0
self.channel_size = self.transmitter.channel_size * self.receiver.channel_size
self.virtual_array = np.repeat(
self.transmitter.locations, self.receiver.channel_size,
axis=0) + np.tile(self.receiver.locations,
(self.transmitter.channel_size, 1))
self.box_min = np.min(
[self.transmitter.box_min, self.receiver.box_min], axis=0)
self.box_max = np.max(
[self.transmitter.box_min, self.receiver.box_max], axis=0)
self.timestamp = self.gen_timestamp()
self.pulse_phs = self.cal_frame_phases()
self.noise = self.cal_noise()
if len(self.transmitter.f) > 2:
fun_f_t = interp1d(self.transmitter.t,
self.transmitter.f, kind='linear')
self.t = np.linspace(
self.transmitter.t[0],
self.transmitter.t[-1],
self.samples_per_pulse*100)
self.f = fun_f_t(self.t)
else:
self.f = self.transmitter.f
self.t = self.transmitter.t
self.delta_f = np.ediff1d(self.f, to_begin=0)
self.delta_t = np.ediff1d(self.t, to_begin=0)
self.k = self.delta_f[1:]/self.delta_t[1:]
self.fc_mat = np.tile(
self.transmitter.fc_vect[np.newaxis, :, np.newaxis],
(self.channel_size, 1, self.samples_per_pulse)
)
self.f_offset_mat = np.tile(
self.transmitter.f_offset[np.newaxis, :, np.newaxis],
(self.channel_size, 1, self.samples_per_pulse)
)
beat_time_samples = np.arange(0,
self.samples_per_pulse,
1) / self.receiver.fs
self.beat_time = np.tile(
beat_time_samples[np.newaxis, np.newaxis, ...],
(self.channel_size, self.transmitter.pulses, 1)
)
if self.transmitter.pn_f is not None and self.transmitter.pn_power is not None:
dummy_sig = np.ones(
(self.channel_size*self.frames*self.transmitter.pulses,
self.samples_per_pulse))
self.phase_noise = cal_phase_noise(
dummy_sig,
self.receiver.fs,
self.transmitter.pn_f,
self.transmitter.pn_power,
seed=seed,
validation=self.validation)
self.phase_noise = np.reshape(self.phase_noise, (
self.channel_size*self.frames,
self.transmitter.pulses,
self.samples_per_pulse
))
else:
self.phase_noise = None
def gen_timestamp(self):
channel_size = self.channel_size
rx_channel_size = self.receiver.channel_size
pulses = self.transmitter.pulses
samples = self.samples_per_pulse
crp = self.transmitter.prp
delay = self.transmitter.delay
fs = self.receiver.fs
chirp_delay = np.tile(
np.expand_dims(
np.expand_dims(np.cumsum(crp)-crp[0], axis=1),
axis=0),
(channel_size, 1, samples))
tx_idx = np.arange(0, channel_size)/rx_channel_size
tx_delay = np.tile(
np.expand_dims(
np.expand_dims(delay[tx_idx.astype(int)], axis=1),
axis=2),
(1, pulses, samples))
timestamp = tx_delay+chirp_delay+np.tile(
np.expand_dims(
np.expand_dims(np.arange(0, samples), axis=0),
axis=0),
(channel_size, pulses, 1))/fs
if self.frames > 1:
toffset = np.repeat(
np.tile(
np.expand_dims(
np.expand_dims(self.t_offset, axis=1), axis=2), (
1, self.transmitter.pulses, self.samples_per_pulse
)), self.channel_size, axis=0)
timestamp = np.tile(timestamp, (self.frames, 1, 1)) + toffset
elif self.frames == 1:
timestamp = timestamp + self.t_offset
return timestamp
def cal_frame_phases(self):
pulse_phs = self.transmitter.pulse_mod
pulse_phs = np.repeat(pulse_phs, self.receiver.channel_size, axis=0)
pulse_phs = np.repeat(pulse_phs, self.frames, axis=0)
return pulse_phs
def cal_code_timestamp(self):
chip_length = np.expand_dims(
np.array(self.transmitter.chip_length),
axis=1)
code_sequence = chip_length*np.tile(
np.expand_dims(
np.arange(0, self.transmitter.max_code_length),
axis=0),
(self.transmitter.channel_size, 1))
code_timestamp = np.repeat(
code_sequence, self.receiver.channel_size, axis=0)
code_timestamp = np.repeat(
code_timestamp, self.frames, axis=0)
return code_timestamp
|
MIT License
|
checkpointsw/karta
|
src/libs/icu.py
|
icuSeeker.identifyVersions
|
python
|
def identifyVersions(self, logger):
return self._version_strings
|
Identify the version(s) of the library (assuming it was already found).
Assumptions:
1. searchLib() was called before calling identifyVersions()
2. The call to searchLib() returned a number > 0
Args:
logger (logger): elementals logger instance
Return Value:
list of Textual ID(s) of the library's version(s)
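A minimal sketch of the assumed call order described above (the wrapper function name and logger setup are illustrative, not part of karta):
import logging
logger = logging.getLogger("karta-example")
def identify_icu_versions(seeker):
    """Run searchLib() first; only then is identifyVersions() meaningful."""
    num_found = seeker.searchLib(logger)   # populates seeker._version_strings
    if num_found == 0:
        return []                          # library not found in the binary
    return seeker.identifyVersions(logger)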
|
https://github.com/checkpointsw/karta/blob/b845928487b50a5b41acd532ae0399177a4356aa/src/libs/icu.py#L43-L57
|
from .lib_template import *
class icuSeeker(Seeker):
NAME = "icu"
def searchLib(self, logger):
key_string = "ICU_TIMEZONE_FILES_DIR"
key_indices = []
for idx, bin_str in enumerate(self._all_strings):
if key_string in str(bin_str):
logger.debug(f"Located a key string of {self.NAME} in address 0x{bin_str.ea:x}")
key_indices.append(idx)
break
self._version_strings = []
for key_index in key_indices:
for bin_str in self._all_strings[max(key_index - 10000, 0):min(key_index + 10000, len(self._all_strings))]:
cur_str = str(bin_str)
if cur_str.find(".") == 2 and len(cur_str) == 4 and cur_str[0].isdigit() and cur_str[1].isdigit() and cur_str[3].isdigit():
logger.debug(f"Located a version string of {self.NAME} in address 0x{bin_str.ea:x}")
self._version_strings.append(cur_str)
break
return len(self._version_strings)
|
MIT License
|
miso-belica/sumy
|
sumy/summarizers/kl.py
|
KLSummarizer.compute_tf
|
python
|
def compute_tf(self, sentences):
content_words = self._get_all_content_words_in_doc(sentences)
content_words_count = len(content_words)
content_words_freq = self._compute_word_freq(content_words)
content_word_tf = dict((w, f / content_words_count) for w, f in content_words_freq.items())
return content_word_tf
|
Computes the normalized term frequency as explained in http://www.tfidf.com/
:type sentences: [sumy.models.dom.Sentence]
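A small standalone worked example of the same normalization (not using the sumy classes): each word count is divided by the total number of content words.
from collections import Counter
def normalized_tf(content_words):
    """Term frequency normalized by the total number of content words."""
    counts = Counter(content_words)
    total = len(content_words)
    return {word: count / total for word, count in counts.items()}
print(normalized_tf(["cat", "cat", "dog"]))
# {'cat': 0.6666666666666666, 'dog': 0.3333333333333333}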
|
https://github.com/miso-belica/sumy/blob/5715e526830936d75645fd73ae54a41a870113a2/sumy/summarizers/kl.py#L54-L64
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from ._summarizer import AbstractSummarizer
class KLSummarizer(AbstractSummarizer):
stop_words = frozenset()
def __call__(self, document, sentences_count):
sentences = document.sentences
ratings = self._compute_ratings(sentences)
return self._get_best_sentences(sentences, sentences_count, ratings)
@staticmethod
def _get_all_words_in_doc(sentences):
return [w for s in sentences for w in s.words]
def _get_content_words_in_sentence(self, sentence):
normalized_words = self._normalize_words(sentence.words)
normalized_content_words = self._filter_out_stop_words(normalized_words)
return normalized_content_words
def _normalize_words(self, words):
return [self.normalize_word(w) for w in words]
def _filter_out_stop_words(self, words):
return [w for w in words if w not in self.stop_words]
@staticmethod
def _compute_word_freq(list_of_words):
word_freq = {}
for w in list_of_words:
word_freq[w] = word_freq.get(w, 0) + 1
return word_freq
def _get_all_content_words_in_doc(self, sentences):
all_words = self._get_all_words_in_doc(sentences)
content_words = self._filter_out_stop_words(all_words)
normalized_content_words = self._normalize_words(content_words)
return normalized_content_words
|
Apache License 2.0
|
mashery/io-wraps
|
google-apis-client-generator/src/googleapis/codegen/template_helpers.py
|
DoIndent
|
python
|
def DoIndent(parser, token):
try:
unused_tag_name, levels = token.split_contents()
except ValueError:
levels = 1
nodelist = parser.parse(('endindent',))
parser.delete_first_token()
return IndentNode(nodelist, levels)
|
Increase the indent level for indenting.
Usage:
{% indent [levels] %} text... {% endindent %}
Increase the indent on all lines of text by levels * LEVEL_INDENT
Args:
parser: (parser) the Django parser context.
token: (django.template.Token) the token holding this tag
Returns:
  an IndentNode
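The pure-Python sketch below mirrors what IndentNode.render does to the block's rendered body, without the Django machinery: every non-blank line is prefixed by levels * LEVEL_INDENT spaces (4 by default here, matching _defaults).
def indent_block(text, levels=1, level_indent=4):
    """Prefix every non-blank line of `text` by levels * level_indent spaces."""
    prefix = ' ' * (levels * level_indent)
    def prefix_non_blank(line):
        stripped = line.rstrip()
        return prefix + stripped if stripped else stripped
    return '\n'.join(prefix_non_blank(line) for line in text.split('\n'))
print(indent_block("def f():\n    return 1\n", levels=2))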
|
https://github.com/mashery/io-wraps/blob/7dfbb468ea3d2fe3a8601bcbe9d1be5d411a825d/google-apis-client-generator/src/googleapis/codegen/template_helpers.py#L467-L488
|
__author__ = 'aiuto@google.com (Tony Aiuto)'
import os
import re
import string
import textwrap
import django.template as django_template
from django.template.loader import render_to_string
register = django_template.Library()
_COPYRIGHT_TEXT = """
Copyright (c) 2010 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
_LANGUAGE = '_LANGUAGE'
_LINE_BREAK_INDENT = '_LINE_BREAK_INDENT'
_LINE_WIDTH = '_LINE_WIDTH'
_PARAMETER_INDENT = '_PARAMETER_INDENT'
_LEVEL_INDENT = '_LEVEL_INDENT'
_COMMENT_START = '_COMMENT_START'
_COMMENT_CONTINUE = '_COMMENT_CONTINUE'
_COMMENT_END = '_COMMENT_END'
_DOC_COMMENT_START = '_DOC_COMMENT_START'
_DOC_COMMENT_CONTINUE = '_DOC_COMMENT_CONTINUE'
_DOC_COMMENT_END = '_DOC_COMMENT_END'
_DOC_COMMENT_BEGIN_TAG = '_DOC_COMMENT_BEGIN_TAG'
_DOC_COMMENT_END_TAG = '_DOC_COMMENT_END_TAG'
_CURRENT_INDENT = '_CURRENT_INDENT'
_CURRENT_LEVEL = '_CURRENT_LEVEL'
_PARAMETER_DOC_INDENT = '_PARAMETER_DOC_INDENT'
_IMPORT_REGEX = '_IMPORT_REGEX'
_IMPORT_TEMPLATE = '_IMPORT_TEMPLATE'
_defaults = {
_LINE_BREAK_INDENT: 4,
_LINE_WIDTH: 40,
_PARAMETER_INDENT: 4,
_LEVEL_INDENT: 4,
_COMMENT_START: '# ',
_COMMENT_CONTINUE: '# ',
_COMMENT_END: '',
_DOC_COMMENT_START: '# ',
_PARAMETER_DOC_INDENT: 6,
_DOC_COMMENT_CONTINUE: '# ',
_DOC_COMMENT_END: '',
_DOC_COMMENT_BEGIN_TAG: '',
_DOC_COMMENT_END_TAG: '',
_IMPORT_REGEX: r'^\s*import\s+(?P<import>[a-zA-Z0-9.]+)',
_IMPORT_TEMPLATE: 'import %s',
}
_language_defaults = {
'csharp': {
_LINE_WIDTH: 120,
_PARAMETER_INDENT: 4,
_LEVEL_INDENT: 4,
_COMMENT_START: '// ',
_COMMENT_CONTINUE: '// ',
_COMMENT_END: '',
_DOC_COMMENT_START: '/// ',
_DOC_COMMENT_CONTINUE: '/// ',
_DOC_COMMENT_BEGIN_TAG: '<summary>',
_DOC_COMMENT_END_TAG: '</summary>',
},
'go': {
_LINE_WIDTH: 120,
_PARAMETER_INDENT: 4,
_LEVEL_INDENT: 8,
_COMMENT_START: '// ',
_COMMENT_CONTINUE: '// ',
_COMMENT_END: '',
_DOC_COMMENT_START: '// ',
_DOC_COMMENT_CONTINUE: '// '
},
'java': {
_LINE_WIDTH: 100,
_COMMENT_START: '/* ',
_COMMENT_CONTINUE: ' * ',
_COMMENT_END: ' */',
_DOC_COMMENT_START: '/** ',
_PARAMETER_DOC_INDENT: 6,
_IMPORT_REGEX: r'^\s*import\s+(?P<import>[a-zA-Z0-9.]+);',
_IMPORT_TEMPLATE: 'import %s;',
},
'objc': {
_LINE_WIDTH: 80,
_COMMENT_START: '/* ',
_COMMENT_CONTINUE: ' * ',
_COMMENT_END: ' */',
_DOC_COMMENT_START: '// ',
_DOC_COMMENT_CONTINUE: '// ',
},
'php': {
_LINE_BREAK_INDENT: 4,
_PARAMETER_INDENT: 4,
_LEVEL_INDENT: 4,
_LINE_WIDTH: 80,
_COMMENT_START: '/* ',
_COMMENT_CONTINUE: ' * ',
_COMMENT_END: ' */',
_DOC_COMMENT_START: '/** ',
},
}
def _GetFromContext(context, *variable):
for v in variable:
ret = context.get(v)
if ret:
return ret
for v in variable:
ret = _defaults.get(v)
if ret:
return ret
return ''
def _DivideIntoBlocks(lines, prefix):
block = []
prefix = prefix.rstrip()
for line in lines:
if line.startswith(prefix):
line = line[len(prefix):].strip()
if not line:
if block:
yield block
block = []
continue
block.append(line)
if block:
yield block
def _ExtractCommentPrefix(line):
got_tag = False
prefix_length = 0
for c in line:
if c == ' ':
if got_tag:
break
prefix_length += 1
elif c in string.punctuation:
got_tag = True
prefix_length += 1
else:
break
return line[:prefix_length]
@register.filter
def java_comment_fragment(value, indent):
if not indent:
indent = 0
prefix = '%s * ' % (' ' * indent)
wrapper = textwrap.TextWrapper(width=_language_defaults['java'][_LINE_WIDTH],
replace_whitespace=False,
initial_indent=prefix,
subsequent_indent=prefix)
wrapped = wrapper.fill(value)
if wrapped.startswith(prefix):
wrapped = wrapped[len(prefix):]
return wrapped
@register.filter
def java_parameter_wrap(value):
indent = _language_defaults['java'][_PARAMETER_DOC_INDENT]
prefix = ' * %s ' % (' ' * indent)
wrapper = textwrap.TextWrapper(width=_language_defaults['java'][_LINE_WIDTH],
replace_whitespace=False,
initial_indent='',
subsequent_indent=prefix)
wrapped = wrapper.fill(value)
return wrapped
@register.filter
def block_comment(value):
if not value:
return ''
lines = value.split('\n')
leading_blank = False
if not lines[0]:
leading_blank = True
comment_prefix = _ExtractCommentPrefix(lines[1])
else:
comment_prefix = _ExtractCommentPrefix(lines[0])
wrapper = textwrap.TextWrapper(width=_language_defaults['java'][_LINE_WIDTH],
replace_whitespace=False,
initial_indent=('%s ' % comment_prefix),
subsequent_indent=('%s ' % comment_prefix))
wrapped_blocks = []
for block in _DivideIntoBlocks(lines, comment_prefix):
wrapped_blocks.append(wrapper.fill(' '.join(block)))
ret = ''
if leading_blank:
ret = '\n'
return ret + ('\n%s\n' % comment_prefix).join(wrapped_blocks)
@register.filter
def noblanklines(value):
return '\n'.join([line for line in value.split('\n') if line.strip()])
@register.filter
def collapse_blanklines(value):
lines = []
previous_blank = False
for line in value.split('\n'):
if not line.strip():
if not previous_blank:
lines.append(line)
previous_blank = True
else:
pass
else:
lines.append(line)
previous_blank = False
return '\n'.join(lines)
class LanguageNode(django_template.Node):
def __init__(self, language):
self._language = language
def render(self, context):
try:
context.autoescape = False
except AttributeError:
pass
context[_LANGUAGE] = self._language
per_language_defaults = _language_defaults.get(self._language)
if per_language_defaults:
context.update(per_language_defaults)
context[_CURRENT_INDENT] = 0
context[_CURRENT_LEVEL] = 0
return ''
@register.tag(name='language')
def DoLanguage(unused_parser, token):
try:
unused_tag_name, language = token.split_contents()
except ValueError:
raise django_template.TemplateSyntaxError(
'language tag requires a single argument: %s' % token.contents)
return LanguageNode(language)
class IndentNode(django_template.Node):
def __init__(self, nodelist, levels):
self._nodelist = nodelist
self._levels = int(levels)
def render(self, context):
current_indent = context.get(_CURRENT_INDENT, 0)
current_indent_level = context.get(_CURRENT_LEVEL, 0)
extra = (_GetFromContext(context, _LEVEL_INDENT) * self._levels)
context[_CURRENT_INDENT] = current_indent + extra
context[_CURRENT_LEVEL] = current_indent_level + self._levels
lines = self._nodelist.render(context)
context[_CURRENT_INDENT] = current_indent
context[_CURRENT_LEVEL] = current_indent_level
prefix = ' ' * extra
def _PrefixNonBlank(s):
x = s.rstrip()
if x:
x = '%s%s' % (prefix, x)
return x
return '\n'.join([_PrefixNonBlank(line) for line in lines.split('\n')])
@register.tag(name='indent')
|
MIT License
|
jalanb/co.py.cat
|
copycat/coderack.py
|
CodeRack.proposeRule
|
python
|
def proposeRule(self, facet, description, category, relation, oldCodelet):
from rule import Rule
rule = Rule(facet, description, category, relation)
rule.updateStrength()
if description and relation:
depths = description.conceptualDepth + relation.conceptualDepth
depths /= 200.0
urgency = math.sqrt(depths) * 100.0
else:
urgency = 0
self.newCodelet('rule-strength-tester', oldCodelet, urgency, rule)
|
Creates a proposed rule and posts a rule-strength-tester codelet.
The new codelet's urgency is a function of
the conceptual depth of the descriptions in the rule
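To make the urgency formula above concrete, here is a standalone sketch (the depth values are made up for the example): urgency = sqrt((description depth + relation depth) / 200) * 100, so deeper concepts yield more urgent codelets.
import math
def rule_urgency(description_depth, relation_depth):
    depths = (description_depth + relation_depth) / 200.0
    return math.sqrt(depths) * 100.0
print(rule_urgency(50, 50))   # ~70.7
print(rule_urgency(10, 10))   # ~31.6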
|
https://github.com/jalanb/co.py.cat/blob/a6a56c6e516d7041f5bd4943e9d38f481d396f9e/copycat/coderack.py#L128-L144
|
import re
import inspect
import math
import logging
import random
import formulas
import workspaceFormulas
from slipnet import slipnet
from codelet import Codelet
from coderackPressure import CoderackPressures
NUMBER_OF_BINS = 7
MAX_NUMBER_OF_CODELETS = 100
codeletsUsed = {}
def getUrgencyBin(urgency):
i = int(urgency) * NUMBER_OF_BINS / 100
if i >= NUMBER_OF_BINS:
return NUMBER_OF_BINS
return i + 1
class CodeRack(object):
def __init__(self):
self.speedUpBonds = False
self.removeBreakerCodelets = False
self.removeTerracedScan = False
self.pressures = CoderackPressures()
self.pressures.initialisePressures()
self.reset()
self.initialCodeletNames = ('bottom-up-bond-scout',
'replacement-finder',
'bottom-up-correspondence-scout')
self.codeletMethodsDir = None
self.runCodelets = {}
self.postings = {}
def reset(self):
from temperature import temperature
self.codelets = []
self.codeletsRun = 0
temperature.clamped = True
self.pressures.reset()
def updateCodelets(self):
if self.codeletsRun > 0:
self.postTopDownCodelets()
self.postBottomUpCodelets()
def post(self, codelet):
self.postings[codelet.name] = self.postings.get(codelet.name, 0) + 1
self.pressures.addCodelet(codelet)
self.codelets += [codelet]
if len(self.codelets) > 100:
oldCodelet = self.chooseOldCodelet()
self.removeCodelet(oldCodelet)
def postTopDownCodelets(self):
for node in slipnet.slipnodes:
logging.info('Trying slipnode: %s', node.get_name())
if node.activation != 100.0:
continue
logging.info('using slipnode: %s', node.get_name())
for codeletName in node.codelets:
probability = workspaceFormulas.probabilityOfPosting(
codeletName)
howMany = workspaceFormulas.howManyToPost(codeletName)
for _ in range(0, howMany):
if random.random() >= probability:
continue
urgency = getUrgencyBin(
node.activation * node.conceptualDepth / 100.0)
codelet = Codelet(codeletName, urgency, self.codeletsRun)
codelet.arguments += [node]
logging.info('Post top down: %s, with urgency: %d',
codelet.name, urgency)
self.post(codelet)
def postBottomUpCodelets(self):
logging.info("posting bottom up codelets")
self.__postBottomUpCodelets('bottom-up-description-scout')
self.__postBottomUpCodelets('bottom-up-bond-scout')
self.__postBottomUpCodelets('group-scout--whole-string')
self.__postBottomUpCodelets('bottom-up-correspondence-scout')
self.__postBottomUpCodelets('important-object-correspondence-scout')
self.__postBottomUpCodelets('replacement-finder')
self.__postBottomUpCodelets('rule-scout')
self.__postBottomUpCodelets('rule-translator')
if not self.removeBreakerCodelets:
self.__postBottomUpCodelets('breaker')
def __postBottomUpCodelets(self, codeletName):
probability = workspaceFormulas.probabilityOfPosting(codeletName)
howMany = workspaceFormulas.howManyToPost(codeletName)
if self.speedUpBonds:
if 'bond' in codeletName or 'group' in codeletName:
howMany *= 3
urgency = 3
if codeletName == 'breaker':
urgency = 1
if formulas.Temperature < 25.0 and 'translator' in codeletName:
urgency = 5
for _ in range(0, howMany):
if random.random() < probability:
codelet = Codelet(codeletName, urgency, self.codeletsRun)
self.post(codelet)
def removeCodelet(self, codelet):
self.codelets.remove(codelet)
self.pressures.removeCodelet(codelet)
def newCodelet(self, name, oldCodelet, strength, arguments=None):
logging.debug('Posting new codelet called %s', name)
urgency = getUrgencyBin(strength)
newCodelet = Codelet(name, urgency, self.codeletsRun)
if arguments:
newCodelet.arguments = [arguments]
else:
newCodelet.arguments = oldCodelet.arguments
newCodelet.pressure = oldCodelet.pressure
self.tryRun(newCodelet)
|
MIT License
|
zengyi-qin/tlnet
|
avod/core/format_checker.py
|
check_box_3d_format
|
python
|
def check_box_3d_format(input_data):
if isinstance(input_data, np.ndarray):
if input_data.ndim == 2:
if input_data.shape[1] != 7:
raise TypeError('Given input does not have valid number of '
'attributes. Should be N x 7 for box_3d.')
elif input_data.ndim == 1:
if input_data.shape[0] != 7:
raise TypeError('Given input does not have valid number of '
'attributes. Should be 7 for box_3d.')
    elif isinstance(input_data, tf.Tensor):
        if input_data.shape[1] != 7:
            raise TypeError('Given input does not have valid number of '
                            'attributes. Should be N x 7 for box_3d.')
else:
raise TypeError('Given input is not of valid types.'
'(i.e. np.ndarray or tf.Tensor)')
|
Checks for correct box_3d format. If not proper type, raises error.
Args:
input_data: input numpy array or tensor to check for valid box_3d format
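A quick usage sketch, assuming the module is importable as avod.core.format_checker (the arrays below are placeholders):
import numpy as np
from avod.core import format_checker
format_checker.check_box_3d_format(np.zeros((4, 7)))   # N x 7 batch: passes
format_checker.check_box_3d_format(np.zeros(7))        # single 7-vector: passes
try:
    format_checker.check_box_3d_format(np.zeros((4, 6)))   # anchor layout, not box_3d
except TypeError as err:
    print(err)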
|
https://github.com/zengyi-qin/tlnet/blob/11fa48160158b550ad2dc810ed564eebe17e8f5e/avod/core/format_checker.py#L80-L107
|
import numpy as np
import tensorflow as tf
def check_anchor_format(input_data):
if isinstance(input_data, np.ndarray):
if input_data.ndim == 2:
if input_data.shape[1] != 6:
raise TypeError('Given input does not have valid number of '
'attributes. Should be N x 6 for anchor.')
elif input_data.ndim == 1:
if input_data.shape[0] != 6:
raise TypeError('Given input does not have valid number of '
'attributes. Should be 6 for anchor.')
    elif isinstance(input_data, tf.Tensor):
        if input_data.shape[1] != 6:
            raise TypeError('Given input does not have valid number of '
                            'attributes. Should be N x 6 for anchor.')
else:
raise TypeError('Given input is not of valid types.'
'(i.e. np.ndarray or tf.Tensor)')
|
Apache License 2.0
|
yinboc/few-shot-meta-baseline
|
meta-dataset/meta_dataset/data/reader.py
|
dataset_id_generator
|
python
|
def dataset_id_generator(dataset_spec, split, pool, sampler):
chunk_sizes = sampler.compute_chunk_sizes()
flush_chunk_size, other_chunk_sizes = chunk_sizes[0], chunk_sizes[1:]
class_set = dataset_spec.get_classes(split)
num_classes = len(class_set)
dummy_dataset_id = num_classes
total_images_per_class = dict(
(class_idx,
dataset_spec.get_total_images_per_class(class_set[class_idx], pool))
for class_idx in range(num_classes))
cursors = [0] * num_classes
while True:
flushed_dataset_indices = []
selected_dataset_indices = [[] for _ in other_chunk_sizes]
episode_description = sampler.sample_episode_description()
for element in episode_description:
class_idx, distribution = element[0], element[1:]
total_requested = sum(distribution)
if total_requested > total_images_per_class[class_idx]:
raise ValueError("Requesting more images than what's available for the "
'whole class')
remaining = total_images_per_class[class_idx] - cursors[class_idx]
if total_requested > remaining:
flushed_dataset_indices.extend([class_idx] * remaining)
cursors[class_idx] = 0
for num_to_allocate, dataset_indices in zip(distribution,
selected_dataset_indices):
dataset_indices.extend([class_idx] * num_to_allocate)
cursors[class_idx] += total_requested
_pad(flushed_dataset_indices, flush_chunk_size, dummy_dataset_id)
for dataset_indices, chunk_size in zip(selected_dataset_indices,
other_chunk_sizes):
_pad(dataset_indices, chunk_size, dummy_dataset_id)
dataset_indices = itertools.chain(flushed_dataset_indices,
*selected_dataset_indices)
for i in dataset_indices:
yield i
|
Generates a stream of dataset IDs forming a sequence of episodes.
Each episode is chunked into:
* a "flush" chunk, which is meant to allow to flush examples, in case we are
at the end of an epoch for one or more class in the episode (we want to
avoid accidentally repeating an example due to epoch boundaries), and
* some number of additional chunks (for example, a "support" chunk and a
"query" chunk).
To make sure the input pipeline knows where the episode boundary is within the
stream (and where the boundary is between chunks in an episode), we enforce
that each chunk has a fixed size by padding with dummy dataset IDs (of value
`num_classes`) as needed (in some cases it's possible that no padding is ever
needed). The size of each chunk is prescribed by the `compute_chunk_sizes`
method of `sampler`, which also implicitly defines the number of additional
chunks (i.e. `len(chunk_sizes) - 1`).
This generator is meant to be used with
`tf.data.experimental.choose_from_datasets` and assumes that the list of
tf.data.Dataset objects corresponding to each class in the dataset (there are
`num_classes` of them, which is determined by inspecting the `dataset_spec`
argument using the `split` argument) is appended with a "dummy" Dataset (which
has index `num_classes` in the list) which outputs a constant `(b'',
DUMMY_CLASS_ID)` tuple.
Note that a dataset ID is different from the (absolute) class ID: the dataset
ID refers to the index of the Dataset in the list of Dataset objects, and the
class ID (or label) refers to the second element of the tuple that the Dataset
outputs.
Args:
dataset_spec: DatasetSpecification, dataset specification.
split: one of Split.TRAIN, Split.VALID, or Split.TEST.
pool: A string ('train' or 'test') or None, indicating which example-level
split to select, if the current dataset has them.
sampler: EpisodeDescriptionSampler instance.
Yields:
i: int, dataset ID.
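To illustrate the chunk layout outside the TF pipeline, here is a toy sketch with made-up numbers: 3 classes (dataset IDs 0-2, dummy ID 3) and chunk sizes (flush=4, support=5, query=5).
def pad(ids, chunk_size, dummy_id):
    """Same idea as _pad above, but non-destructive for the example."""
    return ids + [dummy_id] * (chunk_size - len(ids))
dummy_id = 3                               # equals num_classes
flush = pad([1, 1], 4, dummy_id)           # leftover examples being flushed
support = pad([0, 0, 2], 5, dummy_id)      # support chunk of a 2-way episode
query = pad([0, 2, 2], 5, dummy_id)        # query chunk
print(flush + support + query)
# [1, 1, 3, 3, 0, 0, 2, 3, 3, 0, 2, 2, 3, 3]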
|
https://github.com/yinboc/few-shot-meta-baseline/blob/779fae39dad3537e7c801049c858923e2a352dfe/meta-dataset/meta_dataset/data/reader.py#L58-L165
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import os
from meta_dataset import data
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
DUMMY_CLASS_ID = -1
def _pad(dataset_indices, chunk_size, dummy_dataset_id):
pad_size = chunk_size - len(dataset_indices)
assert pad_size >= 0
dataset_indices.extend([dummy_dataset_id] * pad_size)
|
MIT License
|
kmod/icbd
|
stdlib/python2.5/commands.py
|
getstatus
|
python
|
def getstatus(file):
return getoutput('ls -ld' + mkarg(file))
|
Return output of "ls -ld <file>" in a string.
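Usage is a single call (Python 2 only, POSIX only; the path below is arbitrary):
from commands import getstatus
print(getstatus('/etc/hosts'))
# e.g. '-rw-r--r-- 1 root root 221 Jan  1 12:00 /etc/hosts'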
|
https://github.com/kmod/icbd/blob/9636564eb3993afa07c6220d589bbd1991923d74/stdlib/python2.5/commands.py#L33-L35
|
__all__ = ["getstatusoutput","getoutput","getstatus"]
|
MIT License
|
dwavesystems/dwave-system
|
dwave/embedding/chain_breaks.py
|
weighted_random
|
python
|
def weighted_random(samples, chains):
samples, labels = dimod.as_samples(samples)
if labels != range(len(labels)):
relabel = {v: idx for idx, v in enumerate(labels)}
chains = [[relabel[v] for v in chain] for chain in chains]
else:
chains = list(map(list, chains))
idx = [np.random.choice(chain) for chain in chains]
num_samples, num_variables = samples.shape
return samples[:, idx], np.arange(num_samples)
|
Unembed samples using weighted random choice for broken chains.
Args:
samples (samples_like):
A collection of samples. `samples_like` is an extension of NumPy's
array_like. See :func:`dimod.as_samples`.
chains (list[array_like]):
List of chains, where each chain is an array_like collection of
        the variables in the same order as their representation in the given
samples.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Unembedded samples as an nS-by-nC array of
dtype 'int8', where nC is the number of chains and nS the number
of samples. Broken chains are resolved by setting the sample value to
a random value weighted by frequency of the value in the chain.
        :obj:`numpy.ndarray`: Indices of the samples. Equivalent to
:code:`np.arange(nS)` because all samples are kept
and no samples are added.
Examples:
This example unembeds samples from a target graph that chains nodes 0 and 1 to
represent one source node and nodes 2, 3, and 4 to represent another.
The sample has broken chains for both source nodes.
>>> import dimod
>>> import numpy as np
...
>>> chains = [(0, 1), (2, 3, 4)]
>>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
>>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP
>>> unembedded # doctest: +SKIP
array([[1, 1]], dtype=int8)
>>> idx # doctest: +SKIP
    array([0])
|
https://github.com/dwavesystems/dwave-system/blob/10acaa5daa2cb0337a6a961b2f5c41af6020bea3/dwave/embedding/chain_breaks.py#L225-L279
|
from collections import Callable
from heapq import heapify, heappop
import numpy as np
import dimod
__all__ = ['broken_chains',
'discard',
'majority_vote',
'weighted_random',
'MinimizeEnergy',
]
def broken_chains(samples, chains):
samples, labels = dimod.as_samples(samples)
if labels != range(len(labels)):
relabel = {v: idx for idx, v in enumerate(labels)}
chains = [[relabel[v] for v in chain] for chain in chains]
else:
chains = list(map(list, chains))
num_samples, num_variables = samples.shape
num_chains = len(chains)
broken = np.zeros((num_samples, num_chains), dtype=bool, order='F')
for cidx, chain in enumerate(chains):
if isinstance(chain, set):
chain = list(chain)
chain = np.asarray(chain)
if chain.ndim > 1:
raise ValueError("chains should be 1D array_like objects")
if len(chain) <= 1:
continue
all_ = (samples[:, chain] == 1).all(axis=1)
any_ = (samples[:, chain] == 1).any(axis=1)
broken[:, cidx] = np.bitwise_xor(all_, any_)
return broken
def discard(samples, chains):
samples, labels = dimod.as_samples(samples)
if labels != range(len(labels)):
relabel = {v: idx for idx, v in enumerate(labels)}
chains = [[relabel[v] for v in chain] for chain in chains]
else:
chains = list(map(list, chains))
num_samples, num_variables = samples.shape
num_chains = len(chains)
broken = broken_chains(samples, chains)
unbroken_idxs, = np.where(~broken.any(axis=1))
chain_variables = np.fromiter((np.asarray(tuple(chain))[0] if isinstance(chain, set) else np.asarray(chain)[0]
for chain in chains),
count=num_chains, dtype=int)
return samples[np.ix_(unbroken_idxs, chain_variables)], unbroken_idxs
def majority_vote(samples, chains):
samples, labels = dimod.as_samples(samples)
if labels != range(len(labels)):
relabel = {v: idx for idx, v in enumerate(labels)}
chains = [[relabel[v] for v in chain] for chain in chains]
else:
chains = list(map(list, chains))
num_samples, num_variables = samples.shape
num_chains = len(chains)
unembedded = np.empty((num_samples, num_chains), dtype='int8', order='F')
if samples.all():
for cidx, chain in enumerate(chains):
unembedded[:, cidx] = 2*(samples[:, chain].sum(axis=1) >= 0) - 1
else:
for cidx, chain in enumerate(chains):
mid = len(chain) / 2
unembedded[:, cidx] = (samples[:, chain].sum(axis=1) >= mid)
return unembedded, np.arange(num_samples)
|
Apache License 2.0
|
docusign/docusign-python-client
|
docusign_esign/models/display_appliance_recipient.py
|
DisplayApplianceRecipient.recipient_type
|
python
|
def recipient_type(self, recipient_type):
self._recipient_type = recipient_type
|
Sets the recipient_type of this DisplayApplianceRecipient.
:param recipient_type: The recipient_type of this DisplayApplianceRecipient.
:type: str
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/display_appliance_recipient.py#L602-L611
|
from pprint import pformat
from six import iteritems
import re
class DisplayApplianceRecipient(object):
def __init__(self, cfr_part11=None, company=None, custom_field2=None, digital_signature_base64=None, digital_signatures_pending=None, email=None, first_name=None, full_name=None, initials_base64=None, in_person_email=None, is_notary=None, is_notary_transaction=None, job_title=None, last_name=None, notary_seal_base64=None, phone_number=None, recipient_complete_count=None, recipient_guid_id=None, recipient_id=None, recipient_status=None, recipient_type=None, require_signer_certificate=None, row_state=None, signature_base64=None, signature_image_id=None, signed=None, signer_apply_tabs=None, signer_attachment_base64=None, user_id=None, user_name=None):
self.swagger_types = {
'cfr_part11': 'bool',
'company': 'str',
'custom_field2': 'str',
'digital_signature_base64': 'str',
'digital_signatures_pending': 'str',
'email': 'str',
'first_name': 'str',
'full_name': 'str',
'initials_base64': 'str',
'in_person_email': 'str',
'is_notary': 'bool',
'is_notary_transaction': 'bool',
'job_title': 'str',
'last_name': 'str',
'notary_seal_base64': 'str',
'phone_number': 'str',
'recipient_complete_count': 'int',
'recipient_guid_id': 'str',
'recipient_id': 'str',
'recipient_status': 'str',
'recipient_type': 'str',
'require_signer_certificate': 'str',
'row_state': 'str',
'signature_base64': 'str',
'signature_image_id': 'str',
'signed': 'bool',
'signer_apply_tabs': 'bool',
'signer_attachment_base64': 'str',
'user_id': 'str',
'user_name': 'str'
}
self.attribute_map = {
'cfr_part11': 'cfrPart11',
'company': 'company',
'custom_field2': 'customField2',
'digital_signature_base64': 'digitalSignatureBase64',
'digital_signatures_pending': 'digitalSignaturesPending',
'email': 'email',
'first_name': 'firstName',
'full_name': 'fullName',
'initials_base64': 'initialsBase64',
'in_person_email': 'inPersonEmail',
'is_notary': 'isNotary',
'is_notary_transaction': 'isNotaryTransaction',
'job_title': 'jobTitle',
'last_name': 'lastName',
'notary_seal_base64': 'notarySealBase64',
'phone_number': 'phoneNumber',
'recipient_complete_count': 'recipientCompleteCount',
'recipient_guid_id': 'recipientGuidId',
'recipient_id': 'recipientId',
'recipient_status': 'recipientStatus',
'recipient_type': 'recipientType',
'require_signer_certificate': 'requireSignerCertificate',
'row_state': 'rowState',
'signature_base64': 'signatureBase64',
'signature_image_id': 'signatureImageId',
'signed': 'signed',
'signer_apply_tabs': 'signerApplyTabs',
'signer_attachment_base64': 'signerAttachmentBase64',
'user_id': 'userId',
'user_name': 'userName'
}
self._cfr_part11 = cfr_part11
self._company = company
self._custom_field2 = custom_field2
self._digital_signature_base64 = digital_signature_base64
self._digital_signatures_pending = digital_signatures_pending
self._email = email
self._first_name = first_name
self._full_name = full_name
self._initials_base64 = initials_base64
self._in_person_email = in_person_email
self._is_notary = is_notary
self._is_notary_transaction = is_notary_transaction
self._job_title = job_title
self._last_name = last_name
self._notary_seal_base64 = notary_seal_base64
self._phone_number = phone_number
self._recipient_complete_count = recipient_complete_count
self._recipient_guid_id = recipient_guid_id
self._recipient_id = recipient_id
self._recipient_status = recipient_status
self._recipient_type = recipient_type
self._require_signer_certificate = require_signer_certificate
self._row_state = row_state
self._signature_base64 = signature_base64
self._signature_image_id = signature_image_id
self._signed = signed
self._signer_apply_tabs = signer_apply_tabs
self._signer_attachment_base64 = signer_attachment_base64
self._user_id = user_id
self._user_name = user_name
@property
def cfr_part11(self):
return self._cfr_part11
@cfr_part11.setter
def cfr_part11(self, cfr_part11):
self._cfr_part11 = cfr_part11
@property
def company(self):
return self._company
@company.setter
def company(self, company):
self._company = company
@property
def custom_field2(self):
return self._custom_field2
@custom_field2.setter
def custom_field2(self, custom_field2):
self._custom_field2 = custom_field2
@property
def digital_signature_base64(self):
return self._digital_signature_base64
@digital_signature_base64.setter
def digital_signature_base64(self, digital_signature_base64):
self._digital_signature_base64 = digital_signature_base64
@property
def digital_signatures_pending(self):
return self._digital_signatures_pending
@digital_signatures_pending.setter
def digital_signatures_pending(self, digital_signatures_pending):
self._digital_signatures_pending = digital_signatures_pending
@property
def email(self):
return self._email
@email.setter
def email(self, email):
self._email = email
@property
def first_name(self):
return self._first_name
@first_name.setter
def first_name(self, first_name):
self._first_name = first_name
@property
def full_name(self):
return self._full_name
@full_name.setter
def full_name(self, full_name):
self._full_name = full_name
@property
def initials_base64(self):
return self._initials_base64
@initials_base64.setter
def initials_base64(self, initials_base64):
self._initials_base64 = initials_base64
@property
def in_person_email(self):
return self._in_person_email
@in_person_email.setter
def in_person_email(self, in_person_email):
self._in_person_email = in_person_email
@property
def is_notary(self):
return self._is_notary
@is_notary.setter
def is_notary(self, is_notary):
self._is_notary = is_notary
@property
def is_notary_transaction(self):
return self._is_notary_transaction
@is_notary_transaction.setter
def is_notary_transaction(self, is_notary_transaction):
self._is_notary_transaction = is_notary_transaction
@property
def job_title(self):
return self._job_title
@job_title.setter
def job_title(self, job_title):
self._job_title = job_title
@property
def last_name(self):
return self._last_name
@last_name.setter
def last_name(self, last_name):
self._last_name = last_name
@property
def notary_seal_base64(self):
return self._notary_seal_base64
@notary_seal_base64.setter
def notary_seal_base64(self, notary_seal_base64):
self._notary_seal_base64 = notary_seal_base64
@property
def phone_number(self):
return self._phone_number
@phone_number.setter
def phone_number(self, phone_number):
self._phone_number = phone_number
@property
def recipient_complete_count(self):
return self._recipient_complete_count
@recipient_complete_count.setter
def recipient_complete_count(self, recipient_complete_count):
self._recipient_complete_count = recipient_complete_count
@property
def recipient_guid_id(self):
return self._recipient_guid_id
@recipient_guid_id.setter
def recipient_guid_id(self, recipient_guid_id):
self._recipient_guid_id = recipient_guid_id
@property
def recipient_id(self):
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
self._recipient_id = recipient_id
@property
def recipient_status(self):
return self._recipient_status
@recipient_status.setter
def recipient_status(self, recipient_status):
self._recipient_status = recipient_status
@property
def recipient_type(self):
return self._recipient_type
@recipient_type.setter
|
MIT License
|
staugur/flask-pluginkit
|
flask_pluginkit/pluginkit.py
|
PluginManager._error_handler
|
python
|
def _error_handler(self, plugin_info, errhandler_rule):
if isinstance(errhandler_rule, dict):
_errhandler_rule = []
for code, func in iteritems(errhandler_rule):
if not isinstance(code, int):
raise PEPError(
"The errhandler code is not interger for %s"
% plugin_info.plugin_name
)
_errhandler_rule.append(dict(error=code, handler=func))
errhandler_rule = _errhandler_rule
if isinstance(errhandler_rule, (tuple, list)):
plugin_errhandler_rules = []
for eh in errhandler_rule:
if (
not isinstance(eh, dict)
or "error" not in eh
or "handler" not in eh
):
raise PEPError(
"The errhandler format error for %s"
% plugin_info.plugin_name
)
code_or_exc = eh["error"]
func = eh["handler"]
if not isinstance(code_or_exc, int):
try:
_is_ok_exc = issubclass(code_or_exc, Exception)
except TypeError:
raise PEPError(
"The errhandler custom error class requires"
" inheritance of Exception for %s"
% plugin_info.plugin_name
)
else:
if not _is_ok_exc:
raise PEPError(
"The errhandler exc is not a subclass of"
" Exception for %s" % plugin_info.plugin_name
)
if not callable(func):
raise PEPError(
"The errhandler func is not called for %s"
% plugin_info.plugin_name
)
plugin_errhandler_rules.append((code_or_exc, func))
plugin_info["plugin_errhandler"] = plugin_errhandler_rules
else:
raise PEPError(
"The error handler rule is invalid for %s, "
"it should be a list or tuple." % plugin_info.plugin_name
)
|
Error code handler.
:param plugin_info: the plugin metadata (Attribution) being registered.
:param errhandler_rule: e.g. {err_code=func} or [{error=exception_class,
handler=func}, {error=err_code, handler=func}]
:raises PEPError: if error handler rule or content is invalid.
.. versionadded:: 3.2.0
.. versionchanged:: 3.4.0
Allow registration of class-based exception handlers
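A sketch of the two accepted rule shapes from the plugin side (handler names and the custom exception class are illustrative placeholders); a plugin would return such a rule under the "errhandler" key of its register() dict.
class PaymentError(Exception):
    pass
def handle_404(error):
    return "not found", 404
def handle_payment(error):
    return "payment failed", 402
# Short form: {status_code: handler}
errhandler_short = {404: handle_404}
# Long form: list of dicts, mixing status codes and exception classes (3.4.0+).
errhandler_long = [
    {"error": 404, "handler": handle_404},
    {"error": PaymentError, "handler": handle_payment},
]
def register():
    return {"errhandler": errhandler_long}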
|
https://github.com/staugur/flask-pluginkit/blob/be3175a4ecfd5b9cc9ca4d027346026698409017/flask_pluginkit/pluginkit.py#L816-L880
|
import logging
import warnings
from os import getcwd, listdir, remove
from os.path import join, dirname, abspath, isdir, isfile, splitext
from itertools import chain
from jinja2 import ChoiceLoader, FileSystemLoader
from flask import (
Blueprint,
render_template,
render_template_string,
send_from_directory,
abort,
url_for,
Markup,
current_app,
)
from .utils import isValidPrefix, isValidSemver, Attribution, DcpManager
from ._compat import string_types, iteritems, text_type
from .exceptions import PluginError, VersionError, PEPError, TemplateNotFound
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class PluginManager(object):
def __init__(
self, app=None, plugins_base=None, plugins_folder="plugins", **options
):
self.logger = options.get("logger", logging.getLogger(__name__))
self.stpl = options.get("stpl", False)
if "stpl_reverse" in options:
warnings.warn(
"stpl_reverse: will be removed in the next minor version,"
" please use `stpl` instead.",
)
self.stpl_reverse = options.get("stpl_reverse", False)
if self.stpl in ("asc", "desc", "ASC", "DESC"):
self.stpl = True
self.stpl_reverse = False if self.stpl in ("asc", "ASC") else True
self.plugin_packages = options.get("plugin_packages") or []
if not isinstance(self.plugin_packages, (tuple, list)):
raise PluginError("Invalid plugin_packages")
self.static_endpoint = options.get("static_endpoint") or "assets"
self.static_url_path = (
options.get("static_url_path") or "/%s" % self.static_endpoint
)
if not isValidPrefix(self.static_url_path):
raise PluginError("Invalid static_url_path")
self.pluginkit_config = options.get("pluginkit_config") or {}
if not isinstance(self.pluginkit_config, dict):
raise PluginError("Invalid pluginkit_config")
self.__pet_handlers = {
"tep": self._tep_handler,
"hep": self._hep_handler,
"bep": self._bep_handler,
"vep": self._vep_handler,
"cvep": self._cvep_handler,
"errhandler": self._error_handler,
"filter": self._filter_handler,
"tcp": self._context_processor_handler,
"p3": self._p3_handler,
}
self.__het_allow_hooks = {
"before_request": self.__before_request_hook_handler,
"after_request": self.__after_request_hook_handler,
"teardown_request": self.__teardown_request_hook_handler,
"before_first_request": self.__before_fist_request_hook_handler,
}
self._dcp_manager = DcpManager()
if "try_compatible" in options:
warnings.warn(
"try_compatible: will be removed in the next minor version"
)
self._try_compatible = options.get("try_compatible", True)
self.__plugins = []
if app is not None:
self.init_app(app, plugins_base, plugins_folder)
def init_app(self, app, plugins_base=None, plugins_folder="plugins"):
self.plugins_folder = plugins_folder
self.plugins_abspath = join(
plugins_base or getattr(app, "root_path", getcwd()),
self.plugins_folder,
)
self.logger.debug(
"Start plugins initialization, local plugins path: %s, third party"
"-plugins: %s" % (self.plugins_abspath, self.plugin_packages)
)
self.__scan_third_plugins()
self.__scan_affiliated_plugins()
self.__preprocess_all_plugins()
app.jinja_env.globals.update(
emit_tep=self.emit_tep,
emit_assets=self.emit_assets,
emit_config=self.emit_config,
emit_dcp=self._dcp_manager.emit,
)
app.jinja_loader = ChoiceLoader(
[
app.jinja_loader,
FileSystemLoader(self.__get_valid_tpl),
]
)
app.add_url_rule(
self.static_url_path + "/<string:plugin_name>/<path:filename>",
endpoint=self.static_endpoint,
view_func=self._send_plugin_static_file,
)
for hep, handler in iteritems(self.__het_allow_hooks):
_deco_func = getattr(app, hep)
_deco_func(handler)
_plugin_bps = {}
_nested_bps = {}
for bep in self.get_enabled_beps:
bp = bep["blueprint"]
parent = bep.get("parent")
if parent:
_nested_bps.setdefault(parent, []).append(bep)
else:
_plugin_bps[bp.name] = bep
for parent, beps in iteritems(_nested_bps):
if parent not in _plugin_bps:
raise PEPError("No parent blueprint found named %s" % parent)
pbp = _plugin_bps[parent]["blueprint"]
for bep in beps:
bp = bep["blueprint"]
prefix = bep["prefix"]
pbp.register_blueprint(bp, url_prefix=prefix)
for bep in _plugin_bps.values():
bp = bep["blueprint"]
prefix = bep["prefix"]
app.register_blueprint(bp, url_prefix=prefix)
for vep in self.get_enabled_veps:
rule, viewfunc, endpoint, options, _bp = vep
if _bp:
if _bp in app.blueprints:
s = app.blueprints[_bp].make_setup_state(app, {})
s.add_url_rule(rule, endpoint, viewfunc, **options)
else:
raise PEPError(
"The required blueprint({}) was not found when "
"registering vep with {}".format(_bp, rule)
)
else:
app.add_url_rule(rule, endpoint, viewfunc, **options)
for cvep in self.get_enabled_cveps:
viewclass, options = cvep
viewclass.register(app, **options)
for tf in self.get_enabled_filters:
if tf and tf[0] not in app.jinja_env.filters:
app.add_template_filter(tf[-1], tf[0])
for (err_code_exc, errview) in self.get_enabled_errhandlers:
app.register_error_handler(err_code_exc, errview)
app.template_context_processors[None].append(
lambda: {
k: v
for tcp in self.get_enabled_tcps
for k, v in iteritems(tcp)
}
)
app.extensions = getattr(app, "extensions", None) or {}
app.extensions["pluginkit"] = self
def __scan_third_plugins(self):
if self.plugin_packages and isinstance(
self.plugin_packages, (list, tuple)
):
for package_name in self.plugin_packages:
self.logger.debug(
"find third plugin package: %s" % package_name
)
try:
plugin = __import__(package_name)
except ImportError as e:
raise PluginError(e)
else:
plugin_abspath = dirname(abspath(plugin.__file__))
self.__load_plugin(plugin, plugin_abspath, package_name)
def __scan_affiliated_plugins(self):
if isdir(self.plugins_abspath) and isfile(
join(self.plugins_abspath, "__init__.py")
):
for package_name in listdir(self.plugins_abspath):
package_abspath = join(self.plugins_abspath, package_name)
if isdir(package_abspath) and isfile(
join(package_abspath, "__init__.py")
):
self.logger.debug(
"find local plugin package: %s" % package_name
)
plugin = __import__(
"%s.%s" % (self.plugins_folder, package_name),
fromlist=[
self.plugins_folder,
],
)
self.__load_plugin(plugin, package_abspath, package_name)
def __try_load_oldmeta(self, p_obj):
if hasattr(p_obj, "register"):
return
resp = {}
if hasattr(p_obj, "getPluginClass"):
p = p_obj.getPluginClass()()
if hasattr(p, "register_tep"):
resp["tep"] = p.register_tep()
if hasattr(p, "register_hep"):
heps = p.register_hep()
if isinstance(heps, dict):
resp["hep"] = {
hep_name.replace("_hook", ""): hep_func
for hep_name, hep_func in iteritems(heps)
}
else:
resp["hep"] = heps
if hasattr(p, "register_bep"):
resp["bep"] = p.register_bep()
else:
raise PluginError("Legacy plugin metadata error")
p_obj.register = lambda: resp
def __load_plugin(self, p_obj, package_abspath, package_name):
if self._try_compatible:
self.__try_load_oldmeta(p_obj)
if (
hasattr(p_obj, "__plugin_name__")
and hasattr(p_obj, "__version__")
and hasattr(p_obj, "__author__")
and hasattr(p_obj, "register")
):
pets = p_obj.register()
if isinstance(pets, dict):
plugin_info = self._get_plugin_meta(
p_obj, package_abspath, package_name
)
if plugin_info.plugin_state == "enabled":
for pet, value in iteritems(pets):
try:
self.__pet_handlers[pet](plugin_info, value)
except KeyError:
raise PEPError(
"The plugin %s found an invalid "
"extension point called %s"
% (plugin_info.plugin_name, pet)
)
self.__plugins.append(plugin_info)
else:
raise PEPError(
"When loading %s, the register returns the wrong type, "
"it should be a dict."
% getattr(p_obj, "__plugin_name__", package_name)
)
else:
raise PEPError(
"The plugin %s metadata error"
% getattr(p_obj, "__plugin_name__", package_name)
)
def _get_plugin_meta(self, p_obj, package_abspath, package_name):
if not isValidSemver(p_obj.__version__):
raise VersionError(
"The version number of %s is not compliant, "
"please refer to https://semver.org" % package_name
)
try:
plugin_state = p_obj.__state__
except AttributeError:
plugin_state = "enabled"
if isfile(join(package_abspath, "ENABLED")):
plugin_state = "enabled"
if isfile(join(package_abspath, "DISABLED")):
plugin_state = "disabled"
return Attribution(
{
"plugin_name": p_obj.__plugin_name__,
"plugin_package_name": package_name,
"plugin_package_abspath": package_abspath,
"plugin_description": getattr(p_obj, "__description__", None),
"plugin_version": p_obj.__version__,
"plugin_author": p_obj.__author__,
"plugin_url": getattr(p_obj, "__url__", None),
"plugin_license": getattr(p_obj, "__license__", None),
"plugin_license_file": getattr(
p_obj, "__license_file__", None
),
"plugin_readme_file": getattr(p_obj, "__readme_file__", None),
"plugin_state": plugin_state,
"plugin_tpl_path": join(package_abspath, "templates"),
"plugin_ats_path": join(package_abspath, "static"),
"plugin_tep": {},
"plugin_hep": {},
"plugin_bep": {},
"plugin_vep": [],
"plugin_cvep": [],
"plugin_filter": [],
"plugin_errhandler": [],
"plugin_tcp": {},
"plugin_p3": {},
}
)
def _tep_handler(self, plugin_info, tep_rule):
if isinstance(tep_rule, dict):
plugin_tep = {}
for event, tpl in iteritems(tep_rule):
if isinstance(tpl, string_types):
if splitext(tpl)[-1] in (".html", ".htm", ".xhtml"):
if isfile(
join(
plugin_info.plugin_tpl_path,
tpl.split("@")[-1]
if "@" in tpl and self.stpl is True
else tpl,
)
):
plugin_tep[event] = dict(fil=tpl)
else:
raise TemplateNotFound(
"TEP Template File Not Found: %s" % tpl
)
else:
if not isinstance(tpl, text_type):
tpl = tpl.decode("utf-8")
plugin_tep[event] = dict(cod=tpl)
else:
raise PEPError(
"The tep content is invalid for %s"
% plugin_info.plugin_name
)
plugin_info["plugin_tep"] = plugin_tep
self.logger.debug("Register TEP Success")
else:
raise PEPError(
"The tep rule is invalid for %s, "
"it should be a dict." % plugin_info.plugin_name
)
def _hep_handler(self, plugin_info, hep_rule):
if isinstance(hep_rule, dict):
plugin_hep = {}
for event, func in iteritems(hep_rule):
if event in self.__het_allow_hooks.keys():
if callable(func):
plugin_hep[event] = func
else:
raise PEPError(
"The hep content cannot be called back "
"for %s" % plugin_info.plugin_name
)
else:
raise PEPError(
"The hep type is invalid for %s"
% plugin_info.plugin_name
)
plugin_info["plugin_hep"] = plugin_hep
self.logger.debug("Register HEP Success")
else:
raise PEPError(
"The hep rule is invalid for %s, "
"it should be a dict." % plugin_info.plugin_name
)
def _bep_handler(self, plugin_info, bep_rule):
if (
isinstance(bep_rule, dict)
and "blueprint" in bep_rule
and "prefix" in bep_rule
):
try:
bp = bep_rule["blueprint"]
prefix = bep_rule["prefix"]
except KeyError:
raise PEPError(
"The bep rule is invalid for %s" % plugin_info.plugin_name
)
if not isinstance(bp, Blueprint):
raise PEPError(
"The bep blueprint is invalid for %s"
% plugin_info.plugin_name
)
if not isValidPrefix(prefix, allow_none=True):
raise PEPError(
"The bep prefix is invalid for %s"
% plugin_info.plugin_name
)
plugin_info["plugin_bep"] = bep_rule
self.logger.debug("Register BEP Success")
else:
raise PEPError(
"The bep rule is invalid for %s, "
"it should be a dict." % plugin_info.plugin_name
)
def _vep_handler(self, plugin_info, vep_rule):
if isinstance(vep_rule, dict):
vep_rule = (vep_rule,)
if isinstance(vep_rule, (list, tuple)):
plugin_vep = []
for options in vep_rule:
try:
rule = options.pop("rule")
view_func = options.pop("view_func")
except KeyError:
raise PEPError(
"The vep rule is invalid for %s"
% plugin_info.plugin_name
)
else:
endpoint = options.pop("endpoint", None)
_bp = options.pop("_blueprint", None)
plugin_vep.append(
(rule, view_func, endpoint, options, _bp)
)
plugin_info["plugin_vep"] = plugin_vep
self.logger.debug("Register VEP Success")
else:
raise PEPError(
"The vep rule is invalid for %s, it should be "
"a list or tuple." % plugin_info.plugin_name
)
def _cvep_handler(self, plugin_info, cvep_rule):
if isinstance(cvep_rule, dict):
cvep_rule = (cvep_rule,)
if isinstance(cvep_rule, (list, tuple)):
plugin_cvep = []
for options in cvep_rule:
try:
view_class = options.pop("view_class")
except KeyError:
raise PEPError(
"The cvep rule is invalid for %s"
% plugin_info.plugin_name
)
else:
plugin_cvep.append((view_class, options))
plugin_info["plugin_cvep"] = plugin_cvep
self.logger.debug("Register CVEP Success")
else:
raise PEPError(
"The cvep rule is invalid for %s, it should be "
"a list or tuple." % plugin_info.plugin_name
)
def _filter_handler(self, plugin_info, filter_rule):
if isinstance(filter_rule, (list, tuple)):
_filter_rule = {}
for f in filter_rule:
name, func = f if isinstance(f, (tuple, list)) else (None, f)
if not callable(func):
raise PEPError(
"The filter found a func, that cannot be called for %s"
% plugin_info.plugin_name
)
if not name:
name = func.__name__
_filter_rule[name] = func
filter_rule = _filter_rule
if isinstance(filter_rule, dict):
plugin_filter = []
for name, func in iteritems(filter_rule):
if callable(func):
plugin_filter.append((name, func))
else:
raise PEPError(
"The filter cannot be called for %s."
% plugin_info.plugin_name
)
plugin_info["plugin_filter"] = plugin_filter
else:
raise PEPError(
"The filter rule is invalid for %s, "
"it should be a dict." % plugin_info.plugin_name
)
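The handlers above only validate and store the rule structures a plugin registers (hep: event name -> callable, bep: dict with a Blueprint and a URL prefix, vep/cvep: lists of dicts carrying a rule/view_func or view_class, filter: a dict or a list of callables or (name, callable) pairs). A minimal sketch of a registration dict that would pass these checks follows; the blueprint, view function, filter, and event name are hypothetical, and the accepted hep event names depend on the manager's allowed-hooks table:
from flask import Blueprint, jsonify
example_bp = Blueprint("example", __name__)  # hypothetical blueprint
def hello_view():
    return jsonify(msg="hello")
def reverse_filter(s):  # hypothetical template filter
    return s[::-1]
plugin_rules = dict(
    hep={"before_request": lambda: None},                 # event -> callable
    bep={"blueprint": example_bp, "prefix": "/example"},  # Blueprint + prefix
    vep=[{"rule": "/hello", "view_func": hello_view}],    # rule + view_func per entry
    filter=[("reverse", reverse_filter)],                 # (name, callable) pairs
)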
|
BSD 3-Clause New or Revised License
|
gugarosa/opfython
|
opfython/math/distance.py
|
min_symmetric_distance
|
python
|
def min_symmetric_distance(x, y):
dist1 = (x - y) ** 2 / x
dist2 = (x - y) ** 2 / y
return np.minimum(np.sum(dist1), np.sum(dist2))
|
Calculates the Min Symmetric Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Min Symmetric Distance between x and y.
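A small worked example of the formula, using illustrative values and plain NumPy (without the decorators applied elsewhere in this module):
import numpy as np
x = np.array([1.0, 2.0, 4.0])
y = np.array([2.0, 2.0, 2.0])
dist1 = np.sum((x - y) ** 2 / x)  # 1/1 + 0/2 + 4/4 = 2.0
dist2 = np.sum((x - y) ** 2 / y)  # 1/2 + 0/2 + 4/2 = 2.5
print(min(dist1, dist2))          # 2.0 -> the Min Symmetric Distance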
|
https://github.com/gugarosa/opfython/blob/9c737952f5bd109d5a8a50fd438fcac3d9c3e42b/opfython/math/distance.py#L625-L640
|
import math
import numpy as np
from numba import njit
import opfython.utils.constants as c
import opfython.utils.decorator as d
@d.avoid_zero_division
@njit(cache=True)
def additive_symmetric_distance(x, y):
dist = ((x - y) ** 2 * (x + y)) / (x * y)
return 2 * np.sum(dist)
@njit(cache=True)
def average_euclidean_distance(x, y):
dist = squared_euclidean_distance(x, y)
return (dist / x.shape[0]) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def bhattacharyya_distance(x, y):
dist = -math.log(np.sum((x * y) ** 0.5))
return dist
@d.avoid_zero_division
@njit(cache=True)
def bray_curtis_distance(x, y):
dist = np.sum(np.fabs(x - y)) / np.sum(x + y)
return dist
@d.avoid_zero_division
@njit(cache=True)
def canberra_distance(x, y):
dist = np.fabs(x - y) / (np.fabs(x) + np.fabs(y))
return np.sum(dist)
@njit(cache=True)
def chebyshev_distance(x, y):
dist = np.fabs(x - y)
return np.amax(dist)
@d.avoid_zero_division
@njit(cache=True)
def chi_squared_distance(x, y):
dist = ((x - y) ** 2 / (x + y))
return 0.5 * np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def chord_distance(x, y):
dist = 2 - 2 * (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5))
return dist ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def clark_distance(x, y):
dist = ((x - y) / np.fabs(x + y)) ** 2
return np.sum(dist) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def cosine_distance(x, y):
dist = 1 - (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5))
return dist
@d.avoid_zero_division
@njit(cache=True)
def dice_distance(x, y):
dist = 2 * np.sum(x * y) / (np.sum(x ** 2) + np.sum(y ** 2))
return 1 - dist
@d.avoid_zero_division
@njit(cache=True)
def divergence_distance(x, y):
dist = (x - y) ** 2 / (x + y) ** 2
return 2 * np.sum(dist)
@njit(cache=True)
def euclidean_distance(x, y):
dist = (x - y) ** 2
return np.sum(dist) ** 0.5
@njit(cache=True)
def gaussian_distance(x, y, gamma=1):
dist = (x - y) ** 2
return math.exp(-gamma * np.sum(dist) ** 0.5)
@njit(cache=True)
def gower_distance(x, y):
dist = np.fabs(x - y)
return np.sum(dist) / x.shape[0]
@njit(cache=True)
def hamming_distance(x, y):
dist = np.count_nonzero(x != y)
return dist
@d.avoid_zero_division
@njit(cache=True)
def hassanat_distance(x, y):
dist = np.zeros(x.shape[0])
mask = np.minimum(x, y) >= 0
for i in range(x.shape[0]):
if mask[i]:
dist[i] = 1 - (1 + np.minimum(x[i], y[i])) / (1 + np.maximum(x[i], y[i]))
else:
dist[i] = 1 - (1 + np.minimum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i]))) / (1 + np.maximum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i])))
return np.sum(dist)
@njit(cache=True)
def hellinger_distance(x, y):
dist = 2 * (x ** 0.5 - y ** 0.5) ** 2
return np.sum(dist) ** 0.5
@d.avoid_zero_division
def jaccard_distance(x, y):
dist = np.sum((x - y) ** 2) / (np.sum(x ** 2) + np.sum(y ** 2) - np.sum(x * y))
return dist
@d.avoid_zero_division
@njit(cache=True)
def jeffreys_distance(x, y):
dist = (x - y) * np.log(x / y)
return np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def jensen_distance(x, y):
dist = (x * np.log(x) + y * np.log(y)) / 2 - ((x + y) / 2) * np.log((x + y) / 2)
return 0.5 * np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def jensen_shannon_distance(x, y):
dist1 = x * np.log((2 * x) / (x + y))
dist2 = y * np.log((2 * y) / (x + y))
return 0.5 * (np.sum(dist1) + np.sum(dist2))
@d.avoid_zero_division
@njit(cache=True)
def k_divergence_distance(x, y):
dist = x * np.log((2 * x) / (x + y))
return np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def kulczynski_distance(x, y):
dist = np.sum(np.fabs(x - y)) / np.sum(np.minimum(x, y))
return dist
@d.avoid_zero_division
@njit(cache=True)
def kullback_leibler_distance(x, y):
dist = x * np.log(x / y)
return np.sum(dist)
@njit(cache=True)
def log_euclidean_distance(x, y):
dist = euclidean_distance(x, y)
return c.MAX_ARC_WEIGHT * math.log(dist + 1)
@njit(cache=True)
def log_squared_euclidean_distance(x, y):
dist = squared_euclidean_distance(x, y)
return c.MAX_ARC_WEIGHT * math.log(dist + 1)
@njit(cache=True)
def lorentzian_distance(x, y):
dist = np.log(1 + np.fabs(x - y))
return np.sum(dist)
@njit(cache=True)
def manhattan_distance(x, y):
dist = np.fabs(x - y)
return np.sum(dist)
@njit(cache=True)
def matusita_distance(x, y):
dist = (x ** 0.5 - y ** 0.5) ** 2
return np.sum(dist) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def max_symmetric_distance(x, y):
dist1 = (x - y) ** 2 / x
dist2 = (x - y) ** 2 / y
return np.maximum(np.sum(dist1), np.sum(dist2))
@d.avoid_zero_division
@njit(cache=True)
def mean_censored_euclidean_distance(x, y):
dist = squared_euclidean_distance(x, y)
diff = np.count_nonzero(x + y != 0)
return (dist / diff) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
|
Apache License 2.0
|
jnederlo/dfs_optimizers
|
nhl/draftkings.py
|
Draftkings.type_1
|
python
|
def type_1(self, lineups):
prob = pulp.LpProblem('NHL', pulp.LpMaximize)
skaters_lineup = [pulp.LpVariable("player_{}".format(i+1), cat="Binary") for i in range(self.num_skaters)]
goalies_lineup = [pulp.LpVariable("goalie_{}".format(i+1), cat="Binary") for i in range(self.num_goalies)]
prob += (pulp.lpSum(skaters_lineup[i] for i in range(self.num_skaters)) == 8)
prob += (pulp.lpSum(goalies_lineup[i] for i in range(self.num_goalies)) == 1)
prob += (2 <= pulp.lpSum(self.positions['C'][i]*skaters_lineup[i] for i in range(self.num_skaters)))
prob += (pulp.lpSum(self.positions['C'][i]*skaters_lineup[i] for i in range(self.num_skaters)) <= 3)
prob += (3 <= pulp.lpSum(self.positions['W'][i]*skaters_lineup[i] for i in range(self.num_skaters)))
prob += (pulp.lpSum(self.positions['W'][i]*skaters_lineup[i] for i in range(self.num_skaters)) <= 4)
prob += (2 <= pulp.lpSum(self.positions['D'][i]*skaters_lineup[i] for i in range(self.num_skaters)))
prob += (pulp.lpSum(self.positions['D'][i]*skaters_lineup[i] for i in range(self.num_skaters)) <= 3)
prob += ((pulp.lpSum(self.skaters_df.loc[i, 'sal']*skaters_lineup[i] for i in range(self.num_skaters)) +
pulp.lpSum(self.goalies_df.loc[i, 'sal']*goalies_lineup[i] for i in range(self.num_goalies))) <= self.salary_cap)
used_team = [pulp.LpVariable("u{}".format(i+1), cat="Binary") for i in range(self.num_teams)]
for i in range(self.num_teams):
prob += (used_team[i] <= pulp.lpSum(self.skaters_teams[k][i]*skaters_lineup[k] for k in range(self.num_skaters)))
prob += (pulp.lpSum(self.skaters_teams[k][i]*skaters_lineup[k] for k in range(self.num_skaters)) <= 6*used_team[i])
prob += (pulp.lpSum(used_team[i] for i in range(self.num_teams)) >= 3)
for i in range(self.num_goalies):
prob += (6*goalies_lineup[i] + pulp.lpSum(self.goalies_opponents[k][i]*skaters_lineup[k] for k in range(self.num_skaters)) <= 6)
line_stack_3 = [pulp.LpVariable("ls3{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
for i in range(self.num_lines):
prob += (3*line_stack_3[i] <= pulp.lpSum(self.team_lines[k][i]*skaters_lineup[k] for k in range(self.num_skaters)))
prob += (pulp.lpSum(line_stack_3[i] for i in range(self.num_lines)) >= 1)
line_stack_2 = [pulp.LpVariable("ls2{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
for i in range(self.num_lines):
prob += (2*line_stack_2[i] <= pulp.lpSum(self.team_lines[k][i]*skaters_lineup[k] for k in range(self.num_skaters)))
prob += (pulp.lpSum(line_stack_2[i] for i in range(self.num_lines)) >= 2)
for i in range(len(lineups)):
prob += ((pulp.lpSum(lineups[i][k]*skaters_lineup[k] for k in range(self.num_skaters)) +
pulp.lpSum(lineups[i][self.num_skaters+k]*goalies_lineup[k] for k in range(self.num_goalies))) <= self.overlap)
prob += pulp.lpSum((pulp.lpSum(self.skaters_df.loc[i, 'proj']*skaters_lineup[i] for i in range(self.num_skaters)) +
pulp.lpSum(self.goalies_df.loc[i, 'proj']*goalies_lineup[i] for i in range(self.num_goalies))))
status = prob.solve(self.solver)
if status != pulp.LpStatusOptimal:
print('Only {} feasible lineups produced'.format(len(lineups)), '\n')
return None
lineup_copy = []
for i in range(self.num_skaters):
if skaters_lineup[i].varValue >= 0.9 and skaters_lineup[i].varValue <= 1.1:
lineup_copy.append(1)
else:
lineup_copy.append(0)
for i in range(self.num_goalies):
if goalies_lineup[i].varValue >= 0.9 and goalies_lineup[i].varValue <= 1.1:
lineup_copy.append(1)
else:
lineup_copy.append(0)
return lineup_copy
|
Sets up the pulp LP problem, adds all of the constraints and solves for the maximum value for each generated lineup.
Type 1 constraints include:
- 3-2 stacking (1 line of 3 players and one separate line of 2 players)
- goalie stacking
- team stacking
Returns a single lineup (i.e. all of the players either set to 0 or 1) indicating whether each player was included in the lineup or not.
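A hedged driver sketch for generating several lineups with this method; the import path, CSV file names, and solver choice are assumptions for illustration. Each solved lineup is fed back in so the overlap constraint can rule out near-duplicates:
import pulp
from nhl.draftkings import Draftkings  # assumed import path
dk = Draftkings(num_lineups=5, overlap=4, solver=pulp.PULP_CBC_CMD(msg=0),
                players_filepath="skaters.csv", goalies_filepath="goalies.csv",
                output_filepath="lineups.csv")
lineups = []
for _ in range(dk.num_lineups):
    lineup = dk.type_1(lineups)   # returns None once no further feasible lineup exists
    if lineup is None:
        break
    lineups.append(lineup)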
|
https://github.com/jnederlo/dfs_optimizers/blob/6b115061cc0543ae654555e26f7d6acf941f15e6/nhl/draftkings.py#L14-L100
|
import pulp
from nhl.optimizer import Optimizer
class Draftkings(Optimizer):
def __init__(self, num_lineups, overlap, solver, players_filepath, goalies_filepath, output_filepath):
super().__init__(num_lineups, overlap, solver, players_filepath, goalies_filepath, output_filepath)
self.salary_cap = 50000
self.header = ['C', 'C', 'W', 'W', 'W', 'D', 'D', 'G', 'UTIL']
|
MIT License
|
sassoftware/python-sasctl
|
src/sasctl/_services/cas_management.py
|
CASManagement.list_caslibs
|
python
|
def list_caslibs(cls, server, filter_=None):
return (
cls._get_rel(server, 'caslibs', func=cls.get_server, filter_=filter_) or []
)
|
List caslibs available on a server.
Parameters
----------
server : str or dict
Name, ID, or dictionary representation of the server.
filter_ : str, optional
A `formatted <https://developer.sas.com/reference/filtering>`_
filter string.
Returns
-------
list
A collection of :class:`.RestObj` instances.
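A hedged usage sketch; the host, credentials, server name, and filter value are placeholders, and the filter syntax is assumed to follow the SAS filtering reference linked above:
from sasctl import Session
from sasctl.services import cas_management as cas
with Session("example.sas.com", "user", "password"):   # placeholder connection
    for caslib in cas.list_caslibs("cas-shared-default",
                                   filter_='eq(name,"casuser")'):
        print(caslib.name)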
|
https://github.com/sassoftware/python-sasctl/blob/ab6387b86a26f6b0b08fbb36d0c94fe18be59b5f/src/sasctl/_services/cas_management.py#L25-L44
|
import os
from .service import Service
DEFAULT_SERVER = 'cas-shared-default'
DEFAULT_CASLIB = 'casuser'
class CASManagement(Service):
_SERVICE_ROOT = '/casManagement'
list_servers, get_server, _, _ = Service._crud_funcs('/servers', 'server')
@classmethod
|
Apache License 2.0
|
tenable/pytenable
|
tenable/sc/audit_files.py
|
AuditFileAPI.export_audit
|
python
|
def export_audit(self, id, fobj=None):
resp = self._api.get('auditFile/{}/export'.format(
self._check('id', id, int)), stream=True)
if not fobj:
fobj = BytesIO()
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
return fobj
|
Exports an Audit File.
:sc-api:`audit file: export <AuditFile.html#AuditFileRESTReference-/auditFile/{id}/export>`
Args:
id (int): The audit file numeric identifier.
fobj (FileObject, optional):
The file-like object to write the resulting file into. If
no file-like object is provided, a BytesIO object containing the
downloaded file will be returned. Be aware that the default
option of using a BytesIO object means that the file will be
stored in memory, and it's generally recommended to pass an
actual file object to write to instead.
Returns:
:obj:`FileObject`:
The file-like object with the resulting zipped report.
Examples:
>>> with open('example.zip', 'wb') as fobj:
... sc.audit_files.export_audit(1, fobj)
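As a complement, a hedged sketch of the in-memory default (host and credentials are placeholders; newer pyTenable releases may prefer key-based authentication):
from zipfile import ZipFile
from tenable.sc import TenableSC
sc = TenableSC("securitycenter.example.com")  # placeholder host
sc.login("username", "password")              # placeholder credentials
data = sc.audit_files.export_audit(1)         # no fobj -> a BytesIO is returned
with ZipFile(data) as archive:
    print(archive.namelist())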
|
https://github.com/tenable/pytenable/blob/32b925f0cebd4d3032f85e65571dd9593778b9f1/tenable/sc/audit_files.py#L319-L357
|
from .base import SCEndpoint
from io import BytesIO
from os.path import basename
class AuditFileAPI(SCEndpoint):
def _constructor(self, **kw):
if 'name' in kw:
self._check('name', kw['name'], str)
if 'description' in kw:
self._check('description', kw['description'], str)
if 'type' in kw:
self._check('type', kw['type'], str,
choices=['', 'scapWindows', 'scapLinux'])
if 'template' in kw:
kw['auditFileTemplate'] = {'id': self._check(
'template', kw['template'], int)}
del(kw['template'])
if 'vars' in kw:
kw['variables'] = [{
'name': self._check('var:name', k, str),
'value': self._check('var:value', v, str)
} for k,v in self._check('vars', kw['vars'], dict).items()]
del(kw['vars'])
if 'filename' in kw:
self._check('filename', kw['filename'], str)
if 'orig_filename' in kw:
kw['originalFilename'] = self._check(
'orig_filename', kw['orig_filename'], str)
del(kw['orig_filename'])
if 'version' in kw:
self._check('version', kw['version'], str,
choices=['1.0', '1.1', '1.2'])
if 'benchmark' in kw:
kw['benchmarkName'] = self._check('benchmark', kw['benchmark'], str)
del(kw['benchmark'])
if 'profile' in kw:
kw['profileName'] = self._check('profile', kw['profile'], str)
del(kw['profile'])
if 'data_stream' in kw:
kw['dataStreamName'] = self._check(
'data_stream', kw['data_stream'], str)
del(kw['data_stream'])
if 'tailoring_filename' in kw:
kw['tailoringFilename'] = self._check(
'tailoring_filename', kw['tailoring_filename'], str)
del(kw['tailoring_filename'])
if 'tailoring_orig_filename' in kw:
kw['tailoringOriginalFilename'] = self._check(
'tailoring_orig_filename', kw['tailoring_orig_filename'], str)
del(kw['tailoring_orig_filename'])
return kw
def create(self, name, audit_file=None, tailoring_file=None, **kw):
kw['name'] = name
if audit_file:
if hasattr(audit_file, 'name'):
kw['orig_filename'] = basename(audit_file.name)
kw['filename'] = self._api.files.upload(audit_file)
if tailoring_file:
if hasattr(tailoring_file, 'name'):
kw['tailoring_orig_filename'] = basename(tailoring_file.name)
kw['tailoring_filename'] = self._api.files.upload(tailoring_file)
payload = self._constructor(**kw)
return self._api.post('auditFile', json=payload).json()['response']
def details(self, id, fields=None):
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str) for f in fields])
return self._api.get('auditFile/{}'.format(self._check('id', id, int)),
params=params).json()['response']
def edit(self, id, audit_file=None, tailoring_file=None, **kw):
if audit_file:
if hasattr(audit_file, 'name'):
kw['orig_filename'] = basename(audit_file.name)
kw['filename'] = self._api.files.upload(audit_file)
if tailoring_file:
if hasattr(tailoring_file, 'name'):
kw['tailoring_orig_filename'] = basename(tailoring_file.name)
kw['tailoring_filename'] = self._api.files.upload(tailoring_file)
payload = self._constructor(**kw)
return self._api.patch('auditFile/{}'.format(
self._check('id', id, int)), json=payload).json()['response']
def delete(self, id):
return self._api.delete('auditFile/{}'.format(
self._check('id', id, int))).json()['response']
def list(self, fields=None):
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('auditFile', params=params).json()['response']
|
MIT License
|