repository_name (string, 7–107 chars) | function_path (string, 4–190 chars) | function_identifier (string, 1–236 chars) | language (1 class) | function (string, 9–647k chars) | docstring (string, 5–488k chars) | function_url (string, 71–285 chars) | context (string, 0–2.51M chars) | license (5 classes)
---|---|---|---|---|---|---|---|---|
sec-i/ecocontrol
|
server/forecasting/weather.py
|
CurrentWeatherForecast.get_forecast_temperature_daily
|
python
|
def get_forecast_temperature_daily(self, date):
self.forecast_temperatures_daily = self.get_weather_forecast(
hourly=False)
time_passed = int((calendar.timegm(date.timetuple()) - self.env.now) / (60.0 * 60.0))
weight = (time_passed % 24) / 24.0
t0 = min(int(time_passed / 24), len(
self.forecast_temperatures_daily) - 1)
t1 = min(t0 + 1, len(self.forecast_temperatures_daily) - 1)
a0 = self.forecast_temperatures_daily[t0]
a1 = self.forecast_temperatures_daily[t1]
return self.mix(a0, a1, weight)
|
Get the forecast for a given date. This only has day accuracy, but the forecast span is longer.
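A minimal sketch of the blend this method performs (values below are made up; mix is the linear interpolation helper defined in the class):

def mix(a, b, x):
    return a * (1 - x) + b * x

daily = [4.0, 6.0, 9.0]                 # forecast temperatures for day 0, 1, 2
time_passed = 36                        # hours between the target date and env.now
weight = (time_passed % 24) / 24.0      # 0.5 -> halfway through the day
t0 = min(time_passed // 24, len(daily) - 1)
t1 = min(t0 + 1, len(daily) - 1)
print(mix(daily[t0], daily[t1], weight))   # 7.5, halfway between day 1 and day 2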
|
https://github.com/sec-i/ecocontrol/blob/23e88e13447b857e0254d9cd67d1ee7e664b127e/server/forecasting/weather.py#L210-L221
|
import urllib2
import json
import time
import logging
import calendar
from datetime import datetime, timedelta
from server.forecasting.simulation.demodata.old_demands import outside_temperatures_2013, outside_temperatures_2012
from server.forecasting.helpers import cached_data, approximate_index
from server.models import WeatherValue, RealWeatherValue
from django.utils.timezone import utc
logger = logging.getLogger('simulation')
demo_weather = None
current_weather = None
def get_temperature(env, date):
global demo_weather
global current_weather
if env.demo_mode:
if demo_weather == None:
demo_weather = DemoWeather(env)
if env.forecast:
return demo_weather.get_temperature_estimate(date)
else:
return demo_weather.get_temperature(date)
else:
if current_weather == None:
current_weather = CurrentWeatherForecast(env)
return current_weather.get_temperature_estimate(date)
class DemoWeather:
def __init__(self, env=None):
self.env = env
self.forecast_query_date = None
self.forecast_temperatures_3hourly = []
self.forecast_temperatures_daily = []
self.hourly = True
self.cache_day = {}
self.cache_real_values = [[],[]]
self.error_day_cache = {}
def get_temperature(self,date):
if self.cache_real_values == [[],[]]:
real_temps = RealWeatherValue.objects.all()
for entry in real_temps:
self.cache_real_values[0].append(calendar.timegm(entry.timestamp.utctimetuple()))
self.cache_real_values[1].append(float(entry.temperature))
if len(self.cache_real_values[1]) < 2:
raise Exception("not enough weather values in database")
idx = approximate_index(self.cache_real_values[0], calendar.timegm(date.utctimetuple()))
return self.cache_real_values[1][idx]
def get_temperature_estimate(self, target_date):
time_passed = (target_date - datetime.fromtimestamp(self.env.initial_date).replace(tzinfo=utc)).total_seconds() / (60.0 * 60.0 * 24)
initial0 = datetime.fromtimestamp(self.env.initial_date).replace(tzinfo=utc, minute=0,second=0)
initial1 = initial0 + timedelta(hours=1)
target_date = target_date.replace(tzinfo=utc, hour=0,minute=0,second=0)
target_date_key = target_date.strftime("%Y-%m-%d")
if self.error_day_cache.has_key(target_date_key):
return self.error_day_cache[target_date_key][target_date.hour]
if not self.cache_day.has_key(target_date_key):
forecasts_until_now = WeatherValue.objects.filter(timestamp__lte=initial0)
if len(forecasts_until_now) == 0:
return self.get_temperature(target_date)
newest_creation_timestamp = forecasts_until_now.latest('timestamp').timestamp
values0 = WeatherValue.objects.filter(timestamp=newest_creation_timestamp).filter(target_time__range = [target_date.replace(hour=0), target_date.replace(hour=23,minute=59)])
day_values0 = values0.order_by("-timestamp")
test_list = [(float(v.temperature),v.target_time.hour) for v in day_values0]
if len(test_list) < 24:
self.error_day_cache[target_date_key] = self._fill_error_gaps(test_list, target_date)
return self.error_day_cache[target_date_key][target_date.hour]
self.cache_day[target_date_key] = [float(v.temperature) for v in day_values0]
values0 =self.cache_day[target_date_key]
return self.mix(values0[target_date.hour],values0[min(target_date.hour+1,23)], target_date.minute / 60.0)
def _fill_error_gaps(self, input_list, date):
logger.warning("not enough dates in list " + str(len(input_list)) + " " + str(date))
output = [None for i in range(24)]
for temp_date in input_list:
output[temp_date[1]] = temp_date[0]
for i,v in enumerate(output):
if v == None:
if len(self.cache_day) == 0:
output[i] = self.get_temperature(date.replace(hour=i))
else:
latest = sorted(self.cache_day,reverse=True)[0]
output[i] = self.cache_day[latest][i]
return output
def mix(self, a, b, x):
return a * (1 - x) + b * x
def get_date(self):
return time.time()
class CurrentWeatherForecast:
def __init__(self, env=None, city="Berlin"):
self.env = env
self.forecast_query_date = None
self.forecast_temperatures_3hourly = []
self.forecast_temperatures_daily = []
self.hourly = True
self.city_id = self.find_city(city)['default']
def get_temperature_estimate(self, date):
time_passed = (calendar.timegm(date.timetuple()) - self.env.now) / (60.0 * 60.0 * 24)
if time_passed < 0.0 or time_passed > 13.0:
history_data = self.get_average_outside_temperature(date)
return history_data
forecast_data_hourly = self.get_forecast_temperature_hourly(date)
forecast_data_daily = self.get_forecast_temperature_daily(date)
if time_passed < 5.0:
return forecast_data_hourly
else:
return forecast_data_daily
def get_forecast_temperature_hourly(self, date):
self.forecast_temperatures_3hourly = self.get_weather_forecast(
hourly=True)
time_passed = int((calendar.timegm(date.timetuple()) - self.env.now) / (60.0 * 60.0))
weight = (time_passed % 3) / 3.0
t0 = min(int(time_passed / 3), len(
self.forecast_temperatures_3hourly) - 1)
t1 = min(t0 + 1, len(self.forecast_temperatures_3hourly) - 1)
a0 = self.forecast_temperatures_3hourly[t0]
a1 = self.forecast_temperatures_3hourly[t1]
return self.mix(a0, a1, weight)
|
MIT License
|
scottwoodall/python-pgextras
|
pgextras/__init__.py
|
PgExtras.index_size
|
python
|
def index_size(self):
return self.execute(sql.INDEX_SIZE)
|
Show the size of indexes, descending by size.
:returns: list
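A hedged usage sketch; the DSN below is illustrative and assumes a reachable Postgres instance:

from pgextras import PgExtras

with PgExtras(dsn='dbname=mydb user=postgres') as pg:
    for row in pg.index_size():        # rows are named tuples (NamedTupleCursor)
        print(row)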
|
https://github.com/scottwoodall/python-pgextras/blob/d3aa83081d41b14b7c1f003cd837c812a2b5fff5/pgextras/__init__.py#L400-L407
|
import re
from collections import namedtuple
import psycopg2
import psycopg2.extras
from . import sql_constants as sql
__author__ = 'Scott Woodall'
__email__ = 'scott.woodall@gmail.com'
__version__ = '0.2.1'
class PgExtras(object):
def __init__(self, dsn=None):
self.dsn = dsn
self._pg_stat_statement = None
self._cursor = None
self._conn = None
self._is_pg_at_least_nine_two = None
def __enter__(self):
return self
def __exit__(self, type, value, trace):
self.close_db_connection()
@property
def cursor(self):
if self._cursor is None:
self._conn = psycopg2.connect(
self.dsn,
cursor_factory=psycopg2.extras.NamedTupleCursor
)
self._cursor = self._conn.cursor()
return self._cursor
@property
def query_column(self):
if self.is_pg_at_least_nine_two():
return 'query'
else:
return 'current_query'
@property
def pid_column(self):
if self.is_pg_at_least_nine_two():
return 'pid'
else:
return 'procpid'
def pg_stat_statement(self):
if self._pg_stat_statement is None:
results = self.execute(sql.PG_STAT_STATEMENT)
is_available = results[0].available
if is_available:
self._pg_stat_statement = True
else:
self._pg_stat_statement = False
return self._pg_stat_statement
def get_missing_pg_stat_statement_error(self):
Record = namedtuple('Record', 'error')
error = """
pg_stat_statements extension needs to be installed in the
public schema first. This extension is only available on
Postgres versions 9.2 or greater. You can install it by
adding pg_stat_statements to shared_preload_libraries in
postgresql.conf, restarting postgres and then running the
following sql statement in your database:
CREATE EXTENSION pg_stat_statements;
"""
return Record(error)
def is_pg_at_least_nine_two(self):
if self._is_pg_at_least_nine_two is None:
results = self.version()
regex = re.compile("PostgreSQL (\d+\.\d+\.\d+) on")
matches = regex.match(results[0].version)
version = matches.groups()[0]
if version > '9.2.0':
self._is_pg_at_least_nine_two = True
else:
self._is_pg_at_least_nine_two = False
return self._is_pg_at_least_nine_two
def close_db_connection(self):
if self._cursor is not None:
self._cursor.close()
if self._conn is not None:
self._conn.close()
def execute(self, statement):
sql = statement.replace('\n', '')
sql = ' '.join(sql.split())
self.cursor.execute(sql)
return self.cursor.fetchall()
def cache_hit(self):
return self.execute(sql.CACHE_HIT)
def index_usage(self):
return self.execute(sql.INDEX_USAGE)
def calls(self, truncate=False):
if self.pg_stat_statement():
if truncate:
select = """
SELECT CASE
WHEN length(query) < 40
THEN query
ELSE substr(query, 0, 38) || '..'
END AS qry,
"""
else:
select = 'SELECT query,'
return self.execute(sql.CALLS.format(select=select))
else:
return [self.get_missing_pg_stat_statement_error()]
def blocking(self):
return self.execute(
sql.BLOCKING.format(
query_column=self.query_column,
pid_column=self.pid_column
)
)
def outliers(self, truncate=False):
if self.pg_stat_statement():
if truncate:
query = """
CASE WHEN length(query) < 40
THEN query
ELSE substr(query, 0, 38) || '..'
END
"""
else:
query = 'query'
return self.execute(sql.OUTLIERS.format(query=query))
else:
return [self.get_missing_pg_stat_statement_error()]
def vacuum_stats(self):
return self.execute(sql.VACUUM_STATS)
def bloat(self):
return self.execute(sql.BLOAT)
def long_running_queries(self):
if self.is_pg_at_least_nine_two():
idle = "AND state <> 'idle'"
else:
idle = "AND current_query <> '<IDLE>'"
return self.execute(
sql.LONG_RUNNING_QUERIES.format(
pid_column=self.pid_column,
query_column=self.query_column,
idle=idle
)
)
def seq_scans(self):
return self.execute(sql.SEQ_SCANS)
def unused_indexes(self):
return self.execute(sql.UNUSED_INDEXES)
def total_table_size(self):
return self.execute(sql.TOTAL_TABLE_SIZE)
def total_indexes_size(self):
return self.execute(sql.TOTAL_INDEXES_SIZE)
def table_size(self):
return self.execute(sql.TABLE_SIZE)
|
BSD 3-Clause New or Revised License
|
voldemortx/pytorch-auto-drive
|
tools/llamas_evaluation/llamas_official_scripts.py
|
_extend_lane
|
python
|
def _extend_lane(lane, projection_matrix):
filtered_markers = filter(
lambda x: (x['pixel_start']['y'] != x['pixel_end']['y'] and x['pixel_start']['x'] != x['pixel_end']['x']),
lane['markers'])
closest_marker = min(filtered_markers, key=lambda x: x['world_start']['z'])
if closest_marker['world_start']['z'] < 0:
return lane
x_gradient = (closest_marker['world_end']['x'] - closest_marker['world_start']['x']) / (closest_marker['world_end']['z'] - closest_marker['world_start']['z'])
y_gradient = (closest_marker['world_end']['y'] - closest_marker['world_start']['y']) / (closest_marker['world_end']['z'] - closest_marker['world_start']['z'])
zero_x = closest_marker['world_start']['x'] - (closest_marker['world_start']['z'] - 1) * x_gradient
zero_y = closest_marker['world_start']['y'] - (closest_marker['world_start']['z'] - 1) * y_gradient
pixel_x_gradient = (closest_marker['pixel_end']['x'] - closest_marker['pixel_start']['x']) / (closest_marker['pixel_end']['y'] - closest_marker['pixel_start']['y'])
pixel_y_gradient = (closest_marker['pixel_end']['y'] - closest_marker['pixel_start']['y']) / (closest_marker['pixel_end']['x'] - closest_marker['pixel_start']['x'])
pixel_zero_x = closest_marker['pixel_start']['x'] + (716 - closest_marker['pixel_start']['y']) * pixel_x_gradient
if pixel_zero_x < 0:
left_y = closest_marker['pixel_start']['y'] - closest_marker['pixel_start']['x'] * pixel_y_gradient
new_pixel_point = (0, left_y)
elif pixel_zero_x > 1276:
right_y = closest_marker['pixel_start']['y'] + (1276 - closest_marker['pixel_start']['x']) * pixel_y_gradient
new_pixel_point = (1276, right_y)
else:
new_pixel_point = (pixel_zero_x, 716)
new_marker = {
'lane_marker_id': 'FAKE',
'world_end': {
'x': closest_marker['world_start']['x'],
'y': closest_marker['world_start']['y'],
'z': closest_marker['world_start']['z']
},
'world_start': {
'x': zero_x,
'y': zero_y,
'z': 1
},
'pixel_end': {
'x': closest_marker['pixel_start']['x'],
'y': closest_marker['pixel_start']['y']
},
'pixel_start': {
'x': ir(new_pixel_point[0]),
'y': ir(new_pixel_point[1])
}
}
lane['markers'].insert(0, new_marker)
return lane
|
Extends marker closest to the camera
Adds an extra marker that reaches the end of the image
Parameters
----------
lane : iterable of markers
projection_matrix : 3x3 projection matrix
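A small numeric illustration of the pixel-space extrapolation (marker coordinates are made up; 716 and 1276 are the image-border constants used in the function):

start = {'x': 600, 'y': 500}
end = {'x': 640, 'y': 400}
pixel_x_gradient = (end['x'] - start['x']) / (end['y'] - start['y'])   # -0.4
pixel_zero_x = start['x'] + (716 - start['y']) * pixel_x_gradient      # 600 + 216 * -0.4 = 513.6
# 0 <= 513.6 <= 1276, so the fake marker is extended to pixel (513.6, 716), the bottom edge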
|
https://github.com/voldemortx/pytorch-auto-drive/blob/e904fbd67f0d6c930dc5756fe0957043aabcf454/tools/llamas_evaluation/llamas_official_scripts.py#L34-L106
|
import json
import os
import numpy as np
|
BSD 3-Clause New or Revised License
|
megviidetection/video_analyst
|
docs/TEMPLATES/contrib_module/contrib_module_base.py
|
ContribModuleBase.__init__
|
python
|
def __init__(self, ) -> None:
self._hyper_params = self.default_hyper_params
self._state = dict()
|
r"""
Contrib Module Base Class
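A hedged sketch of how the base class is meant to be specialized; the subclass and its hyper-parameter are hypothetical (registration with the Registry objects above is typically done with a decorator and is omitted here):

class MyContribModule(ContribModuleBase):
    default_hyper_params = dict(threshold=0.5)   # hypothetical hyper-parameter

m = MyContribModule()
print(m._hyper_params)   # {'threshold': 0.5}, copied from default_hyper_params by __init__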
|
https://github.com/megviidetection/video_analyst/blob/f4d1bccb1c698961fed3cb70808f1177fab13bdd/docs/TEMPLATES/contrib_module/contrib_module_base.py#L30-L35
|
from abc import ABCMeta, abstractmethod
from typing import List, Dict
import cv2 as cv
import numpy as np
from yacs.config import CfgNode
from videoanalyst.utils import Registry
TRACK_CONTRIB_MODULES = Registry('TRACK_CONTRIB_MODULE')
VOS_CONTRIB_MODULES = Registry('VOS_CONTRIB_MODULE')
TASK_CONTRIB_MODULES = dict(
track=TRACK_CONTRIB_MODULES,
vos=VOS_CONTRIB_MODULES,
)
class ContribModuleBase:
__metaclass__ = ABCMeta
default_hyper_params = dict()
|
MIT License
|
gbrammer/eazy-py
|
eazy/sps.py
|
wuyts_line_Av
|
python
|
def wuyts_line_Av(Acont):
return Acont + 0.9*Acont - 0.15*Acont**2
|
Wuyts prescription for extra extinction towards nebular emission
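A quick numeric check of the prescription (not from the source), for a continuum attenuation of 1 magnitude:

Acont = 1.0
Av_line = Acont + 0.9 * Acont - 0.15 * Acont**2   # 1.75

So the nebular lines see roughly 1.75 mag of extinction when the stellar continuum sees 1.0 mag.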
|
https://github.com/gbrammer/eazy-py/blob/6de8945ec3a37efc391f6511a66b931e7fa4ce7b/eazy/sps.py#L533-L537
|
import os
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.cosmology import WMAP9
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
LINE_CGS = 1.e-17*u.erg/u.second/u.cm**2
try:
from dust_attenuation.baseclasses import BaseAttAvModel
except:
BaseAttAvModel = object
from astropy.modeling import Parameter
import astropy.units as u
try:
from fsps import StellarPopulation
except:
StellarPopulation = object
from . import utils
from . import templates
DEFAULT_LABEL = 'fsps_tau{tau:3.1f}_logz{logzsol:4.2f}_tage{tage:4.2f}_av{Av:4.2f}'
WG00_DEFAULTS = dict(geometry='shell', dust_type='mw',
dust_distribution='homogeneous')
__all__ = ["Zafar15", "ExtinctionModel", "SMC", "Reddy15", "KC13",
"ParameterizedWG00", "ExtendedFsps", "fsps_line_info",
"wuyts_line_Av"]
class ArrayExtCurve(BaseAttAvModel):
name = 'Array'
Rv = 2.21
xarray = np.arange(0.09, 2.2, 0.01)
yarray = xarray*0.+1
left=None
right=None
def Alam(self, mu):
Alam = np.interp(mu, self.xarray, self.yarray,
left=self.left, right=self.right)
return Alam
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.micron
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
alam = self.Alam(mu)
return np.maximum(alam*Av, 0.)
class Zafar15(BaseAttAvModel):
name = 'Zafar+15'
Rv = 2.21
@staticmethod
def Alam(mu, Rv):
x = 1/mu
coeffs = np.array([0.05694421, 0.57778243, -0.12417444])
Alam = np.polyval(coeffs, x)*2.21/Rv
fuv = x > 5.90
if fuv.sum() > 0:
Afuv = 1/Rv*(-4.678+2.355*x + 0.622*(x-5.90)**2) + 1.
Alam[fuv] = Afuv[fuv]
return Alam
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.micron
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
alam = self.Alam(mu, self.Rv)
return np.maximum(alam*Av, 0.)
class ExtinctionModel(BaseAttAvModel):
curve_type = 'smc'
init_curve = None
def _curve_model(self):
if self.init_curve == self.curve_type:
return 0
if self.curve_type.upper() == 'SMC':
from dust_extinction.averages import G03_SMCBar as curve
elif self.curve_type.upper() == 'LMC':
from dust_extinction.averages import G03_LMCAvg as curve
elif self.curve_type.upper() in ['MW','F99']:
from dust_extinction.parameter_averages import F99 as curve
else:
raise ValueError(f'curve_type {self.curve_type} not recognized')
self.curve = curve()
self.init_curve = self.curve_type
def evaluate(self, x, Av):
self._curve_model()
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
curve = self.curve
xr = [x for x in curve.x_range]
xr[0] *= 1.001
xr[1] *= 0.999
print('xxx', xr)
if 'Rv' in curve.param_names:
klam = curve.evaluate(1/np.clip(xinv,
xr[0]/u.micron, xr[1]/u.micron),
Rv=curve.Rv)
else:
klam = curve.evaluate(1/np.clip(xinv,
xr[0]/u.micron, xr[1]/u.micron))
return klam*Av
class SMC(BaseAttAvModel):
from dust_extinction.averages import G03_SMCBar
SMCBar = G03_SMCBar()
bump_ampl = Parameter(description="Amplitude of UV bump",
default=0., min=0., max=10.)
bump_gamma = 0.04
bump_x0 = 0.2175
def uv_bump(self, mu, bump_ampl):
return bump_ampl * (mu**2 * self.bump_gamma**2 /
((mu**2 - self.bump_x0**2)**2 +
mu**2 * self.bump_gamma**2))
def evaluate(self, x, Av, bump_ampl):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
klam = self.SMCBar.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron))
if bump_ampl > 0:
klam += self.uv_bump(xin.to(u.micron).value, bump_ampl)
return klam*Av
class Reddy15(BaseAttAvModel):
name = 'Reddy+15'
bump_ampl = Parameter(description="Amplitude of UV bump",
default=2., min=0., max=10.)
bump_gamma = 0.04
bump_x0 = 0.2175
Rv = 2.505
@staticmethod
def _left(mu):
return -5.726 + 4.004/mu - 0.525/mu**2 + 0.029/mu**3 + 2.505
@staticmethod
def _right(mu):
return -2.672 - 0.010/mu + 1.532/mu**2 - 0.412/mu**3 + 2.505
@property
def koffset(self):
return self._left(0.6) - self._right(0.6)
def evaluate(self, x, Av, bump_ampl):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
left = mu < 0.6
klam = mu*0.
kleft = self._left(mu)
kright = self._right(mu)
klam[left] = self._left(mu[left])
klam[~left] = self._right(mu[~left]) + self.koffset
return np.maximum((klam + self.uv_bump(mu, bump_ampl))*Av/self.Rv, 0.)
def uv_bump(self, mu, bump_ampl):
return bump_ampl * (mu**2 * self.bump_gamma**2 /
((mu**2 - self.bump_x0**2)**2 +
mu**2 * self.bump_gamma**2))
class KC13(BaseAttAvModel):
name = 'Kriek+Conroy2013'
delta = Parameter(description="delta: slope of the power law",
default=0., min=-3., max=3.)
extra_params = {'extra_bump':1., 'beta':-3.2, 'extra_uv':-0.4}
x_range = [0.9e-4, 2.e8]
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
shapes.C00.x_range = self.x_range
shapes.N09.x_range = self.x_range
if self.x_range[0] < 0.18:
shapes.L02.x_range = [self.x_range[0], 0.18]
else:
shapes.L02.x_range = [0.097, 0.18]
self.N09 = shapes.N09()
def evaluate(self, x, Av, delta):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
x0 = 0.2175
gamma = 0.0350
ampl = (0.85 - 1.9*delta)*self.extra_params['extra_bump']
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = x
wred = np.array([2.199e4])*u.Angstrom
if self.N09.param_names[0] == 'x0':
Alam = self.N09.evaluate(xin, x0, gamma, ampl, delta, Av)
Ared = self.N09.evaluate(wred, x0, gamma, ampl, delta, Av)[0]
else:
Alam = self.N09.evaluate(xin, Av, x0, gamma, ampl, delta)
Ared = self.N09.evaluate(wred, Av, x0, gamma, ampl, delta)[0]
red = xin > wred[0]
if red.sum() > 0:
Alam[red] = Ared*(xin[red]/wred[0])**self.extra_params['beta']
blue = xin < 1500*u.Angstrom
if blue.sum() > 0:
plblue = np.ones(len(xin))
wb = xin[blue].to(u.Angstrom).value/1500
plblue[blue] = wb**self.extra_params['extra_uv']
Alam *= plblue
return Alam
class ParameterizedWG00(BaseAttAvModel):
coeffs = {'Av': np.array([-0.001, 0.026, 0.643, -0.016]),
'x0': np.array([ 3.067e-19, -7.401e-18, 6.421e-17, -2.370e-16,
3.132e-16, 2.175e-01]),
'gamma': np.array([ 2.101e-06, -4.135e-05, 2.719e-04,
-7.178e-04, 3.376e-04, 4.270e-02]),
'ampl': np.array([-1.906e-03, 4.374e-02, -3.501e-01,
1.228e+00, -2.151e+00, 8.880e+00]),
'slope': np.array([-4.084e-05, 9.984e-04, -8.893e-03,
3.670e-02, -7.325e-02, 5.891e-02])}
include_bump = 0.25
wg00_coeffs = {'geometry': 'shell',
'dust_type': 'mw',
'dust_distribution': 'homogeneous'}
name = 'ParameterizedWG00'
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
shapes.x_range_N09 = [0.009, 2.e8]
averages.x_range_C00 = [0.009, 2.e8]
averages.x_range_L02 = [0.009, 0.18]
self.N09 = shapes.N09()
def get_tau(self, Av):
tau_grid = np.arange(0, 10, 0.01)
av_grid = np.polyval(self.coeffs['Av'], tau_grid)
return np.interp(Av, av_grid, tau_grid, left=0., right=tau_grid[-1])
def evaluate(self, x, Av):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
tau_V = self.get_tau(Av)
x0 = np.polyval(self.coeffs['x0'], tau_V)
gamma = np.polyval(self.coeffs['gamma'], tau_V)
if self.include_bump:
ampl = np.polyval(self.coeffs['ampl'], tau_V)*self.include_bump
else:
ampl = 0.
slope = np.polyval(self.coeffs['slope'], tau_V)
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = x
if self.N09.param_names[0] == 'x0':
return self.N09.evaluate(xin, x0, gamma, ampl, slope, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, slope)
def fsps_line_info(wlimits=None):
try:
info_file = os.path.join(os.getenv('SPS_HOME'), 'data/emlines_info.dat')
with open(info_file, 'r') as f:
lines = f.readlines()
except:
return [], []
waves = np.array([float(l.split(',')[0]) for l in lines])
names = np.array([l.strip().split(',')[1].replace(' ','') for l in lines])
if wlimits is not None:
clip = (waves > wlimits[0]) & (waves < wlimits[1])
waves = waves[clip]
names = names[clip]
return waves, names
DEFAULT_LINES = fsps_line_info(wlimits=[1200, 1.9e4])[0]
BOUNDS = {}
BOUNDS['tage'] = [0.03, 12, 0.05]
BOUNDS['tau'] = [0.03, 2, 0.05]
BOUNDS['zred'] = [0.0, 13, 1.e-4]
BOUNDS['Av'] = [0.0, 15, 0.05]
BOUNDS['gas_logu'] = [-4, 0, 0.05]
BOUNDS['gas_logz'] = [-2, 0.3, 0.05]
BOUNDS['logzsol'] = [-2, 0.3, 0.05]
BOUNDS['sigma_smooth'] = [100, 500, 0.05]
|
MIT License
|
botfront/rasa-for-botfront
|
rasa/nlu/components.py
|
ComponentBuilder.create_component_from_class
|
python
|
def create_component_from_class(self, component_class: Type[C], **cfg: Any) -> C:
component_config = {"name": component_class.name}
return self.create_component(component_config, RasaNLUModelConfig(cfg))
|
Create a component based on a class and a configuration.
Mainly used to make use of caching when instantiating component classes.
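A hedged usage sketch; WhitespaceTokenizer is only an example component and its import path is assumed from Rasa's NLU tokenizers:

from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer

builder = ComponentBuilder(use_cache=True)
tokenizer = builder.create_component_from_class(WhitespaceTokenizer, language='en')
# components that define a cache_key are reused from the builder's cache on later calls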
|
https://github.com/botfront/rasa-for-botfront/blob/6e0e48d0059e197b5f686df1e27935769c3641b7/rasa/nlu/components.py#L864-L871
|
from collections import defaultdict
import itertools
import logging
import typing
from typing import Any, Dict, Hashable, List, Optional, Set, Text, Tuple, Type, Iterable
import rasa.utils.train_utils
from rasa.exceptions import MissingDependencyException
from rasa.shared.exceptions import RasaException
from rasa.shared.nlu.constants import TRAINABLE_EXTRACTORS
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import COMPONENT_INDEX
import rasa.shared.utils.io
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
logger = logging.getLogger(__name__)
def find_unavailable_packages(package_names: List[Text]) -> Set[Text]:
import importlib
failed_imports = set()
for package in package_names:
try:
importlib.import_module(package)
except ImportError:
failed_imports.add(package)
return failed_imports
def validate_requirements(component_names: List[Optional[Text]]) -> None:
from rasa.nlu import registry
failed_imports = {}
for component_name in component_names:
if component_name is None:
raise InvalidConfigException(
"Your pipeline configuration contains a component that is missing "
"a name. Please double check your configuration or if this is a "
"custom component make sure to implement the name property for "
"the component."
)
component_class = registry.get_component_class(component_name)
unavailable_packages = find_unavailable_packages(
component_class.required_packages()
)
if unavailable_packages:
failed_imports[component_name] = unavailable_packages
if failed_imports:
dependency_component_map = defaultdict(list)
for component, missing_dependencies in failed_imports.items():
for dependency in missing_dependencies:
dependency_component_map[dependency].append(component)
missing_lines = [
f"{d} (needed for {', '.join(cs)})"
for d, cs in dependency_component_map.items()
]
missing = "\n - ".join(missing_lines)
raise MissingDependencyException(
f"Not all required importable packages are installed to use "
f"the configured NLU pipeline. "
f"To use this pipeline, you need to install the "
f"missing modules: \n"
f" - {missing}\n"
f"Please install the packages that contain the missing modules."
)
def validate_component_keys(
component: "Component", component_config: Dict[Text, Any]
) -> None:
component_name = component_config.get("name")
allowed_keys = set(component.defaults.keys())
provided_keys = set(component_config.keys())
provided_keys.discard("name")
list_separator = "\n- "
for key in provided_keys:
if key not in allowed_keys:
rasa.shared.utils.io.raise_warning(
f"You have provided an invalid key `{key}` for component `{component_name}` in your pipeline. "
f"Valid options for `{component_name}` are:\n- "
f"{list_separator.join(allowed_keys)}"
)
def validate_empty_pipeline(pipeline: List["Component"]) -> None:
if len(pipeline) == 0:
raise InvalidConfigException(
"Can not train an empty pipeline. "
"Make sure to specify a proper pipeline in "
"the configuration using the 'pipeline' key."
)
def validate_only_one_tokenizer_is_used(pipeline: List["Component"]) -> None:
from rasa.nlu.tokenizers.tokenizer import Tokenizer
tokenizer_names = []
for component in pipeline:
if isinstance(component, Tokenizer):
tokenizer_names.append(component.name)
if len(tokenizer_names) > 1:
raise InvalidConfigException(
f"The pipeline configuration contains more than one tokenizer, "
f"which is not possible at this time. You can only use one tokenizer. "
f"The pipeline contains the following tokenizers: {tokenizer_names}. "
)
def _required_component_in_pipeline(
required_component: Type["Component"], pipeline: List["Component"]
) -> bool:
for previous_component in pipeline:
if isinstance(previous_component, required_component):
return True
return False
def validate_required_components(pipeline: List["Component"]) -> None:
for i, component in enumerate(pipeline):
missing_components = []
for required_component in component.required_components():
if not _required_component_in_pipeline(required_component, pipeline[:i]):
missing_components.append(required_component.name)
missing_components_str = ", ".join(f"'{c}'" for c in missing_components)
if missing_components:
raise InvalidConfigException(
f"The pipeline configuration contains errors. The component "
f"'{component.name}' requires {missing_components_str} to be "
f"placed before it in the pipeline. Please "
f"add the required components to the pipeline."
)
def validate_pipeline(pipeline: List["Component"]) -> None:
validate_empty_pipeline(pipeline)
validate_only_one_tokenizer_is_used(pipeline)
validate_required_components(pipeline)
def any_components_in_pipeline(components: Iterable[Text], pipeline: List["Component"]):
return any(any(component.name == c for component in pipeline) for c in components)
def validate_required_components_from_data(
pipeline: List["Component"], data: TrainingData
) -> None:
if data.response_examples and not any_components_in_pipeline(
["ResponseSelector"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data with examples for training a response "
"selector, but your NLU pipeline does not include a response selector "
"component. To train a model on your response selector data, add a "
"'ResponseSelector' to your pipeline."
)
if data.entity_examples and not any_components_in_pipeline(
TRAINABLE_EXTRACTORS, pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of entity examples, but "
"your NLU pipeline does not include an entity extractor trained on "
"your training data. To extract non-pretrained entities, add one of "
f"{TRAINABLE_EXTRACTORS} to your pipeline."
)
if data.entity_examples and not any_components_in_pipeline(
{"DIETClassifier", "CRFEntityExtractor"}, pipeline
):
if data.entity_roles_groups_used():
rasa.shared.utils.io.raise_warning(
"You have defined training data with entities that have roles/groups, "
"but your NLU pipeline does not include a 'DIETClassifier' or a "
"'CRFEntityExtractor'. To train entities that have roles/groups, "
"add either 'DIETClassifier' or 'CRFEntityExtractor' to your "
"pipeline."
)
if data.regex_features and not any_components_in_pipeline(
["RegexFeaturizer", "RegexEntityExtractor"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data with regexes, but "
"your NLU pipeline does not include a 'RegexFeaturizer' or a "
"'RegexEntityExtractor'. To use regexes, include either a "
"'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline."
)
if data.lookup_tables and not any_components_in_pipeline(
["RegexFeaturizer", "RegexEntityExtractor"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline does not include a 'RegexFeaturizer' or a "
"'RegexEntityExtractor'. To use lookup tables, include either a "
"'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline."
)
if data.lookup_tables:
if not any_components_in_pipeline(
["CRFEntityExtractor", "DIETClassifier"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline does not include any components that use these "
"features. To make use of lookup tables, add a 'DIETClassifier' or a "
"'CRFEntityExtractor' with the 'pattern' feature to your pipeline."
)
elif any_components_in_pipeline(["CRFEntityExtractor"], pipeline):
crf_components = [c for c in pipeline if c.name == "CRFEntityExtractor"]
has_pattern_feature = False
for crf in crf_components:
crf_features = crf.component_config.get("features")
has_pattern_feature = "pattern" in itertools.chain(*crf_features)
if not has_pattern_feature:
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline's 'CRFEntityExtractor' does not include the "
"'pattern' feature. To featurize lookup tables, add the 'pattern' "
"feature to the 'CRFEntityExtractor' in your pipeline."
)
if data.entity_synonyms and not any_components_in_pipeline(
["EntitySynonymMapper"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined synonyms in your training data, but "
"your NLU pipeline does not include an 'EntitySynonymMapper'. "
"To map synonyms, add an 'EntitySynonymMapper' to your pipeline."
)
class MissingArgumentError(ValueError):
def __init__(self, message: Text) -> None:
super().__init__(message)
self.message = message
def __str__(self) -> Text:
return self.message
class UnsupportedLanguageError(RasaException):
def __init__(self, component: Text, language: Text) -> None:
self.component = component
self.language = language
super().__init__(component, language)
def __str__(self) -> Text:
return (
f"component '{self.component}' does not support language '{self.language}'."
)
class ComponentMetaclass(type):
@property
def name(cls):
return cls.__name__
class Component(metaclass=ComponentMetaclass):
@property
def name(self) -> Text:
return type(self).name
@property
def unique_name(self) -> Text:
index = self.component_config.get(COMPONENT_INDEX)
return self.name if index is None else f"component_{index}_{self.name}"
@classmethod
def required_components(cls) -> List[Type["Component"]]:
return []
defaults = {}
supported_language_list = None
not_supported_language_list = None
def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
if not component_config:
component_config = {}
component_config["name"] = self.name
self.component_config = rasa.utils.train_utils.override_defaults(
self.defaults, component_config
)
self.partial_processing_pipeline = None
self.partial_processing_context = None
@classmethod
def required_packages(cls) -> List[Text]:
return []
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any,
) -> "Component":
if cached_component:
return cached_component
return cls(meta)
@classmethod
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "Component":
language = config.language
if not cls.can_handle_language(language):
raise UnsupportedLanguageError(cls.name, language)
return cls(component_config)
def provide_context(self) -> Optional[Dict[Text, Any]]:
pass
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
pass
def process(self, message: Message, **kwargs: Any) -> None:
pass
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
pass
@classmethod
def cache_key(
cls, component_meta: Dict[Text, Any], model_metadata: "Metadata"
) -> Optional[Text]:
return None
def __getstate__(self) -> Any:
d = self.__dict__.copy()
if "partial_processing_context" in d:
del d["partial_processing_context"]
if "partial_processing_pipeline" in d:
del d["partial_processing_pipeline"]
return d
def __eq__(self, other) -> bool:
return self.__dict__ == other.__dict__
def prepare_partial_processing(
self, pipeline: List["Component"], context: Dict[Text, Any]
) -> None:
self.partial_processing_pipeline = pipeline
self.partial_processing_context = context
def partially_process(self, message: Message) -> Message:
if self.partial_processing_context is not None:
for component in self.partial_processing_pipeline:
component.process(message, **self.partial_processing_context)
else:
logger.info("Failed to run partial processing due to missing pipeline.")
return message
@classmethod
def can_handle_language(cls, language: Hashable) -> bool:
if language is None or (
cls.supported_language_list is None
and cls.not_supported_language_list is None
):
return True
if cls.supported_language_list and cls.not_supported_language_list:
raise RasaException(
"Only one of `supported_language_list` and `not_supported_language_list` can be set to not None"
)
supported_language_list = (
cls.supported_language_list
if cls.supported_language_list is not None
else []
)
not_supported_language_list = (
cls.not_supported_language_list
if cls.not_supported_language_list is not None
else []
)
if not supported_language_list and not not_supported_language_list:
raise RasaException(
"Empty lists for both "
"`supported_language_list` and `not_supported language_list` "
"is not a valid setting. If you meant to allow all languages "
"for the component use `None` for both of them."
)
if supported_language_list:
return language in supported_language_list
else:
return language not in not_supported_language_list
C = typing.TypeVar("C", bound=Component)
class ComponentBuilder:
def __init__(self, use_cache: bool = True) -> None:
self.use_cache = use_cache
self.component_cache = {}
def __get_cached_component(
self, component_meta: Dict[Text, Any], model_metadata: "Metadata"
) -> Tuple[Optional[Component], Optional[Text]]:
from rasa.nlu import registry
component_name = component_meta.get("class", component_meta["name"])
component_class = registry.get_component_class(component_name)
cache_key = component_class.cache_key(component_meta, model_metadata)
if (
cache_key is not None
and self.use_cache
and cache_key in self.component_cache
):
return self.component_cache[cache_key], cache_key
return None, cache_key
def __add_to_cache(self, component: Component, cache_key: Optional[Text]) -> None:
if cache_key is not None and self.use_cache:
self.component_cache[cache_key] = component
logger.info(
f"Added '{component.name}' to component cache. Key '{cache_key}'."
)
def load_component(
self,
component_meta: Dict[Text, Any],
model_dir: Text,
model_metadata: "Metadata",
**context: Any,
) -> Component:
from rasa.nlu import registry
try:
cached_component, cache_key = self.__get_cached_component(
component_meta, model_metadata
)
component = registry.load_component_by_meta(
component_meta, model_dir, model_metadata, cached_component, **context
)
if not cached_component:
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e:
raise RasaException(
f"Failed to load component from file '{component_meta.get('file')}'. "
f"Error: {e}"
)
def create_component(
self, component_config: Dict[Text, Any], cfg: RasaNLUModelConfig
) -> Component:
from rasa.nlu import registry
from rasa.nlu.model import Metadata
try:
component, cache_key = self.__get_cached_component(
component_config, Metadata(cfg.as_dict(), None)
)
if component is None:
component = registry.create_component_by_config(component_config, cfg)
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e:
raise RasaException(
f"Failed to create component '{component_config['name']}'. "
f"Error: {e}"
)
|
Apache License 2.0
|
kyubyong/vq-vae
|
networks.py
|
vq
|
python
|
def vq(z_e):
with tf.variable_scope("vq"):
lookup_table = tf.get_variable('lookup_table',
dtype=tf.float32,
shape=[hp.K, hp.D],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
z = tf.expand_dims(z_e, -2)
lookup_table_ = tf.reshape(lookup_table, [1, 1, hp.K, hp.D])
dist = tf.norm(z - lookup_table_, axis=-1)
k = tf.argmin(dist, axis=-1)
z_q = tf.gather(lookup_table, k)
return z_q
|
Vector Quantization.
Args:
z_e: encoded variable. [B, t, D].
Returns:
z_q: nearest embeddings. [B, t, D].
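A minimal sketch of how the pieces compose in a TF1 graph (the placeholder shape is illustrative; hp.K and hp.D come from hparams):

x = tf.placeholder(tf.float32, shape=[None, None, 1])   # [B, T, 1] input features
z_e = encoder(x)    # [B, t, D] continuous latents from the strided conv encoder
z_q = vq(z_e)       # [B, t, D] nearest codebook vectors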
|
https://github.com/kyubyong/vq-vae/blob/2b6a9c50cb206a9e364518cfda9e1cd6c56f3da1/networks.py#L37-L57
|
from __future__ import print_function
from hparams import Hyperparams as hp
from modules import conv1d, residual_block
import tensorflow as tf
def encoder(x):
with tf.variable_scope("encoder"):
for i in range(hp.encoder_layers):
x = tf.pad(x, [[0, 0], [1, 1], [0, 0]])
x = conv1d(x,
filters=hp.D,
size=hp.winsize,
strides=hp.stride,
padding="valid",
bn=True,
activation_fn=tf.nn.relu if i < hp.encoder_layers-1 else None,
scope="conv1d_{}".format(i))
z_e = x
return z_e
|
Apache License 2.0
|
zepmanbc/creopyson
|
creopyson/drawing.py
|
delete_symbol_def
|
python
|
def delete_symbol_def(client, symbol_file, drawing=None):
data = {"symbol_file": symbol_file}
if drawing is not None:
data["drawing"] = drawing
return client._creoson_post("drawing", "delete_symbol_def", data)
|
Delete a symbol definition and its instances from a drawing.
Args:
client (obj):
creopyson Client
symbol_file (str):
Name of the symbol file.
drawing (str, optional):
Drawing name. Defaults: current active drawing.
Returns:
None
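A hedged usage sketch; the connection flow follows creopyson's usual Client pattern and the symbol file name is illustrative:

import creopyson
from creopyson import drawing

c = creopyson.Client()      # assumes a CREOSON server on the default host/port
c.connect()
drawing.delete_symbol_def(c, 'my_symbol.sym')   # removes the definition from the active drawing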
|
https://github.com/zepmanbc/creopyson/blob/ab99d6c28780f5967d5daaaa59f5dbfd4fd96600/creopyson/drawing.py#L302-L320
|
def add_model(client, model, drawing=None):
data = {"model": model}
if drawing is not None:
data["drawing"] = drawing
return client._creoson_post("drawing", "add_model", data)
def add_sheet(client, position=None, drawing=None):
data = {}
if position is not None:
data["position"] = position
if drawing is not None:
data["drawing"] = drawing
return client._creoson_post("drawing", "add_sheet", data)
def create(
client,
template,
model=None,
drawing=None,
scale=None,
display=None,
activate=None,
new_window=None,
):
data = {"template": template}
if model is not None:
data["model"] = model
if drawing is not None:
data["drawing"] = drawing
if scale is not None:
data["scale"] = scale
if display is not None:
data["display"] = display
if activate is not None:
data["activate"] = activate
if new_window is not None:
data["new_window"] = new_window
return client._creoson_post("drawing", "create", data, "drawing")
def create_gen_view(
client,
model_view,
point,
drawing=None,
view=None,
sheet=None,
model=None,
scale=None,
display_data=None,
exploded=None,
):
data = {"model_view": model_view, "point": point}
if drawing is not None:
data["drawing"] = drawing
if view is not None:
data["view"] = view
if sheet is not None:
data["sheet"] = sheet
if model is not None:
data["model"] = model
if scale is not None:
data["scale"] = scale
if display_data is not None:
data["display_data"] = display_data
if exploded is not None:
data["exploded"] = exploded
return client._creoson_post("drawing", "create_gen_view", data)
def create_proj_view(
client,
parent_view,
point,
drawing=None,
view=None,
sheet=None,
display_data=None,
exploded=None,
):
data = {"parent_view": parent_view, "point": point}
if drawing is not None:
data["drawing"] = drawing
if view is not None:
data["view"] = view
if sheet is not None:
data["sheet"] = sheet
if display_data is not None:
data["display_data"] = display_data
if exploded is not None:
data["exploded"] = exploded
return client._creoson_post("drawing", "create_proj_view", data)
def create_symbol(
client, symbol_file, point, drawing=None, replace_values=None, sheet=None
):
data = {"symbol_file": symbol_file, "point": point}
if drawing is not None:
data["drawing"] = drawing
if replace_values is not None:
data["replace_values"] = replace_values
if sheet is not None:
data["sheet"] = sheet
return client._creoson_post("drawing", "create_symbol", data)
def delete_models(client, model=None, drawing=None, delete_views=None):
data = {}
if drawing is not None:
data["drawing"] = drawing
if model is not None:
data["model"] = model
if delete_views is not None:
data["delete_views"] = delete_views
return client._creoson_post("drawing", "delete_models", data)
def delete_sheet(client, sheet, drawing=None):
data = {"sheet": sheet}
if drawing is not None:
data["drawing"] = drawing
return client._creoson_post("drawing", "delete_sheet", data)
|
MIT License
|
codrsquad/setupmeta
|
setupmeta/scm.py
|
Snapshot.get_branch
|
python
|
def get_branch(self):
return "HEAD"
|
Consider branch to be always HEAD for snapshots
|
https://github.com/codrsquad/setupmeta/blob/e62f1979ba08bc198c3beca12359383afec3e4d8/setupmeta/scm.py#L108-L110
|
import os
import re
import setupmeta
RE_BRANCH_STATUS = re.compile(r"^## (.+)\.\.\.(([^/]+)/)?([^ ]+)\s*(\[(.+)\])?$")
RE_GIT_DESCRIBE = re.compile(r"^v?(.+?)(-\d+)?(-g\w+)?(-dirty)?$", re.IGNORECASE)
class Scm:
program = None
def __init__(self, root):
self.root = root
def __repr__(self):
return "%s %s" % (self.name, self.root)
@property
def name(self):
return self.__class__.__name__.lower()
def local_tags(self):
def remote_tags(self):
def get_branch(self):
def get_version(self):
def commit_files(self, commit, push, relative_paths, next_version):
def apply_tag(self, commit, push, next_version, branch):
def get_output(self, *args, **kwargs):
capture = kwargs.pop("capture", True)
cwd = kwargs.pop("cwd", self.root)
return setupmeta.run_program(self.program, *args, capture=capture, cwd=cwd, **kwargs)
def run(self, commit, *args, **kwargs):
fatal = kwargs.pop("fatal", True)
capture = kwargs.pop("capture", None)
return self.get_output(*args, capture=capture, fatal=fatal, dryrun=not commit, **kwargs)
class Snapshot(Scm):
program = None
def is_dirty(self):
v = os.environ.get(setupmeta.SCM_DESCRIBE)
return v and "dirty" in v
|
MIT License
|
aryanc403/remind
|
remind/cogs/reminders.py
|
Reminders.settings
|
python
|
async def settings(self, ctx):
settings = self.guild_map[ctx.guild.id]
channel_id, role_id, before, timezone, website_allowed_patterns, website_disallowed_patterns = settings
channel = ctx.guild.get_channel(channel_id)
role = ctx.guild.get_role(role_id)
if channel is None:
raise RemindersCogError('No channel set for reminders')
if role is None:
raise RemindersCogError('No role set for reminders')
if before is None:
raise RemindersCogError('No reminder_times set for reminders')
subscribed_websites_str = ", ".join(
website for website,
patterns in website_allowed_patterns.items() if patterns)
before_str = ', '.join(str(before_mins) for before_mins in before)
embed = discord_common.embed_success('Current reminder settings')
embed.add_field(name='Channel', value=channel.mention)
embed.add_field(name='Role', value=role.mention)
embed.add_field(name='Before',
value=f'At {before_str} mins before contest')
embed.add_field(name='Subscribed websites',
value=f'{subscribed_websites_str}')
await ctx.send(embed=embed)
|
Shows the reminders role, channel, times, and timezone settings.
|
https://github.com/aryanc403/remind/blob/c4be96a237b4d00e4f74519da6aa54a41f5422cb/remind/cogs/reminders.py#L382-L408
|
import asyncio
import random
import functools
import json
import pickle
import logging
import time
import datetime as dt
from pathlib import Path
from recordtype import recordtype
import pytz
import copy
from collections import defaultdict
from collections import namedtuple
import discord
from discord.ext import commands
import os
from remind.util.rounds import Round
from remind.util import discord_common
from remind.util import paginator
from remind import constants
from remind.util import clist_api as clist
_CONTESTS_PER_PAGE = 5
_CONTEST_PAGINATE_WAIT_TIME = 5 * 60
_FINISHED_CONTESTS_LIMIT = 5
_CONTEST_REFRESH_PERIOD = 10 * 60
_GUILD_SETTINGS_BACKUP_PERIOD = 6 * 60 * 60
_PYTZ_TIMEZONES_GIST_URL = ('https://gist.github.com/heyalexej/'
'8bf688fd67d7199be4a1682b3eec7568')
class RemindersCogError(commands.CommandError):
pass
def _contest_start_time_format(contest, tz):
start = contest.start_time.replace(tzinfo=dt.timezone.utc).astimezone(tz)
return f'{start.strftime("%d %b %y, %H:%M")} {tz}'
def _contest_duration_format(contest):
duration_days, duration_hrs, duration_mins, _ = discord_common.time_format(
contest.duration.total_seconds())
duration = f'{duration_hrs}h {duration_mins}m'
if duration_days > 0:
duration = f'{duration_days}d ' + duration
return duration
def _get_formatted_contest_desc(
start,
duration,
url,
max_duration_len):
em = '\N{EN SPACE}'
sq = '\N{WHITE SQUARE WITH UPPER RIGHT QUADRANT}'
desc = (f'`{em}{start}{em}|'
f'{em}{duration.rjust(max_duration_len, em)}{em}|'
f'{em}`[`link {sq}`]({url} "Link to contest page")')
return desc
def _get_embed_fields_from_contests(contests, localtimezone):
infos = [(contest.name,
_contest_start_time_format(contest,
localtimezone),
_contest_duration_format(contest),
contest.url) for contest in contests]
max_duration_len = max(len(duration) for _, _, duration, _ in infos)
fields = []
for name, start, duration, url in infos:
value = _get_formatted_contest_desc(
start, duration, url, max_duration_len)
fields.append((name, value))
return fields
async def _send_reminder_at(channel, role, contests, before_secs, send_time,
localtimezone: pytz.timezone):
delay = send_time - dt.datetime.utcnow().timestamp()
if delay <= 0:
return
await asyncio.sleep(delay)
values = discord_common.time_format(before_secs)
def make(value, label):
tmp = f'{value} {label}'
return tmp if value == 1 else tmp + 's'
labels = 'day hr min sec'.split()
before_str = ' '.join(make(value, label)
for label, value in zip(labels, values) if value > 0)
desc = f'About to start in {before_str}'
embed = discord_common.color_embed(description=desc)
for name, value in _get_embed_fields_from_contests(
contests, localtimezone):
embed.add_field(name=name, value=value)
await channel.send(role.mention, embed=embed)
_WEBSITE_ALLOWED_PATTERNS = defaultdict(list)
_WEBSITE_ALLOWED_PATTERNS['codeforces.com'] = ['']
_WEBSITE_ALLOWED_PATTERNS['codechef.com'] = [
'lunch', 'cook', 'rated']
_WEBSITE_ALLOWED_PATTERNS['atcoder.jp'] = [
'abc:', 'arc:', 'agc:', 'grand', 'beginner', 'regular']
_WEBSITE_ALLOWED_PATTERNS['topcoder.com'] = ['srm', 'tco']
_WEBSITE_ALLOWED_PATTERNS['codingcompetitions.withgoogle.com'] = ['']
_WEBSITE_ALLOWED_PATTERNS['facebook.com/hackercup'] = ['']
_WEBSITE_ALLOWED_PATTERNS['codedrills.io'] = ['']
_WEBSITE_DISALLOWED_PATTERNS = defaultdict(list)
_WEBSITE_DISALLOWED_PATTERNS['codeforces.com'] = [
'wild', 'fools', 'kotlin', 'unrated']
_WEBSITE_DISALLOWED_PATTERNS['codechef.com'] = ['unrated']
_WEBSITE_DISALLOWED_PATTERNS['atcoder.jp'] = []
_WEBSITE_DISALLOWED_PATTERNS['topcoder.com'] = []
_WEBSITE_DISALLOWED_PATTERNS['codingcompetitions.withgoogle.com'] = [
'registration']
_WEBSITE_DISALLOWED_PATTERNS['facebook.com/hackercup'] = []
_WEBSITE_DISALLOWED_PATTERNS['codedrills.io'] = []
_SUPPORTED_WEBSITES = [
'codeforces.com',
'codechef.com',
'atcoder.jp',
'topcoder.com',
'codingcompetitions.withgoogle.com',
'facebook.com/hackercup',
'codedrills.io'
]
GuildSettings = recordtype(
'GuildSettings', [
('channel_id', None), ('role_id', None),
('before', None), ('localtimezone', pytz.timezone('UTC')),
('website_allowed_patterns', defaultdict(list)),
('website_disallowed_patterns', defaultdict(list))])
def get_default_guild_settings():
allowed_patterns = copy.deepcopy(_WEBSITE_ALLOWED_PATTERNS)
disallowed_patterns = copy.deepcopy(_WEBSITE_DISALLOWED_PATTERNS)
settings = GuildSettings()
settings.website_allowed_patterns = allowed_patterns
settings.website_disallowed_patterns = disallowed_patterns
return settings
class Reminders(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.future_contests = None
self.contest_cache = None
self.active_contests = None
self.finished_contests = None
self.start_time_map = defaultdict(list)
self.task_map = defaultdict(list)
self.guild_map = defaultdict(get_default_guild_settings)
self.last_guild_backup_time = -1
self.member_converter = commands.MemberConverter()
self.role_converter = commands.RoleConverter()
self.logger = logging.getLogger(self.__class__.__name__)
@commands.Cog.listener()
@discord_common.once
async def on_ready(self):
guild_map_path = Path(constants.GUILD_SETTINGS_MAP_PATH)
try:
with guild_map_path.open('rb') as guild_map_file:
guild_map = pickle.load(guild_map_file)
for guild_id, guild_settings in guild_map.items():
self.guild_map[guild_id] = GuildSettings(**{key: value
for key, value
in guild_settings._asdict().items()
if key in GuildSettings._fields})
except BaseException:
pass
asyncio.create_task(self._update_task())
async def cog_after_invoke(self, ctx):
self._serialize_guild_map()
self._backup_serialize_guild_map()
self._reschedule_tasks(ctx.guild.id)
async def _update_task(self):
self.logger.info(f'Updating reminder tasks.')
self._generate_contest_cache()
contest_cache = self.contest_cache
current_time = dt.datetime.utcnow()
self.future_contests = [
contest for contest in contest_cache
if contest.start_time > current_time
]
self.finished_contests = [
contest for contest in contest_cache
if contest.start_time +
contest.duration < current_time
]
self.active_contests = [
contest for contest in contest_cache
if contest.start_time <= current_time <=
contest.start_time + contest.duration
]
self.active_contests.sort(key=lambda contest: contest.start_time)
self.finished_contests.sort(
key=lambda contest: contest.start_time +
contest.duration,
reverse=True
)
self.future_contests.sort(key=lambda contest: contest.start_time)
self.finished_contests = self.finished_contests[:_FINISHED_CONTESTS_LIMIT]
self.start_time_map.clear()
for contest in self.future_contests:
self.start_time_map[time.mktime(
contest.start_time.timetuple())].append(contest)
self._reschedule_all_tasks()
await asyncio.sleep(_CONTEST_REFRESH_PERIOD)
asyncio.create_task(self._update_task())
def _generate_contest_cache(self):
clist.cache(forced=False)
db_file = Path(constants.CONTESTS_DB_FILE_PATH)
with db_file.open() as f:
data = json.load(f)
contests = [Round(contest) for contest in data['objects']]
self.contest_cache = [
contest for contest in contests if contest.is_desired(
_WEBSITE_ALLOWED_PATTERNS,
_WEBSITE_DISALLOWED_PATTERNS)]
def get_guild_contests(self, contests, guild_id):
settings = self.guild_map[guild_id]
_, _, _, _, website_allowed_patterns, website_disallowed_patterns = settings
contests = [contest for contest in contests if contest.is_desired(
website_allowed_patterns, website_disallowed_patterns)]
return contests
def _reschedule_all_tasks(self):
for guild in self.bot.guilds:
self._reschedule_tasks(guild.id)
def _reschedule_tasks(self, guild_id):
for task in self.task_map[guild_id]:
task.cancel()
self.task_map[guild_id].clear()
self.logger.info(f'Tasks for guild {guild_id} cleared')
if not self.start_time_map:
return
settings = self.guild_map[guild_id]
if any(setting is None for setting in settings):
return
channel_id, role_id, before, localtimezone, website_allowed_patterns, website_disallowed_patterns = settings
guild = self.bot.get_guild(guild_id)
channel, role = guild.get_channel(channel_id), guild.get_role(role_id)
for start_time, contests in self.start_time_map.items():
contests = self.get_guild_contests(contests, guild_id)
if not contests:
continue
for before_mins in before:
before_secs = 60 * before_mins
task = asyncio.create_task(
_send_reminder_at(
channel,
role,
contests,
before_secs,
start_time -
before_secs, localtimezone)
)
self.task_map[guild_id].append(task)
self.logger.info(
f'{len(self.task_map[guild_id])} '
f'tasks scheduled for guild {guild_id}')
@staticmethod
def _make_contest_pages(contests, title, localtimezone):
pages = []
chunks = paginator.chunkify(contests, _CONTESTS_PER_PAGE)
for chunk in chunks:
embed = discord_common.color_embed()
for name, value in _get_embed_fields_from_contests(
chunk, localtimezone):
embed.add_field(name=name, value=value, inline=False)
pages.append((title, embed))
return pages
async def _send_contest_list(self, ctx, contests, *, title, empty_msg):
if contests is None:
raise RemindersCogError('Contest list not present')
if len(contests) == 0:
await ctx.send(embed=discord_common.embed_neutral(empty_msg))
return
pages = self._make_contest_pages(
contests, title, self.guild_map[ctx.guild.id].localtimezone)
paginator.paginate(
self.bot,
ctx.channel,
pages,
wait_time=_CONTEST_PAGINATE_WAIT_TIME,
set_pagenum_footers=True
)
def _serialize_guild_map(self):
out_path = Path(constants.GUILD_SETTINGS_MAP_PATH)
with out_path.open(mode='wb') as out_file:
pickle.dump(self.guild_map, out_file)
def _backup_serialize_guild_map(self):
current_time_stamp = int(dt.datetime.utcnow().timestamp())
if current_time_stamp - self.last_guild_backup_time < _GUILD_SETTINGS_BACKUP_PERIOD:
return
self.last_guild_backup_time = current_time_stamp
out_path = Path(
constants.GUILD_SETTINGS_MAP_PATH +
"_" +
str(current_time_stamp))
with out_path.open(mode='wb') as out_file:
pickle.dump(self.guild_map, out_file)
@commands.group(brief='Commands for contest reminders',
invoke_without_command=True)
async def remind(self, ctx):
await ctx.send_help(ctx.command)
@remind.command(brief='Set reminder settings')
@commands.has_any_role('Admin', constants.REMIND_MODERATOR_ROLE)
async def here(self, ctx, role: discord.Role, *before: int):
if not role.mentionable:
raise RemindersCogError(
'The role for reminders must be mentionable')
if not before or any(before_mins < 0 for before_mins in before):
raise RemindersCogError('Please provide valid `before` values')
before = list(before)
before = sorted(before, reverse=True)
self.guild_map[ctx.guild.id].role_id = role.id
self.guild_map[ctx.guild.id].before = before
self.guild_map[ctx.guild.id].channel_id = ctx.channel.id
await ctx.send(
embed=discord_common.embed_success(
'Reminder settings saved successfully'))
@remind.command(brief='Resets the judges settings to the default ones')
@commands.has_any_role('Admin', constants.REMIND_MODERATOR_ROLE)
async def reset_judges_settings(self, ctx):
_, _, _, _, default_allowed_patterns, default_disallowed_patterns = get_default_guild_settings()
self.guild_map[ctx.guild.id].website_allowed_patterns = default_allowed_patterns
self.guild_map[ctx.guild.id].website_disallowed_patterns = default_disallowed_patterns
await ctx.send(embed=discord_common.embed_success(
'Successfully reset the judges settings to the default ones'))
@remind.command(brief='Show reminder settings')
|
MIT License
|
consbio/trefoil
|
trefoil/analysis/timeseries.py
|
linear_regression
|
python
|
def linear_regression(timesteps, values, full=False):
assert len(values.shape) == 3
assert values.shape[0] == timesteps.shape[0]
shape = values.shape
y = values.reshape((shape[0], shape[1] * shape[2]))
fit, residuals = numpy.linalg.lstsq(numpy.c_[timesteps, numpy.ones_like(timesteps)], y)[:2]
slopes = fit[0].reshape((shape[1], shape[2]))
intercepts = fit[1].reshape((shape[1], shape[2]))
mask = None
if hasattr(values, 'mask'):
mask = values.mask[0]
slopes = numpy.ma.masked_array(slopes, mask=mask)
intercepts = numpy.ma.masked_array(intercepts, mask=mask)
if not full:
return slopes, intercepts
from scipy.stats.distributions import t as t_dist
r2 = (1 - residuals / (y.shape[0] * y.var(axis=0)))
r = numpy.sqrt(r2)
r2 = r2.reshape((shape[1], shape[2]))
tiny = 1.0e-20
df = timesteps.shape[0] - 2
t = r * numpy.sqrt(df / ((1.0 - r + tiny)*(1.0 + r + tiny)))
p = (2 * t_dist.sf(numpy.abs(t), df)).reshape(shape[1], shape[2])
if mask is not None:
r2 = numpy.ma.masked_array(r2, mask=mask)
p = numpy.ma.masked_array(p, mask=mask)
return slopes, intercepts, r2, p
|
Perform linear regression using linear algebra operators
Note: does not account for missing data within time series.
:param timesteps: 1D array of timesteps to use for x value of linear regression
:param values: 3D array of data to use for y value of linear regression, assumes timestep is first axis
:param full: return full statistics or just slopes & intercepts. Default is False. If True, requires scipy.
:returns: (slopes, intercepts) or (slopes, intercepts, r-squared, p-value) if full is True
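A minimal usage sketch (not from the source): synthetic data where every cell follows y = 2t + 1, so the recovered slopes and intercepts should come out close to 2 and 1; the import path follows the module location above.
import numpy
from trefoil.analysis.timeseries import linear_regression

timesteps = numpy.arange(10, dtype=float)      # 10 timesteps
values = numpy.empty((10, 4, 5))               # (time, rows, cols)
for i, t in enumerate(timesteps):
    values[i] = 2.0 * t + 1.0                  # slope 2, intercept 1 everywhere

slopes, intercepts = linear_regression(timesteps, values)
print(slopes.shape, intercepts.shape)          # (4, 5) (4, 5)
print(round(float(slopes[0, 0]), 3), round(float(intercepts[0, 0]), 3))  # ~2.0 ~1.0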
|
https://github.com/consbio/trefoil/blob/f8da46fa67240cc3f1ef0460e3a249e57e5f7224/trefoil/analysis/timeseries.py#L79-L133
|
import numpy
from trefoil.analysis.summary import summarize_areas_by_category, calculate_weighted_statistics
from trefoil.utilities.window import Window
DAYS_PER_MONTH = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
MONTH_LABELS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
def extract_categorical_timeseries_by_area(values, areas, timestep_indicies, window=None):
assert len(values.shape) == 3
if window is not None:
assert isinstance(window, Window)
results = dict()
num_timesteps = len(timestep_indicies)
for index in timestep_indicies:
if window is not None:
data = window.clip(values, index).ravel()
else:
data = values[index].ravel()
data = numpy.ma.masked_array(data, mask=areas.mask)
category_summary = summarize_areas_by_category(data.astype("i"), areas)
for category in category_summary:
if not category in results:
results[category] = numpy.zeros(num_timesteps)
results[category][index] = category_summary[category]
return results
def extract_statistics_timeseries_by_weight(values, weights, statistics, window=None):
assert len(values.shape) == 3
if window is not None:
assert isinstance(window, Window)
results = dict()
for statistic in statistics:
results[statistic] = numpy.zeros(values.shape[0])
for index in xrange(values.shape[0]):
if window is not None:
data = window.clip(values, index).ravel()
else:
data = values[index].ravel()
data = numpy.ma.masked_array(data, mask=weights.mask)
statistics_results = calculate_weighted_statistics(data, weights, statistics)
for stat_index, statistic in enumerate(statistics):
results[statistic][index] = statistics_results[stat_index]
return results
|
BSD 3-Clause New or Revised License
|
efficiosoft/hass-apps
|
hass_apps/schedy/expression/helpers.py
|
CustomEnvironmentHelper.update_environment
|
python
|
def update_environment(self) -> None:
script = self._app.expression_environment_script
if script is not None:
self._room.log(
"Executing the expression_environment script.", level="DEBUG"
)
exec(script, self._env)
|
Executes the expression_environment script.
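A hedged illustration of what such a script might contain; the script text and the helper name below are assumptions, but the exec-into-environment mechanism is exactly what update_environment does with self._app.expression_environment_script and self._env.
# Hypothetical expression_environment script: a plain string of Python code.
EXAMPLE_SCRIPT = """
import math

def celsius_to_fahrenheit(c):
    return c * 9 / 5 + 32
"""

env = {}
exec(EXAMPLE_SCRIPT, env)                    # mirrors exec(script, self._env)
print(env["celsius_to_fahrenheit"](20))      # 68.0 -> name becomes usable inside schedule expressions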
|
https://github.com/efficiosoft/hass-apps/blob/2d095d6fffc12bde76a7b8f356fa266de6059b37/hass_apps/schedy/expression/helpers.py#L103-L111
|
import typing as T
if T.TYPE_CHECKING:
from .. import schedule as schedule_mod
from ..room import Room
import datetime
import inspect
import itertools
class HelperBase:
namespace = ""
order = 0
def __init__(
self, room: "Room", now: datetime.datetime, env: T.Dict[str, T.Any]
) -> None:
self._room = room
self._app = room.app
self._now = now
self._env = env
def update_environment(self) -> None:
if self.namespace:
self._env[self.namespace] = self
else:
base_member_names = dir(HelperBase)
for name, member in inspect.getmembers(self):
if not name.startswith("_") and name not in base_member_names:
self._env[name] = member
class BasicHelper(HelperBase):
def __init__(self, *args: T.Any, **kwargs: T.Any) -> None:
super().__init__(*args, **kwargs)
self.app = self._app
self.room = self._room
self.room_name = self._room.name
self.datetime = datetime
self.now = self._now
self.date = self._now.date()
self.time = self._now.time()
self.schedule_snippets = self._app.cfg["schedule_snippets"]
@staticmethod
def is_empty(iterable: T.Iterable) -> bool:
try:
if isinstance(iterable, T.Iterator):
next(iterable)
else:
next(iter(iterable))
except StopIteration:
return True
return False
@staticmethod
def round_to_step(
value: T.Union[float, int],
step: T.Union[float, int],
decimal_places: int = None,
) -> T.Union[float, int]:
value = step * round(value / step)
if decimal_places is not None:
value = round(value, decimal_places)
return value
class CustomEnvironmentHelper(HelperBase):
order = 1000
|
Apache License 2.0
|
googleapis/python-compute
|
google/cloud/compute_v1/services/region_health_check_services/pagers.py
|
ListPager.__init__
|
python
|
def __init__(
self,
method: Callable[..., compute.HealthCheckServicesList],
request: compute.ListRegionHealthCheckServicesRequest,
response: compute.HealthCheckServicesList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
self._method = method
self._request = compute.ListRegionHealthCheckServicesRequest(request)
self._response = response
self._metadata = metadata
|
Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListRegionHealthCheckServicesRequest):
The initial request object.
response (google.cloud.compute_v1.types.HealthCheckServicesList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
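A hedged construction sketch; in normal use the generated client's list() call builds the pager for you, so the stand-in method and the concrete field values here are assumptions.
from google.cloud.compute_v1.types import compute

request = compute.ListRegionHealthCheckServicesRequest(
    project="my-project", region="us-central1")              # hypothetical values
first_page = compute.HealthCheckServicesList()

pager = ListPager(
    method=lambda req, **kw: compute.HealthCheckServicesList(),  # stand-in for the RPC call
    request=request,
    response=first_page,
    metadata=(("x-goog-request-params", "region=us-central1"),),
)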
|
https://github.com/googleapis/python-compute/blob/703ac1703bc159dcd81e96759606ad896f125996/google/cloud/compute_v1/services/region_health_check_services/pagers.py#L48-L71
|
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class ListPager:
|
Apache License 2.0
|
lazywen/graily
|
graily.py
|
WSGIServerHandler.flush
|
python
|
def flush(self):
return not bool(self.request_handler.iostream._write_buffer)
|
return True if all data has been sent
|
https://github.com/lazywen/graily/blob/7613cc6654cb671d45446888550670d018959bcf/graily.py#L724-L726
|
__all__ = [
'TCPServer', 'ThreadPollTCPServer', 'StreamRequestHandler',
'HTTPServer', 'ThreadPollHTTPServer', 'BaseHTTPRequestHandler',
'HTTPResponse', 'StaticFileHandler'
'WSGIServer', 'WSGIRequestHandler', 'make_server',
'BaseTemplate', 'templates', 'MakoTemplate',
'Graily', 'Concurrent', 'init_log'
]
import os, sys, re, errno, time, socket, select, logging, random, types, io
import mimetypes, functools, heapq
import traceback
assert sys.version_info>=(3,0,0), "Only support for Python3"
import queue
import _thread, threading
from select import epoll
from urllib.parse import urlparse, parse_qs
class GrailyPoll:
READ = select.EPOLLIN
WRITE = select.EPOLLOUT
ERROR = select.EPOLLERR | select.EPOLLHUP
MAX_KEEPALIVE_TIME = 300
server_name = "Graily"
def __init__(self, server_address, RequestHandler, allow_reuse_address=True,
max_input_size=2097152):
self.server_address = server_address
self.RequestHandler = RequestHandler
self.allow_reuse_address = allow_reuse_address
self.max_input_size = max_input_size
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sockets = {self.socket.fileno(): self.socket}
self._shutdown_request = True
self._handlers = {}
self._keepalive = {}
self.server_bind()
def init_socket(self):
logging.info('Starting server at {}'.format(self.server_address))
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
self.socket.setblocking(False)
self.socket.listen(100)
self.init_epoll()
def init_epoll(self):
self.epoll = epoll()
self.epoll.register(self.socket, self.READ | self.ERROR)
def server_bind(self):
self.init_socket()
self.init_epoll()
def server_close(self):
self.socket.close()
def shutdown_request(self, request, flag=socket.SHUT_WR):
try: request.shutdown(flag)
except OSError: pass
def close_request(self, request):
try: request.close()
except OSError: pass
def _run(self, timeout=1):
try:
event_pairs = self.epoll.poll(timeout)
except Exception as e:
if errno_from_exception(e) == errno.EINTR:
return
else: raise
for fd, event in event_pairs:
sock = self._sockets[fd]
if event & self.ERROR:
self.epoll.unregister(fd)
del self._sockets[fd]
self._handlers.pop(sock, None)
self._keepalive.pop(sock, None)
self.close_request(sock)
else:
try:
if sock is self.socket:
if not self._shutdown_request:
request, client_address = sock.accept()
request.setblocking(False)
_fd = request.fileno()
self._sockets[_fd] = request
self.epoll.register(request, self.READ | self.ERROR)
self._keepalive[request] = time.time()
else: time.sleep(timeout)
elif event & self.READ:
self._keepalive[sock] = time.time()
self.handle_request(sock)
elif event & self.WRITE:
self.handle_response(sock)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
pass
except Exception as e:
self.handle_exception(e, sock)
if len(event_pairs) == 0:
for _fd, _sock in self._sockets.items():
if _sock not in self._keepalive:
self._keepalive[_sock] = time.time()
for _sock, _time in self._keepalive.items():
if time.time()-_time >= self.MAX_KEEPALIVE_TIME:
self.shutdown_request(_sock)
def handle_exception(self, err, sock):
logging.exception('')
if sock is self.socket: time.sleep(1)
else: self.shutdown_request(sock)
def handle_request(self, sock):
if sock not in self._handlers:
self._handlers[sock] = self.RequestHandler(sock, self)
self._handlers[sock]._run()
def handle_response(self, sock):
handler = self._handlers[sock]
sent = sock.send(handler.iostream._write_buffer)
if sent >= len(handler.iostream._write_buffer):
self.update_handler(sock, self.READ)
handler.iostream._write_buffer = handler.iostream._write_buffer[sent:]
def update_handler(self, sock, event):
self.epoll.modify(sock, event | self.ERROR)
class TCPServer(GrailyPoll):
def __init__(self, server_address, RequestHandler, **kwargs):
super(TCPServer, self).__init__(server_address, RequestHandler,
**kwargs)
def serve_forever(self):
self._shutdown_request = False
while not self._shutdown_request or len(self._sockets)>1:
self._run()
class ThreadPollTCPServer(TCPServer):
def __init__(self, server_address, RequestHandler,
poll_size=100, max_tasks=1000, **kwargs):
super(ThreadPollTCPServer, self).__init__(server_address, RequestHandler, **kwargs)
if Concurrent._concurrency:
self.concurrent = Concurrent(poll_size, max_tasks, self)
def put_task(self, task):
self.concurrent.tasks.put(task)
def serve_forever(self):
if Concurrent._concurrency: self.concurrent.start()
self._shutdown_request = False
while not self._shutdown_request or len(self._sockets)>1:
self._run()
class HTTPServer(TCPServer): pass
class ThreadPollHTTPServer(ThreadPollTCPServer): pass
class WSGIServer(HTTPServer):
application = None
def server_bind(self):
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_address[1])
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class Concurrent:
_concurrency = False
def __init__(self, poll_size, max_tasks, server):
self.poll_size = poll_size
self.tasks = queue.Queue(max_tasks)
self.server = server
self._thread_poll = []
self._running = False
def start(self):
logging.info('starting thread poll ...')
if self._running:
logging.warning('thread poll already started!')
return
def worker():
while True:
task = self.tasks.get()
next(task['run'])
try: res = task['run'].send(task['args'])
except StopIteration: pass
except Exception as e:
self.server.handle_exception(e, task['socket'])
else:
if res and 'result_handler' in task:
task['result_handler'](res)
if 'callback' in task and len(task['callback'])>0:
for callback, args in task['callback']:
callback(*args)
for i in range(self.poll_size):
td = threading.Thread(target=worker, args=())
td.setDaemon(True)
td.start()
self._thread_poll.append(td)
self._running = True
def allocate(self):
pass
class register:
def __init__(self, func):
self.func = func
self.__dict__['_graily_concurrency'] = True
if not Concurrent._concurrency:
Concurrent._concurrency = True
def __call__(self, *args, **kwargs):
ora_args = yield
new_args = []
if ora_args: new_args.extend(ora_args)
new_args.extend(args)
yield self.func(*new_args, **kwargs)
class BaseRequestHandler:
def __init__(self, request, server):
self.request = request
self.server = server
self.iostream = BaseIOStream(request, server)
self.response_handler = self.dataReceived
self._initialize()
def _initialize(self):
pass
def write(self, res):
if not self.iostream.write(res):
self.server.update_handler(self.request, self.server.WRITE)
def close(self):
self.server.shutdown_request(self.request)
def _run(self):
data_ready = self.iostream.read()
if data_ready and self.verify_request():
if self.parse_request():
prg = self.dataReceived()
if isinstance(prg, types.GeneratorType) and '_graily_concurrency' in self.dataReceived.__dict__:
task = {'socket': self.request, 'run': prg, 'args':(self,),
'callback': [(self._initialize, ())]}
self.server.put_task(task)
else: self._initialize()
def parse_request(self):
raise NotImplementedError
def verify_request(self):
raise NotImplementedError
def dataReceived(self):
raise NotImplementedError
class StreamRequestHandler(BaseRequestHandler):
def _initialize(self):
pass
def parse_request(self):
self.data = self.iostream._read_buffer.decode()
self.iostream._read_buffer = b""
return True
def verify_request(self):
return bool(self.iostream._read_buffer)
class BaseHTTPRequestHandler(BaseRequestHandler):
SUPPORT_HTTP_VERSION = ('HTTP/1.0', 'HTTP/1.1')
HTTP_METHOD = ('HEAD', 'GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'TRACE', 'CONNECT')
ERROR_MESSAGE_TEMPLATE = ('<html>'
'<head><title>%(code)d %(msg)s</title></head>'
'<body>'
'<center><h1>%(code)d %(msg)s</h1></center>'
'<center><p>%(desc)s</p></center>'
'</body>'
'</html>')
DEBUG_MESSAGE_TEMPLATE = ()
def _initialize(self):
self._send_buffer = b""
self.keep_alive = False
self.command = ""
self.host = ""
self.parameters = {}
self.request_url = ""
self.request_head = None
self.request_body = None
self.request_path = ""
self.request_query = ""
self.http_version = ""
self.headers = {}
self.respond_status = 200
self.request_options = {}
self.header_sent = False
self.environ = {'write':self.write, 'send_error':self.send_error,
'set_header':self.set_header, 'set_response_status':self.set_response_status}
def verify_request(self):
return b'\r\n\r\n' in self.iostream._read_buffer
def parse_request(self):
self.data = self.iostream._read_buffer.decode()
self.iostream._read_buffer = b""
slices = self.data.split('\r\n\r\n')
if len(slices) > 2:
self.send_error(400)
return False
self.environ['request_head'] = self.request_head = slices[0]
self.environ['request_body'] = self.request_body = slices[1]
request_head = io.StringIO(self.request_head)
request_line = request_head.readline().rstrip('\r\n')
args = request_line.split()
if len(args) == 3:
self.command = args[0]
self.request_url = args[1]
self.http_version = args[2]
_urlpar = urlparse(self.request_url)
self.environ['request_path'] = self.request_path = _urlpar.path
self.environ['request_query'] = self.request_query = _urlpar.query
if self.http_version not in self.SUPPORT_HTTP_VERSION:
self.send_error(505, "HTTP Version {} Not Supported".format(self.http_version))
return False
if self.command not in self.HTTP_METHOD:
self.send_error(405, "Not Allowed: {}".format(self.command))
return False
while True:
line = request_head.readline()
if not line: break
pos = line.find(':')
if pos < 0:
self.send_error(400); return False
self.request_options[line[0:pos].strip()] = line[pos+1:].strip()
if 'Host' not in self.request_options:
self.send_error(400); return False
if self.request_options.get('Connection', '').lower() == 'keep-alive':
self.keep_alive = True
if self.command == 'GET':
self.parameters = parse_qs(self.request_query)
elif self.command == 'POST':
self.parameters = parse_qs(self.request_body)
if self.parameters:
for key, val in self.parameters.items():
if type(val)==list and len(val)==1:
self.parameters[key] = val[0]
self.environ['parameters'] = self.parameters
return True
elif len(args) == 2:
self.send_error(400, 'not support')
else: self.send_error(400, "bad request syntax")
return False
def set_response_status(self, code):
assert type(code) == int
self.respond_status = code
def set_header(self, opt, val):
self.headers[opt] = val
def send_response(self, body=None):
if not self.header_sent:
self._send_buffer = ("{} {} {}\r\n".format(self.http_version, self.respond_status, self.RESPONSES_CODE.get(self.respond_status, '???'))).encode('latin-1', 'strict')
self.respond_status = 200
for opt, val in self.headers.items():
self._send_buffer += ("{}: {}\r\n".format(opt, val)).encode('latin-1', 'strict')
self._send_buffer += b'\r\n'
self.headers = {}
self.header_sent = True
_data = self._send_buffer
if type(body)==str:
body = body.encode('utf-8', 'replace')
if body: _data += body
self._write(_data)
self._send_buffer = b""
def send_error(self, code, desc=""):
msg = self.RESPONSES_CODE.get(code)
body = (self.ERROR_MESSAGE_TEMPLATE % {'code':code, 'msg':msg, 'desc': desc})
self.set_response_status(code)
self.set_header("Content-Type", "text/html; charset=UTF-8")
self.set_header('Connection', 'close')
self.set_header('Content-Length', int(len(body)))
self.send_response(body)
self.server.shutdown_request(self.request)
def _write(self, res):
if not self.iostream.write(res):
self.server.update_handler(self.request, self.server.WRITE)
def write(self, data, finish=False):
self.set_header('Connection', 'keep-alive')
if finish and "Content-Length" not in self.headers:
try: data_length = len(data)
except: pass
else: self.set_header('Content-Length', str(data_length))
if "Content-Type" not in self.headers:
self.set_header("Content-Type", "text/html; charset=UTF-8")
if type(data) == types.GeneratorType:
headers_sent = False
for _d in data:
if type(_d)==str: _d=_d.encode('utf-8','replace')
if not headers_sent:
self.send_response()
headers_sent = True
if _d: self._write(_d)
else:
self.send_response(data)
def _run(self):
data_ready = self.iostream.read()
if data_ready and self.verify_request():
if self.parse_request():
self.dataReceived()
def dataReceived(self):
_concurrency = False
if self.command == 'HEAD':
self.write('')
elif self.command in ('GET', 'POST'):
if hasattr(self, 'get_handler'):
handler, args = self.get_handler(self.request_path)
ins = handler(self.environ)
if hasattr(ins, self.command.lower()):
func = getattr(ins, self.command.lower())
res = func(*args)
if type(res) == types.GeneratorType and '_graily_concurrency' in func.__dict__:
_concurrency = True
task = {'socket': self.request, 'run': res, 'args':(self,),
'callback': [(self._initialize, ())],
'result_handler':functools.partial(self.write, finish=True)}
self.server.put_task(task)
else:
if res: self.write(res, finish=True)
else: self.send_error(501, "{} Method Not Implemented".format(self.command))
else:
if hasattr(self, self.command):
res = getattr(self, self.command)()
if res: self.write(res, finish=True)
else: self.send_error(501, "{} Method Not Implemented".format(self.command))
if not _concurrency:
if not self.keep_alive:
self.server.shutdown_request(self.request)
self._initialize()
RESPONSES_CODE = {
100: 'Continue', 101: 'Switching Protocols',
200: 'OK', 201: 'Created', 202: 'Accepted', 203: 'Non-Authoritative Information',
204: 'No Content', 205: 'Reset Content', 206: 'Partial Content',
300: 'Multiple Choices', 301: 'Moved Permanently', 302: 'Found', 303: 'See Other',
304: 'Not Modified', 305: 'Use Proxy', 307: 'Temporary Redirect',
400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', 403: 'Forbidden',
404: 'Not Found', 405: 'Method Not Allowed', 406: 'Not Acceptable',
407: 'Proxy Authentication Required', 408: 'Request Timeout', 409: 'Conflict',
410: 'Gone', 411: 'Length Required', 412: 'Precondition Failed',
413: 'Request Entity Too Large', 414: 'Request-URI Too Long', 415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable', 417: 'Expectation Failed', 428: 'Precondition Required',
429: 'Too Many Requests', 431: 'Request Header Fields Too Large',
500: 'Internal Server Error', 501: 'Not Implemented', 502: 'Bad Gateway',
503: 'Service Unavailable', 504: 'Gateway Timeout', 505: 'HTTP Version Not Supported',
511: 'Network Authentication Required',
}
class HTTPResponse:
def __init__(self, environ):
self.base_environ = environ.copy()
self.headers = {}
self.init_environ()
def init_environ(self):
for k,v in self.base_environ.items():
setattr(self, k ,v)
class NotFoundHandler(HTTPResponse):
code = 404
def get(self, *args):
return self.send_error(404, "not found for: {}".format(self.request_path))
def post(self, *args):
return self.get(*args)
class StaticFileHandler(HTTPResponse):
static_path = None
def get(self, rel_path):
if not self.static_path:
raise ValueError("static_path not set!")
if rel_path.endswith('/'):
return self.send_error(403, "Your Request Is Forbidden: {}".format(rel_path))
path = os.path.join(self.static_path, rel_path)
if os.path.isfile(path):
self.set_response_status(200)
extension = os.path.splitext(path)[1].lower()
ctype = self.extensions_map.get(extension, self.extensions_map[''])
self.set_header("Content-Type", ctype)
f = open(path, 'rb')
fs = os.fstat(f.fileno())
self.set_header("Content-Length", str(fs[6]))
if fs[6] < 102400: return f.read()
else: return self.yield_file(f)
else: return self.send_error(404, "Not Found: {}".format(rel_path))
def yield_file(self, fd):
chunk_size = 61440
_c = fd.read(chunk_size)
while _c:
yield _c
_c = fd.read(chunk_size)
@classmethod
def set_path(cls, path):
full_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), path)
if os.path.isdir(full_path):
StaticFileHandler.static_path = full_path
else: raise ValueError("no such path: {}".format(full_path))
return cls
if not mimetypes.inited:
mimetypes.init()
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream',
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
class WSGIServerHandler:
def __init__(self, request_handler, stdin, stdout, stderr, environ,
multithread=True, multiprocess=False):
self.request_handler = request_handler
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
self.headers_sent = False
self._send_buffer = b""
def setup_environ(self):
env = self.environ = self.base_env.copy()
env['wsgi.input'] = self.stdin
env['wsgi.errors'] = self.stderr
env['wsgi.multithread']= self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
def handle_error(self, e):
self.request_handler.server.handle_exception(e,
self.request_handler.request)
def run(self, application):
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except Exception as e:
self.handle_error(e)
def start_response(self, status, headers):
self.status = status.strip()
self.headers = self.format_headers(headers)
self.headers.update(self.request_handler.headers)
assert type(status)==str, "Status must a str type"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
return self.write
def finish_response(self):
try:
for data in self.result:
if type(data) == str: data = data.encode('utf-8', 'replace')
self._send_buffer += data
self.write(self._send_buffer)
finally:
pass
def format_headers(self, headers):
return dict(list(headers))
def set_header(self, key, val):
self.headers[key] = val
def send_headers(self):
if 'Content-Length' not in self.headers:
self.set_header('Content-Length', len(self._send_buffer))
_headers = "{} {}\r\n".format(self.environ['SERVER_PROTOCOL'], self.status)
for k, v in self.headers.items():
_headers += "{}: {}\r\n".format(k, v)
_headers += "\r\n"
self._write(_headers)
def close(self):
self.request_handler.close()
def _write(self, data):
self.stdout.write(data)
def write(self, data):
if not self.headers_sent:
self.send_headers()
self._write(data)
|
Apache License 2.0
|
qiyunlab/hgtector
|
hgtector/util.py
|
contain_words
|
python
|
def contain_words(text, words):
return re.search(r'\b{}\b'.format('|'.join(words)), text,
re.IGNORECASE) is not None
|
Check if a string contains any of given words
Parameters
----------
text : str
query string
words : list of str
target words
Returns
-------
bool
whether string contains at least one of given words
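A short illustration of the word-boundary behaviour (a sketch, not a test from the source). Note the alternation is not parenthesised, so \b only anchors the start of the first word and the end of the last one.
print(contain_words("Homo sapiens genome assembly", ["genome", "plasmid"]))  # True
print(contain_words("metagenome sample", ["genome"]))   # False: 'genome' only occurs inside 'metagenome'
print(contain_words("GENOME annotation", ["genome"]))   # True: matching is case-insensitive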
|
https://github.com/qiyunlab/hgtector/blob/0cadadda492b2c646efd5b9e183a638c076b3b01/hgtector/util.py#L501-L517
|
import re
from os import listdir
from os.path import (
join, isfile, basename, splitext, dirname, realpath, expanduser)
import subprocess
import gzip
import bz2
import lzma
import datetime
import yaml
zipdict = {'.gz': gzip, '.bz2': bz2, '.xz': lzma, '.lz': lzma}
def timestamp():
return datetime.datetime.now()
def load_configs():
file = find_config_file()
if file is not None:
with open(file, 'r') as f:
return yaml.load(f, Loader=yaml.SafeLoader)
def find_config_file():
fname = 'config.yml'
fp = fname
if not isfile(fp):
fp = join(expanduser('~'), '.hgtector', fname)
if not isfile(fp):
fp = join(dirname(realpath(__file__)), fname)
if isfile(fp):
return fp
def get_config(obj, attr, entry, func=None):
if obj.cfg is None:
return
if getattr(obj, attr, None) is not None:
return
keys = entry.split('.')
d_ = obj.cfg
for key in keys[:-1]:
try:
d_ = d_[key]
except KeyError:
return
try:
val = d_[keys[-1]]
except KeyError:
return
if val is None:
return
if func:
val = func(val)
setattr(obj, attr, val)
def arg2bool(val):
if val is None:
return False
if isinstance(val, bool):
return val
val = val.lower()
if val in ('yes', 'true', 't', 'y', '1'):
return True
elif val.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected.')
def read_file(fp):
ext = splitext(fp)[1]
zipfunc = getattr(zipdict[ext], 'open') if ext in zipdict else open
return zipfunc(fp, 'rt')
def list_from_param(param):
if not param:
return []
elif isinstance(param, list):
return param
elif isinstance(param, str):
if isfile(param):
with read_file(param) as f:
return f.read().splitlines()
else:
return param.split(',')
def dict_from_param(param):
if not param:
return {}
elif isinstance(param, dict):
return param
elif isinstance(param, str):
if isfile(param):
with read_file(param) as f:
try:
return dict(x.split('\t') for x in f.read().splitlines())
except ValueError:
raise ValueError(f'Invalid dictionary file: "{param}".')
else:
try:
return dict(x.split(':') for x in param.split(','))
except ValueError:
raise ValueError(f'Invalid dictionary string: "{param}".')
def run_command(cmd, capture=True, merge=True):
res = subprocess.run(
cmd, shell=True,
stdout=(subprocess.PIPE if capture else None),
stderr=(subprocess.STDOUT if merge else subprocess.DEVNULL))
return (res.returncode,
res.stdout.decode('utf-8').splitlines() if capture else None)
def file2id(fname):
id_, ext = splitext(basename(fname))
if ext in zipdict:
id_ = splitext(id_)[0]
return id_
def id2file_map(dir_, ext=None, ids=None):
res = {}
if ext is not None:
if not ext.startswith('.'):
ext = '.' + ext
n = len(ext)
for fname in listdir(dir_):
id_ = None
if ext is not None:
if fname.endswith(ext):
id_ = fname[:-n]
else:
id_, ext_ = splitext(fname)
if ext_ in zipdict:
id_ = splitext(id_)[0]
if id_ is None:
continue
if ids is not None and id_ not in ids:
continue
if id_ in res:
raise ValueError(f'Ambiguous files for Id: {id_}.')
res[id_] = fname
return res
def read_input_prots(fp):
prots = []
isfasta = None
used = {}
lines = []
with read_file(fp) as f:
for line in f:
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
continue
if isfasta is None:
isfasta = line.startswith('>')
if isfasta:
lines.append(line)
else:
if line in used:
prot_ = prots[used[line]]
prot_['dups'] = prot_.get('dups', 0) + 1
else:
used[line] = len(prots)
prots.append({'id': line, 'product': '', 'seq': ''})
if isfasta:
for id_, product, seq in read_fasta(lines):
if id_ in used:
prot_ = prots[used[id_]]
prot_['dups'] = prot_.get('dups', 0) + 1
else:
used[id_] = len(prots)
prots.append({'id': id_, 'product': product, 'seq': seq})
return prots
def read_fasta(lines):
seqs = []
for line in lines:
line = line.rstrip('\r\n')
if line.startswith('>'):
x = line[1:].split(None, 1)
seqs.append([x[0], get_product(x[1]) if len(x) > 1 else '', ''])
else:
seqs[-1][2] += line.rstrip('*')
return seqs
def write_fasta(seqs, f):
for id_, seq in seqs:
f.write(f'>{id_}\n{seq}\n')
def _get_taxon(tid, taxdump):
try:
return taxdump[tid]
except KeyError:
raise ValueError(f'TaxID {tid} is not found in taxonomy database.')
def describe_taxon(tid, taxdump):
taxon = _get_taxon(tid, taxdump)
name, rank = taxon['name'], taxon['rank']
return (f'{name} (no rank)' if not rank or rank == 'no rank'
else f'{rank} {name}')
def is_capital(name):
try:
return name.lstrip('[')[0].isupper()
except IndexError:
return False
def is_latin(name):
if name == '':
return False
elif name.count(' ') != 1:
return False
str_ = name.replace(' ', '')
if not str_.istitle():
return False
elif not str_.isalpha():
return False
return True
|
BSD 3-Clause New or Revised License
|
spedas/pyspedas
|
pyspedas/themis/spacecraft/particles/esa.py
|
esa
|
python
|
def esa(trange=['2007-03-23', '2007-03-24'],
probe='c',
level='l2',
suffix='',
get_support_data=False,
varformat=None,
varnames=[],
downloadonly=False,
notplot=False,
no_update=False,
time_clip=False):
return load(instrument='esa', trange=trange, level=level,
suffix=suffix, get_support_data=get_support_data,
varformat=varformat, varnames=varnames,
downloadonly=downloadonly, notplot=notplot,
probe=probe, time_clip=time_clip, no_update=no_update)
|
This function loads Electrostatic Analyzer (ESA) data
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or, to specify more or less than a day,
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe: str or list of str
Spacecraft probe letter(s) ('a', 'b', 'c', 'd' and/or 'e')
level: str
Data type; Valid options: 'l1', 'l2'
suffix: str
The tplot variable names will be given this suffix.
By default, no suffix is added.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
downloadonly: bool
Set this flag to download the CDF files, but not load them into
tplot variables
notplot: bool
Return the data in hash tables instead of creating tplot variables
no_update: bool
If set, only load data from your local cache
time_clip: bool
Time clip the variables to exactly the range specified
in the trange keyword
Returns:
List of tplot variables created.
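A hedged usage sketch, assuming the wrapper is importable as pyspedas.themis.esa; the time range and probe mirror the defaults above.
from pyspedas.themis import esa   # assumed import location for this wrapper

vars_loaded = esa(trange=['2007-03-23', '2007-03-24'],
                  probe='c',
                  level='l2',
                  time_clip=True)
print(vars_loaded)                # names of the tplot variables created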
|
https://github.com/spedas/pyspedas/blob/b6ac3bbaa481dee658ba4d5d259c2831e6740d6d/pyspedas/themis/spacecraft/particles/esa.py#L5-L70
|
from pyspedas.themis.load import load
|
MIT License
|
totalgood/twip
|
twip/util.py
|
Tokenizer.__reduce__
|
python
|
def __reduce__(self):
return (Tokenizer, (None, self.regex, self.strip, self.nonwords, self.nonwords_set, self.nonwords_regex,
self.lower, self.stemmer_name, self.ngrams))
|
Unpickling constructor and args so that pickling can be done efficiently without any bound methods, etc
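Whether a full Tokenizer round-trip works as shown depends on attributes (e.g. stemmer_name) that are set in parts of the class not included here, so below is a generic sketch of the __reduce__ protocol this relies on, using a hypothetical Point class.
import pickle

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __reduce__(self):
        # pickle stores (callable, args); unpickling simply calls callable(*args)
        return (Point, (self.x, self.y))

p2 = pickle.loads(pickle.dumps(Point(1, 2)))
print(p2.x, p2.y)   # 1 2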
|
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/util.py#L151-L154
|
from __future__ import division, print_function, absolute_import, unicode_literals
from builtins import str
from past.builtins import basestring
try:
from itertools import izip as zip
except ImportError:
pass
import re
from itertools import islice
import logging
from gensim.corpora import Dictionary
from pug.nlp.regex import CRE_TOKEN, RE_NONWORD
from pug.nlp.segmentation import str_strip, str_lower, passthrough
log = logging.getLogger('loggly')
passthrough = passthrough
class Tokenizer(object):
__safe_for_unpickling__ = True
def __init__(self, doc=None, regex=CRE_TOKEN, strip=True, nonwords=False, nonwords_set=None, nonwords_regex=RE_NONWORD,
lower=None, stem=None, ngram_delim=' ', ngrams=1):
self.ngram_delim = ngram_delim
self.strip_chars = None
if isinstance(strip, str):
self.strip_chars = strip
self.strip = None
elif strip is True:
self.strip_chars = '-_*`()"' + '"'
strip = strip or None
self.strip = strip if callable(strip) else (str_strip if strip else None)
self.doc = str(doc)
self.regex = regex
if isinstance(self.regex, str):
self.regex = re.compile(self.regex)
self.nonwords = nonwords
self.nonwords_set = nonwords_set or set()
self.nonwords_regex = nonwords_regex
self.lower = lower if callable(lower) else (str_lower if lower else None)
self.ngrams = ngrams or 1
if isinstance(self.nonwords_regex, str):
self.nonwords_regex = re.compile(self.nonwords_regex)
elif self.nonwords:
try:
self.nonwords_set = set(self.nonwords)
except TypeError:
self.nonwords_set = set(['None', 'none', 'and', 'but'])
self.nonwords = not bool(self.nonwords)
def __call__(self, doc):
self.doc = str(doc)
return self
tokenize = __call__
|
MIT License
|
biothings/biothings.api
|
biothings/hub/datarelease/publisher.py
|
SnapshotPublisher.pre_publish
|
python
|
def pre_publish(self, snapshot_name, repo_conf, build_doc):
return self.run_pre_publish_snapshot(snapshot_name, repo_conf,
build_doc)
|
Pre-publish hook: runs the steps declared in the configuration,
as well as any steps defined in a sub-class
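For the pre-publish stage this ultimately reads repo_conf["publish"]["pre"]["snapshot"]; a hedged sketch of such a block follows, with step keys inferred from step_archive/step_upload in the context below and all concrete values hypothetical.
repo_conf = {
    "publish": {
        "pre": {
            "snapshot": [
                {"action": "archive",                 # handled by step_archive
                 "name": "mybuild_snapshot.tar.xz",
                 "format": "tar.xz",
                 "split": "1G"},                      # optional: split archive into parts
                {"action": "upload",                  # handled by step_upload / step_upload_s3
                 "type": "s3",
                 "file": "mybuild_snapshot.tar.xz",
                 "base_path": "releases/full",
                 "bucket": "my-release-bucket",
                 "region": "us-west-2"},
            ]
        }
    }
}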
|
https://github.com/biothings/biothings.api/blob/8e044af822200a24f27c4e08d9f779c8ba1d8920/biothings/hub/datarelease/publisher.py#L793-L799
|
import re
import os
import time
import glob
import copy
from datetime import datetime
from dateutil.parser import parse as dtparse
import json
import asyncio
from functools import partial
import subprocess
from biothings.utils.mongo import get_previous_collection, get_target_db
from biothings.utils.hub_db import get_src_build, get_source_fullname
import biothings.utils.aws as aws
from biothings.utils.dataload import update_dict_recur
from biothings.utils.jsondiff import make as jsondiff
from biothings.utils.loggers import get_logger
from biothings.utils.manager import BaseManager, BaseStatusRegisterer
from biothings.utils.backend import DocMongoBackend
from biothings import config as btconfig
from biothings.utils.hub import publish_data_version, template_out
from biothings.hub.databuild.backend import generate_folder, create_backend
from biothings.hub import RELEASEMANAGER_CATEGORY, RELEASER_CATEGORY
from biothings.hub.datarelease.releasenote import ReleaseNoteTxt
from biothings.hub.datarelease import set_pending_to_publish
from biothings.hub.databuild.buildconfig import AutoBuildConfig
logging = btconfig.logger
class PublisherException(Exception):
pass
class BasePublisher(BaseManager, BaseStatusRegisterer):
def __init__(self, envconf, log_folder, es_backups_folder, *args,
**kwargs):
super().__init__(*args, **kwargs)
self.envconf = envconf
self.log_folder = log_folder
self.es_backups_folder = es_backups_folder
self.ti = time.time()
self.setup()
def clean_stale_status(self):
src_build = get_src_build()
for build in src_build.find():
dirty = False
for job in build.get("jobs", []):
if job.get("status", "").endswith("publishing"):
logging.warning(
"Found stale build '%s', marking publish status as 'canceled'"
% build["_id"])
job["status"] = "canceled"
dirty = True
if dirty:
src_build.replace_one({"_id": build["_id"]}, build)
@property
def category(self):
return RELEASER_CATEGORY
@property
def collection(self):
return get_src_build()
def setup(self):
self.setup_log()
def setup_log(self):
self.logger, self.logfile = get_logger(self.category, self.log_folder)
def get_predicates(self):
return []
def get_pinfo(self):
pinfo = {
"category": self.category,
"source": "",
"step": "",
"description": ""
}
preds = self.get_predicates()
if preds:
pinfo["__predicates__"] = preds
return pinfo
def register_status(self,
bdoc,
status,
transient=False,
init=False,
**extra):
BaseStatusRegisterer.register_status(self,
bdoc,
"publish",
status,
transient=transient,
init=init,
**extra)
def load_build(self, key_name, stage=None):
if stage is None:
return self.load_doc(key_name, "snapshot") or self.load_doc(
key_name, "diff")
else:
return self.load_doc(key_name, stage)
def template_out_conf(self, build_doc):
try:
strconf = template_out(json.dumps(self.envconf), build_doc)
return json.loads(strconf)
except Exception as e:
self.logger.exception("Coudn't template out configuration: %s" % e)
raise
def create_bucket(self, bucket_conf, credentials):
aws_key = credentials.get("access_key")
aws_secret = credentials.get("secret_key")
aws.create_bucket(name=bucket_conf["bucket"],
region=bucket_conf["region"],
aws_key=aws_key,
aws_secret=aws_secret,
acl=bucket_conf.get("acl"),
ignore_already_exists=True)
if bucket_conf.get("website"):
aws.set_static_website(bucket_conf["bucket"],
aws_key=aws_key,
aws_secret=aws_secret)
def trigger_release_note(self, doc, **kwargs):
new_db_col_names = doc["_id"]
old_db_col_names = get_previous_collection(new_db_col_names)
self.release_note(old_db_col_names, new_db_col_names, **kwargs)
def get_pre_post_previous_result(self, build_doc, key_value):
raise NotImplementedError("implement me in sub-class")
def run_pre_post(self, key, stage, key_value, repo_conf, build_doc):
previous_result = self.get_pre_post_previous_result(
build_doc, key_value)
steps = repo_conf.get("publish", {}).get(stage, {}).get(
key, [])
assert isinstance(
steps,
list), "'%s' stage must be a list, got: %s" % (stage, repr(steps))
action_done = []
for step_conf in steps:
try:
action = step_conf["action"]
self.logger.info("Processing stage '%s-%s-publish': %s" %
(stage, key, action))
found = False
for tpl in [
"step_%(stage)s_publish_%(action)s",
"step_publish_%(action)s", "step_%(stage)s_%(action)s"
]:
methname = tpl % {"stage": stage, "action": action}
if hasattr(self, methname):
found = True
previous_result = getattr(self,
methname)(step_conf,
build_doc,
previous_result)
break
if not found:
previous_result = getattr(self, "step_%s" % action)(
step_conf, build_doc, previous_result)
action_done.append({"name": action, "result": previous_result})
except AttributeError:
raise ValueError("No such %s-%s-publish step '%s'" %
(stage, key, action))
return action_done
def step_archive(self, step_conf, build_doc, previous):
archive_name = step_conf["name"]
archive_file = os.path.join(self.es_backups_folder, archive_name)
if step_conf["format"] == "tar.xz":
tarcmd = [
"tar",
"cJf",
archive_file,
"-C",
self.es_backups_folder,
previous["settings"]["location"],
]
if step_conf.get("split"):
part = "%s.part." % archive_file
tarcmd[2] = "-"
tarps = subprocess.Popen(tarcmd, stdout=subprocess.PIPE)
splitcmd = ["split", "-", "-b", step_conf["split"], "-d", part]
ps = subprocess.Popen(splitcmd, stdin=tarps.stdout)  # pipe tar's output into split
ret_code = ps.wait()
if ret_code != 0:
raise PublisherException("Archiving failed, code: %s" %
ret_code)
else:
flist = glob.glob("%s.*" % part)
if len(flist) == 1:
os.rename(flist[0], archive_file)
self.logger.info("Tried to split archive, but only one part was produced,"
+ "returning single archive file: %s" % archive_file)
return archive_file
else:
jsonfile = "%s.json" % archive_file
json.dump({
"filename": archive_file,
"parts": flist
}, open(jsonfile, "w"))
self.logger.info(
"Archive split into %d parts, metadata stored in: %s"
% (len(flist), jsonfile))
return jsonfile
else:
subprocess.check_output(tarcmd)
self.logger.info("Archive: %s" % archive_file)
return archive_file
else:
raise ValueError("Only 'tar.xz' format supported for now, got %s" %
repr(step_conf["format"]))
def step_upload(self, step_conf, build_doc, previous):
if step_conf["type"] == "s3":
return self.step_upload_s3(step_conf, build_doc, previous)
else:
raise ValueError(
"Only 's3' upload type supported for now, got %s" %
repr(step_conf["type"]))
def step_upload_s3(self, step_conf, build_doc, previous):
aws_key = self.envconf.get("cloud", {}).get("access_key")
aws_secret = self.envconf.get("cloud", {}).get("secret_key")
self.create_bucket(bucket_conf=step_conf,
credentials=self.envconf.get("cloud", {}))
if step_conf.get("file"):
basename = step_conf["file"]
uploadfunc = aws.send_s3_big_file
elif step_conf.get("folder"):
basename = step_conf["folder"]
uploadfunc = aws.send_s3_folder
else:
raise ValueError(
"Can't find 'file' or 'folder' key, don't know what to upload")
archive_path = os.path.join(self.es_backups_folder, basename)
self.logger.info("Uploading: %s" % archive_path)
uploadfunc(archive_path,
os.path.join(step_conf["base_path"], basename),
overwrite=step_conf.get("overwrite", False),
aws_key=aws_key,
aws_secret=aws_secret,
s3_bucket=step_conf["bucket"])
return {
"type": "s3",
"key": basename,
"base_path": step_conf["base_path"],
"bucket": step_conf["bucket"]
}
def get_release_note_filename(self, build_version):
return "release_%s" % build_version
def publish_release_notes(self,
release_folder,
build_version,
s3_release_folder,
s3_release_bucket,
aws_key,
aws_secret,
prefix="release_"):
release_note = self.get_release_note_filename(build_version)
s3basedir = os.path.join(s3_release_folder, build_version)
notes = glob.glob(os.path.join(release_folder, "%s.*" % release_note))
self.logger.info(
"Uploading release notes from '%s' to s3 folder '%s'" %
(notes, s3basedir))
for note in notes:
if os.path.exists(note):
s3key = os.path.join(s3basedir, os.path.basename(note))
aws.send_s3_file(
note,
s3key,
aws_key=self.envconf.get("cloud", {}).get("access_key"),
aws_secret=self.envconf.get("cloud", {}).get("secret_key"),
s3_bucket=s3_release_bucket,
overwrite=True)
rel_txt_url = aws.get_s3_url(
os.path.join(s3basedir, "%s.txt" % release_note),
aws_key=self.envconf.get("cloud", {}).get("access_key"),
aws_secret=self.envconf.get("cloud", {}).get("secret_key"),
s3_bucket=s3_release_bucket)
rel_json_url = aws.get_s3_url(
os.path.join(s3basedir, "%s.json" % release_note),
aws_key=self.envconf.get("cloud", {}).get("access_key"),
aws_secret=self.envconf.get("cloud", {}).get("secret_key"),
s3_bucket=s3_release_bucket)
urls = {}
if rel_txt_url:
urls["txt"] = {"url": rel_txt_url}
if rel_json_url:
urls["json"] = {"url": rel_json_url}
return urls
class SnapshotPublisher(BasePublisher):
def __init__(self, snapshot_manager, *args, **kwargs):
super().__init__(*args, **kwargs)
self.snapshot_manager = snapshot_manager
self.setup()
def get_pre_post_previous_result(self, build_doc, key_value):
assert build_doc["snapshot"][key_value], "previous step not successful"
assert build_doc["snapshot"][key_value]["created_at"]
previous_result = build_doc["snapshot"][key_value]["conf"]["repository"]
return previous_result
def run_pre_publish_snapshot(self, snapshot_name, repo_conf, build_doc):
return self.run_pre_post("snapshot", "pre", snapshot_name, repo_conf, build_doc)
def publish(self,
snapshot,
build_name=None,
previous_build=None,
steps=["pre", "meta", "post"]):
try:
if build_name:
bdoc = self.load_build(build_name)
else:
bdoc = self.load_build(snapshot, "snapshot")
except Exception as e:
self.logger.exception(
"Error loading build document using snapshot named '%s': %s" %
(snapshot, e))
raise
if isinstance(bdoc, list):
raise PublisherException(
"Snapshot '%s' found in more than one builds: %s." %
(snapshot, [d["_id"] for d in bdoc])
+ " Specify which one with 'build_name'")
if type(steps) == str:
steps = [steps]
if not bdoc:
raise PublisherException(
"No build document found with a snapshot name '%s' associated to it"
% snapshot)
self.envconf = self.template_out_conf(bdoc)
release_folder = None
if previous_build is None and "release_note" in bdoc:
previous_build = list(bdoc["release_note"].keys())
if len(previous_build) != 1:
raise PublisherException("More than one release note found, "
+ "generated with following builds: %s" % previous_build)
else:
previous_build = previous_build.pop()
assert previous_build, "Couldn't find previous build %s" % bdoc.keys()
release_folder = generate_folder(btconfig.RELEASE_PATH, previous_build,
bdoc["_id"])
assert release_folder, "No release folder found, can't publish"
s3_release_folder = self.envconf["release"]["folder"]
s3_release_bucket = self.envconf["release"]["bucket"]
self.create_bucket(bucket_conf=self.envconf["release"],
credentials=self.envconf.get("cloud", {}))
got_error = None
@asyncio.coroutine
def do():
jobs = []
pinfo = self.get_pinfo()
pinfo["step"] = "publish"
pinfo["source"] = snapshot
def done(f, step):
try:
res = f.result()
self.register_status(bdoc,
"success",
job={
"step": step,
"result": res
},
publish={
"full": {
snapshot: {
"conf": self.envconf,
step: res
}
}
})
except Exception as e:
nonlocal got_error
got_error = e
self.register_status(bdoc,
"failed",
job={
"step": step,
"err": str(e)
},
publish={
"full": {
snapshot: {
"conf": self.envconf,
step: {
"err": str(e)
}
}
}
})
self.logger.exception(
"Error while running pre-publish: %s" % got_error)
if "_meta" not in bdoc:
raise PublisherException(
"No metadata (_meta) found in build document")
if "pre" in steps:
pinfo["step"] = "pre"
self.logger.info("Running pre-publish step")
self.register_status(bdoc,
"pre",
transient=True,
init=True,
job={"step": "pre"},
publish={"full": {
snapshot: {}
}})
job = yield from self.job_manager.defer_to_thread(
pinfo,
partial(self.pre_publish, snapshot, self.envconf, bdoc))
job.add_done_callback(partial(done, step="pre"))
yield from job
if got_error:
raise got_error
jobs.append(job)
if "meta" in steps:
build_version = bdoc["_meta"]["build_version"]
self.logger.info(
"Generating JSON metadata for full release '%s'" %
build_version)
assert snapshot, "Missing snapshot name information"
if getattr(btconfig, "SKIP_CHECK_VERSIONS", None):
self.logger.info(
"SKIP_CHECK_VERSIONS %s, no version check will be performed on full metadata"
% repr(btconfig.SKIP_CHECK_VERSIONS))
else:
assert getattr(
btconfig, "BIOTHINGS_VERSION", "master"
) != "master", "I won't publish data refering BIOTHINGS_VERSION='master'"
assert getattr(
btconfig, "APP_VERSION", "master"
) != "master", "I won't publish data refering APP_VERSION='master'"
assert getattr(btconfig, "STANDALONE_VERSION",
None), "STANDALONE_VERSION not defined"
full_meta = {
"type": "full",
"build_version": build_version,
"target_version": build_version,
"release_date": datetime.now().astimezone().isoformat(),
"app_version": btconfig.APP_VERSION,
"biothings_version": btconfig.BIOTHINGS_VERSION,
"standalone_version": btconfig.STANDALONE_VERSION,
"metadata": {
"repository": bdoc["snapshot"][snapshot]["conf"]["repository"],
"snapshot_name": snapshot,
}
}
if bdoc["snapshot"][snapshot]["conf"]["repository"]["type"] == "fs":
pre_steps = bdoc.get("publish",
{}).get("full",
{}).get(snapshot,
{}).get("pre", [])
try:
assert pre_steps, "No pre-steps found, expecting pre-upload step"
upload_step = [
step for step in pre_steps
if step["name"] == "upload"
]
assert len(
upload_step
) == 1, "Expecting one pre-upload step, got %s" % repr(
upload_step)
upload_step = upload_step.pop()
res = upload_step["result"]
assert res[
"type"] == "s3", "Only archived uploaded to S3 are currently supported"
url = aws.get_s3_url(
s3key=os.path.join(res["base_path"], res["key"]),
aws_key=self.envconf.get("cloud",
{}).get("access_key"),
aws_secret=self.envconf.get("cloud",
{}).get("secret_key"),
s3_bucket=res["bucket"])
full_meta["metadata"]["archive_url"] = url
except Exception as e:
raise PublisherException("Repository for snapshot '%s' is type 'fs' but " % snapshot
+ "coudln't determine archive URL to publish: %s" % e)
if release_folder:
if os.path.exists(release_folder):
try:
self.register_status(
bdoc,
"publishing",
transient=True,
init=True,
job={"step": "release-note"},
publish={"full": {
snapshot: {}
}})
urls = self.publish_release_notes(
release_folder,
build_version,
s3_release_folder,
s3_release_bucket,
aws_key=self.envconf.get("cloud",
{}).get("access_key"),
aws_secret=self.envconf.get(
"cloud", {}).get("secret_key"))
full_meta.setdefault("changes", {})
full_meta["changes"].update(urls)
s3basedir = os.path.join(s3_release_folder,
build_version)
self.register_status(
bdoc,
"success",
job={"step": "release-note"},
publish={
"full": {
snapshot: {
"conf": self.envconf,
"release-note": {
"base_dir": s3basedir,
"bucket": s3_release_bucket,
"url": urls
}
}
}
})
except Exception as e:
self.logger.exception(
"Failed to upload release notes: %s" % e)
self.register_status(
bdoc,
"failed",
job={
"step": "release-note",
"err": str(e)
},
publish={
"full": {
snapshot: {
"conf": self.envconf,
"release-note": {
"err": str(e),
"base_dir": s3basedir,
"bucket": s3_release_bucket
},
}
}
})
raise
else:
self.logger.info(
"No release_folder found, no release notes will be part of the publishing"
)
os.makedirs(release_folder)
try:
self.register_status(bdoc,
"publishing",
transient=True,
init=True,
job={"step": "metadata"},
publish={"full": {
snapshot: {}
}})
build_info = "%s.json" % build_version
build_info_path = os.path.join(btconfig.RELEASE_PATH,
build_info)
json.dump(full_meta, open(build_info_path, "w"))
local_ts = dtparse(bdoc["_meta"]["build_date"])
utc_epoch = int(time.mktime(local_ts.timetuple()))
str_utc_epoch = str(utc_epoch)
s3key = os.path.join(s3_release_folder, build_info)
aws.send_s3_file(
build_info_path,
s3key,
aws_key=self.envconf.get("cloud",
{}).get("access_key"),
aws_secret=self.envconf.get("cloud",
{}).get("secret_key"),
s3_bucket=s3_release_bucket,
metadata={"lastmodified": str_utc_epoch},
overwrite=True)
url = aws.get_s3_url(
s3key,
aws_key=self.envconf.get("cloud",
{}).get("access_key"),
aws_secret=self.envconf.get("cloud",
{}).get("secret_key"),
s3_bucket=s3_release_bucket)
self.logger.info(
"Full release metadata published for version: '%s'" %
url)
full_info = {
"build_version": full_meta["build_version"],
"require_version": None,
"target_version": full_meta["target_version"],
"type": full_meta["type"],
"release_date": full_meta["release_date"],
"url": url
}
publish_data_version(
s3_release_bucket,
s3_release_folder,
full_info,
aws_key=self.envconf.get("cloud",
{}).get("access_key"),
aws_secret=self.envconf.get("cloud",
{}).get("secret_key"))
self.logger.info("Registered version '%s'" %
(build_version))
self.register_status(bdoc,
"success",
job={"step": "metadata"},
publish={
"full": {
snapshot: {
"conf": self.envconf,
"metadata": full_info
}
}
})
except Exception as e:
self.logger.exception(
"Failed to upload snapshot metadata: %s" % e)
self.register_status(bdoc,
"failed",
job={
"step": "metadata",
"err": str(e)
},
publish={
"full": {
snapshot: {
"conf": self.envconf,
"metadata": {
"err": str(e)
}
}
}
})
raise
if "post" in steps:
pinfo["step"] = "post"
self.logger.info("Running post-publish step")
self.register_status(bdoc,
"post-publishing",
transient=True,
init=True,
job={"step": "post-publish"},
publish={"fulle": {
snapshot: {}
}})
job = yield from self.job_manager.defer_to_thread(
pinfo,
partial(self.post_publish, snapshot, self.envconf, bdoc))
job.add_done_callback(partial(done, step="post"))
yield from job
jobs.append(job)
def published(f):
try:
res = f.result()
self.logger.info("Snapshot '%s' uploaded to S3: %s" %
(snapshot, res),
extra={"notify": True})
except Exception as e:
self.logger.exception(
"Failed to upload snapshot '%s' uploaded to S3: %s" %
(snapshot, e),
extra={"notify": True})
if jobs:
yield from asyncio.wait(jobs)
task = asyncio.gather(*jobs)
task.add_done_callback(published)
yield from task
def done(f):
try:
_ = f.result()
except Exception as e:
self.logger.exception("Unable to publish full release: %s" % e)
raise
task = asyncio.ensure_future(do())
task.add_done_callback(done)
return task
def run_post_publish_snapshot(self, snapshot_name, repo_conf, build_doc):
return self.run_pre_post("snapshot", "post", snapshot_name, repo_conf,
build_doc)
def post_publish(self, snapshot_name, repo_conf, build_doc):
return self.run_post_publish_snapshot(snapshot_name, repo_conf,
build_doc)
|
Apache License 2.0
|
simon-bc/bcex
|
bcex/examples/candles_strategy.py
|
CandlesStrategy.is_new_candle
|
python
|
def is_new_candle(self, candles):
last_timestamp = candles.iloc[-1].name
if last_timestamp > self.latest_timestamp:
return True
return False
|
Checks if there is a new candle in the dataframe
Parameters
----------
candles : pd.DataFrame
dataframe of candles
Returns
-------
is_new_candle : bool
|
https://github.com/simon-bc/bcex/blob/e0578631805472c8bfe421d2729b2e2af4f9e438/bcex/examples/candles_strategy.py#L151-L166
|
import logging
from datetime import datetime, timedelta
import pandas as pd
import pytz
from bcex.core.orders import OrderSide, OrderType
from bcex.core.utils import datetime2unixepoch, unixepoch2datetime
from bcex.core.websocket_client import Environment, Channel
from bcex.examples.trader import BaseTrader
from requests import get
class CandlesStrategy(BaseTrader):
CHANNELS = Channel.PRIVATE + [Channel.TICKER, Channel.SYMBOLS, Channel.PRICES]
def __init__(
self,
symbol,
start_date,
heikin_ashi=False,
granularity=3600,
refresh_rate=60,
env=Environment.PROD,
**kwargs,
):
channel_kwargs = {"prices": {"granularity": granularity}}
super().__init__(
symbol,
refresh_rate=refresh_rate,
env=env,
channels_kwargs=channel_kwargs,
**kwargs,
)
self.heikin_ashi = heikin_ashi
self.granularity = granularity
self._historical_candles = None
self.start_date = start_date
self.latest_timestamp = None
def get_historical_candles(self):
end_date = datetime.now(pytz.UTC)
payload = {
"symbol": self.symbol,
"start": datetime2unixepoch(self.start_date),
"end": datetime2unixepoch(end_date),
"granularity": self.granularity,
}
prices_url = "https://api.blockchain.com/nabu-gateway/markets/exchange/prices?"
r = get(prices_url, params=payload)
res = r.json()
df_res = pd.DataFrame(
{
unixepoch2datetime(rec[0]): {
"open": rec[1],
"high": rec[2],
"low": rec[3],
"close": rec[4],
}
for rec in res["prices"]
}
).T
return df_res.sort_index()
@property
def historical_candles(self):
if self._historical_candles is None:
self._historical_candles = self.get_historical_candles()
return self._historical_candles
def _check_candle_is_finished(self, rec):
return unixepoch2datetime(rec[0]) + timedelta(
seconds=self.granularity
) < datetime.now(pytz.UTC)
def get_latest_candles(self):
res = self.exchange.get_candles(self.symbol)
if res:
df_res = pd.DataFrame(
{
unixepoch2datetime(rec[0]): {
"open": rec[1],
"high": rec[2],
"low": rec[3],
"close": rec[4],
}
for rec in res
if self._check_candle_is_finished(rec)
}
).T
return df_res.sort_index()
return pd.DataFrame()
@property
def live_candles(self):
return self.get_latest_candles()
def get_candle_df(self):
live_candles = self.live_candles
if live_candles.empty:
return self.historical_candles.copy()
min_time = live_candles.iloc[0].name
historical_candles = self.historical_candles[
self.historical_candles.index < min_time
]
df = pd.concat([historical_candles, live_candles]).sort_index()
return df
|
MIT License
|
jansel/opentuner
|
opentuner/search/manipulator.py
|
PrimitiveParameter.copy_value
|
python
|
def copy_value(self, src, dst):
self.set_value(dst, self.get_value(src))
|
copy the value of this parameter from src to dst config
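A hedged sketch, assuming IntegerParameter (defined elsewhere in this module) behaves like the other primitive parameters whose get_value/set_value read and write config[name] on plain dict configurations.
p = IntegerParameter('threads', 1, 16)
src = {'threads': 8}
dst = {'threads': 2}
p.copy_value(src, dst)    # dst now carries src's value for this parameter
print(dst['threads'])     # 8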
|
https://github.com/jansel/opentuner/blob/070c5cef6d933eb760a2f9cd5cd08c95f27aee75/opentuner/search/manipulator.py#L462-L464
|
from __future__ import division
import abc
import argparse
import collections
import copy
import hashlib
import inspect
import json
import logging
import math
import os
import pickle
import random
import sys
from builtins import map
from builtins import object
from builtins import range
from builtins import str
from functools import reduce
import numpy
from fn import _
from future.utils import with_metaclass
from past.utils import old_div
log = logging.getLogger(__name__)
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--list-params', '-lp',
help='list available parameter classes')
class ConfigurationManipulatorBase(with_metaclass(abc.ABCMeta, object)):
FILE_FORMATS = {'default': json, 'json': json,
'pickle': pickle, 'pk': pickle}
def validate(self, config):
return all(map(_.validate(config), self.parameters(config)))
def normalize(self, config):
for param in self.parameters(config):
param.normalize(config)
def set_search_driver(self, search_driver):
pass
def copy(self, config):
return copy.deepcopy(config)
def parameters_dict(self, config):
return dict([(p.name, p) for p in self.parameters(config)])
def param_names(self, *args):
return sorted(reduce(set.union,
[set(map(_.name, self.parameters(cfg)))
for cfg in args]))
def linear_config(self, a, cfg_a, b, cfg_b, c, cfg_c):
dst = self.copy(cfg_a)
dst_params = self.proxy(dst)
for k in self.param_names(dst, cfg_a, cfg_b, cfg_c):
dst_params[k].op4_set_linear(cfg_a, cfg_b, cfg_c, a, b, c)
return dst
def _get_serializer(self, filename, format=None):
if format is None:
format = os.path.splitext(filename)[1].lower().replace('.', '')
if format not in self.FILE_FORMATS:
serializer = self.FILE_FORMATS['default']
if len(self.FILE_FORMATS) > 1:
log.warning('Unknown file format "%s", using "%s" instead', format,
serializer.__name__)
else:
serializer = self.FILE_FORMATS[format]
return serializer
def save_to_file(self, cfg, filename, format=None, mode="w"):
with open(filename, mode) as fd:
self._get_serializer(filename, format).dump(cfg, fd)
def load_from_file(self, filename, format=None):
with open(filename, 'rb') as fd:
return self._get_serializer(filename, format).load(fd)
def proxy(self, cfg):
return ManipulatorProxy(self, cfg)
@abc.abstractmethod
def random(self):
return
@abc.abstractmethod
def parameters(self, config):
return list()
@abc.abstractmethod
def hash_config(self, config):
return
class ConfigurationManipulator(ConfigurationManipulatorBase):
def __init__(self, params=None, config_type=dict, seed_config=None, **kwargs):
if params is None:
params = []
self.params = list(params)
self.config_type = config_type
self.search_driver = None
self._seed_config = seed_config
super(ConfigurationManipulator, self).__init__(**kwargs)
for p in self.params:
p.parent = self
def add_parameter(self, p):
p.set_parent(self)
self.params.append(p)
sub_params = p.sub_parameters()
for sp in sub_params:
sp.set_parent(p)
self.params.extend(sub_params)
def set_search_driver(self, search_driver):
self.search_driver = search_driver
def seed_config(self):
if self._seed_config:
cfg = copy.deepcopy(self._seed_config)
else:
cfg = self.config_type()
for p in self.params:
if not isinstance(p.name, str) or '/' not in p.name:
cfg[p.name] = p.seed_value()
return cfg
def random(self):
cfg = self.seed_config()
for p in self.parameters(cfg):
p.op1_randomize(cfg)
return cfg
def parameters(self, config):
if type(config) is not self.config_type:
log.error("wrong type, expected %s got %s",
str(self.config_type),
str(type(config)))
raise TypeError()
return self.params
def parameters_to_json(self):
def param_info_to_json(param, sub_parameters):
sub_parameter_counts = {}
if isinstance(param, str):
param_name = param
else:
param_name = param.__class__.__name__
out = ['[', param_name, ',{']
if len(sub_parameters) > 0:
for sp in sub_parameters:
spout = param_info_to_json(sp, sp.sub_parameters())
sub_parameter_counts[spout] = sub_parameter_counts.get(spout, 0) + 1
for sp in sorted(sub_parameter_counts):
out.append(sp)
out.append(':')
out.append(str(sub_parameter_counts[sp]))
out.append(',')
out.pop()
out.append('}]')
return ''.join(out)
params = [p for p in self.params if p.parent is self]
return param_info_to_json(self, params)
def hash_config(self, config):
m = hashlib.sha256()
params = list(self.parameters(config))
params.sort(key=_.name)
for i, p in enumerate(params):
m.update(str(p.name).encode())
m.update(p.hash_value(config))
m.update(str(i).encode())
m.update(b"|")
return m.hexdigest()
def search_space_size(self):
return reduce(_ * _, [x.search_space_size() for x in self.params])
def difference(self, cfg1, cfg2):
cfg = self.copy(cfg1)
for param in self.parameters(cfg1):
if param.is_primitive(cfg1):
param.set_value(cfg, param.get_value(cfg1) - param.get_value(cfg2))
else:
pass
return cfg
def applySVs(self, cfg, sv_map, args, kwargs):
param_dict = self.parameters_dict(cfg)
for pname in self.param_names(cfg):
param = param_dict[pname]
getattr(param, sv_map[pname])(cfg, *args[pname], **kwargs[pname])
class Parameter(with_metaclass(abc.ABCMeta, object)):
def __init__(self, name):
self.name = name
self.parent = None
super(Parameter, self).__init__()
def _to_storage_type(self, val):
return val
def _from_storage_type(self, sval):
return sval
def _read_node(self, config):
node = config
if not isinstance(self.name, str):
return node, self.name
name_parts = self.name.split('/')
for part in name_parts[:-1]:
if isinstance(node, list):
part = int(part)
node = node[part]
part = name_parts[-1]
if isinstance(node, list):
part = int(part)
return node, part
def _get(self, config):
node, part = self._read_node(config)
return self._from_storage_type(node[part])
def _set(self, config, v):
node, part = self._read_node(config)
node[part] = self._to_storage_type(v)
def set_parent(self, manipulator):
self.parent = manipulator
def validate(self, config):
return True
def is_primitive(self, ignored=None):
return isinstance(self, PrimitiveParameter)
def is_permutation(self, ignored=None):
return isinstance(self, PermutationParameter)
def manipulators(self, config):
return [self.op1_randomize]
def normalize(self, config):
pass
def sub_parameters(self):
return []
@abc.abstractmethod
def op1_randomize(self, cfg):
pass
@abc.abstractmethod
def seed_value(self):
return
@abc.abstractmethod
def copy_value(self, src, dst):
pass
@abc.abstractmethod
def same_value(self, cfg1, cfg2):
return
@abc.abstractmethod
def hash_value(self, config):
return
@abc.abstractmethod
def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
pass
def search_space_size(self):
return 1
def op1_nop(self, cfg):
pass
def op3_swarm(self, cfg, cfg1, cfg2, c, c1, c2, *args, **kwargs):
self.opn_stochastic_mix(cfg, [cfg, cfg1, cfg2], [c, c1, c2])
def opn_stochastic_mix(self, cfg, cfgs, ratio, *args, **kwargs):
assert len(cfgs) == len(ratio)
r = random.random()
c = old_div(numpy.array(ratio, dtype=float), sum(ratio))
for i in range(len(c)):
if r < sum(c[:i + 1]):
self.copy_value(cfg, cfgs[i])
break
class PrimitiveParameter(with_metaclass(abc.ABCMeta, Parameter)):
def __init__(self, name, value_type=float, **kwargs):
self.value_type = value_type
super(PrimitiveParameter, self).__init__(name, **kwargs)
def hash_value(self, config):
self.normalize(config)
return hashlib.sha256(repr(self.get_value(config)).encode('utf-8')).hexdigest().encode('utf-8')
|
MIT License
|
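PrimitiveParameter.copy_value above simply reads a value out of one configuration and writes it into another through get_value/set_value. Those accessors live in concrete subclasses that are not shown here, so the following is only an illustrative stand-in parameter over plain dict configs, not opentuner's own API:
class DictIntParam:
    # hypothetical concrete parameter; real opentuner parameters subclass PrimitiveParameter
    def __init__(self, name):
        self.name = name
    def get_value(self, config):
        return config[self.name]
    def set_value(self, config, value):
        config[self.name] = value
    def copy_value(self, src, dst):
        # same one-liner as PrimitiveParameter.copy_value
        self.set_value(dst, self.get_value(src))
src = {"threads": 8}
dst = {"threads": 1}
DictIntParam("threads").copy_value(src, dst)
print(dst)  # {'threads': 8}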
iterative/dvc
|
dvc/objects/db/index.py
|
ObjectDBIndex.dir_hashes
|
python
|
def dir_hashes(self):
yield from (hash_ for hash_, is_dir in self.index.items() if is_dir)
|
Iterate over .dir hashes stored in the index.
|
https://github.com/iterative/dvc/blob/3a100382bc5d50a4f1243b1c5d894bb5d7058dbf/dvc/objects/db/index.py#L105-L107
|
import logging
import os
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Iterable, Optional, Set
from ..errors import ObjectDBError
if TYPE_CHECKING:
from dvc.types import StrPath
logger = logging.getLogger(__name__)
class ObjectDBIndexBase(ABC):
@abstractmethod
def __init__(
self, tmp_dir: "StrPath", name: str, dir_suffix: Optional[str] = None
):
pass
@abstractmethod
def __iter__(self):
pass
@abstractmethod
def __contains__(self, hash_):
pass
def hashes(self):
return iter(self)
@abstractmethod
def dir_hashes(self):
pass
@abstractmethod
def clear(self):
pass
@abstractmethod
def update(self, dir_hashes: Iterable[str], file_hashes: Iterable[str]):
pass
@abstractmethod
def intersection(self, hashes: Set[str]):
pass
class ObjectDBIndexNoop(ObjectDBIndexBase):
def __init__(
self, tmp_dir: "StrPath", name: str, dir_suffix: Optional[str] = None
):
pass
def __iter__(self):
return iter([])
def __contains__(self, hash_):
return False
def dir_hashes(self):
return []
def clear(self):
pass
def update(self, dir_hashes: Iterable[str], file_hashes: Iterable[str]):
pass
def intersection(self, hashes: Set[str]):
return []
class ObjectDBIndex(ObjectDBIndexBase):
INDEX_SUFFIX = ".idx"
INDEX_DIR = "index"
def __init__(
self, tmp_dir: "StrPath", name: str, dir_suffix: Optional[str] = None
):
from diskcache import Index
from dvc.fs.local import LocalFileSystem
from dvc.utils.fs import makedirs
self.index_dir = os.path.join(tmp_dir, self.INDEX_DIR, name)
makedirs(self.index_dir, exist_ok=True)
self.fs = LocalFileSystem()
self.index = Index(self.index_dir)
if not dir_suffix:
dir_suffix = self.fs.CHECKSUM_DIR_SUFFIX
self.dir_suffix = dir_suffix
def __iter__(self):
return iter(self.index)
def __contains__(self, hash_):
return hash_ in self.index
|
Apache License 2.0
|
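ObjectDBIndex.dir_hashes filters the persisted diskcache Index down to entries whose value flags them as .dir objects. The same generator expression can be exercised with a plain dict standing in for the index (the hash strings below are made up); this only illustrates the filtering, it is not dvc's public API:
# hash -> is_dir flag, the same mapping dir_hashes iterates over
index = {
    "3fdba35f04dc8c462986c992bcf87554.dir": True,
    "aaf4c61ddcc5e8a2dabede0f3b482cd9": False,
    "de9f2c7fd25e1b3afad3e85a0bd17d9b.dir": True,
}
def dir_hashes(index):
    # same comprehension as ObjectDBIndex.dir_hashes above
    yield from (hash_ for hash_, is_dir in index.items() if is_dir)
print(sorted(dir_hashes(index)))
# ['3fdba35f04dc8c462986c992bcf87554.dir', 'de9f2c7fd25e1b3afad3e85a0bd17d9b.dir']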
rucio/rucio
|
lib/rucio/rse/protocols/globus.py
|
GlobusRSEProtocol.lfns2pfns
|
python
|
def lfns2pfns(self, lfns):
pfns = {}
prefix = self.attributes['prefix']
if not prefix.startswith('/'):
prefix = ''.join(['/', prefix])
if not prefix.endswith('/'):
prefix = ''.join([prefix, '/'])
lfns = [lfns] if isinstance(lfns, dict) else lfns
for lfn in lfns:
scope, name = lfn['scope'], lfn['name']
if 'path' in lfn and lfn['path'] is not None:
pfns['%s:%s' % (scope, name)] = ''.join([prefix, lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:]])
else:
pfns['%s:%s' % (scope, name)] = ''.join([prefix, self._get_path(scope=scope, name=name)])
return pfns
|
Returns a fully qualified PFN for the file referred to by path.
:param path: The path to the file.
:returns: Fully qualified PFN.
|
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/rse/protocols/globus.py#L53-L77
|
from __future__ import print_function
import logging
from six import string_types
from six.moves.urllib.parse import urlparse
from rucio.common import exception
from rucio.common.extra import import_extras
from rucio.core.rse import get_rse_attribute
from rucio.rse.protocols.protocol import RSEProtocol
from rucio.transfertool.globus_library import get_transfer_client, send_delete_task, send_bulk_delete_task
EXTRA_MODULES = import_extras(['globus_sdk'])
if EXTRA_MODULES['globus_sdk']:
from globus_sdk import TransferAPIError
class GlobusRSEProtocol(RSEProtocol):
def __init__(self, protocol_attr, rse_settings, logger=logging.log):
super(GlobusRSEProtocol, self).__init__(protocol_attr, rse_settings, logger=logger)
self.globus_endpoint_id = get_rse_attribute(key='globus_endpoint_id', rse_id=self.rse.get('id'))
self.logger = logger
|
Apache License 2.0
|
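lfns2pfns first normalizes the protocol prefix to carry leading and trailing slashes and then joins it with either the LFN's explicit path or a path derived from scope and name. The path-carrying branch can be replayed standalone (the prefix and LFN below are made up; the _get_path branch, which needs the RSE machinery, is omitted):
def lfns2pfns(lfns, prefix):
    # same prefix normalization as GlobusRSEProtocol.lfns2pfns
    if not prefix.startswith('/'):
        prefix = '/' + prefix
    if not prefix.endswith('/'):
        prefix = prefix + '/'
    pfns = {}
    lfns = [lfns] if isinstance(lfns, dict) else lfns
    for lfn in lfns:
        scope, name = lfn['scope'], lfn['name']
        path = lfn['path'].lstrip('/')  # avoid a double slash after the prefix
        pfns['%s:%s' % (scope, name)] = prefix + path
    return pfns
print(lfns2pfns({'scope': 'user.jdoe', 'name': 'file1', 'path': '/user.jdoe/file1'}, 'data'))
# {'user.jdoe:file1': '/data/user.jdoe/file1'}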
autopi-io/autopi-core
|
src/salt/base/ext/_modules/clock.py
|
set
|
python
|
def set(value, adjust_system_clock=False):
ret = {}
cmd = ["timedatectl"]
if adjust_system_clock:
cmd.append("--adjust-system-clock")
cmd.append("set-time '{:s}'".format(value))
res = __salt__["cmd.run_all"](" ".join(cmd))
if res["retcode"] != 0:
raise salt.exceptions.CommandExecutionError(res["stderr"])
return ret
|
Set system time.
Arguments:
- value (str): Time string to set.
Optional arguments:
- adjust_system_clock (bool): Default is 'False'.
|
https://github.com/autopi-io/autopi-core/blob/db50b2d8073af2d0f50a7c83b7cb370491749e2d/src/salt/base/ext/_modules/clock.py#L39-L61
|
import logging
import salt.exceptions
__virtualname__ = "clock"
log = logging.getLogger(__name__)
def __virtual__():
return __virtualname__
def help():
return __salt__["sys.doc"](__virtualname__)
def status():
ret = {}
res = __salt__["cmd.run"]("timedatectl status")
pairs = (l.split(": ") for l in res.splitlines())
for k, v in pairs:
ret[k.strip().lower().replace(" ", "_")] = v.strip()
return ret
|
Apache License 2.0
|
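set only assembles a timedatectl set-time command and hands it to Salt's cmd.run_all; the command string it builds can be previewed without Salt by repeating the same list construction:
def build_cmd(value, adjust_system_clock=False):
    # mirrors the command assembly in clock.set above
    cmd = ["timedatectl"]
    if adjust_system_clock:
        cmd.append("--adjust-system-clock")
    cmd.append("set-time '{:s}'".format(value))
    return " ".join(cmd)
print(build_cmd("2021-01-01 12:00:00"))
# timedatectl set-time '2021-01-01 12:00:00'
print(build_cmd("2021-01-01 12:00:00", adjust_system_clock=True))
# timedatectl --adjust-system-clock set-time '2021-01-01 12:00:00'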
betterlife/betterlifepsi
|
psi/app/views/base.py
|
ModelViewWithAccess.edit_view
|
python
|
def edit_view(self):
model, return_url = self.get_model_return_url()
if model is not None and not model.can_edit():
flash(gettext('You are not allowed to edit this object'))
return redirect(return_url)
return super(ModelViewWithAccess, self).edit_view()
|
Edit model view with model-specific can_edit support.
Whether the model can be edited is decided by the model itself.
|
https://github.com/betterlife/betterlifepsi/blob/ce44b11749553d4c685d3ab76ec2fcf0ffabe25e/psi/app/views/base.py#L144-L153
|
from flask_admin import expose
from flask_babelex import gettext
from flask_babelex import lazy_gettext
from flask import url_for, request, flash, has_request_context
from flask_admin._compat import as_unicode
from flask_admin.contrib.sqla import ModelView
from flask_admin.model.helpers import get_mdict_item_or_list
from flask_security import current_user
from psi.app.utils import get_user_roles, has_organization_field, is_super_admin
from sqlalchemy import func
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from wtforms import ValidationError
class ModelViewWithAccess(ModelView):
column_default_sort = ('id', True)
"""
Default placeholder for the search box
"""
search_placeholder = lazy_gettext('Search(Support first letter of Pinyin)')
def is_accessible(self):
return self.can()
@property
def can_create(self):
return self.can(operation='create')
@property
def can_delete(self):
return self.can(operation='delete')
@property
def can_edit(self):
return self.can(operation='edit')
@property
def can_export(self):
return False
@property
def can_view_details(self):
return True
@property
def role_identify(self):
return self.model.__tablename__
def can(self, operation='view'):
obj_id = get_mdict_item_or_list(request.args, 'id') if has_request_context() else None
obj = None if obj_id is None else self.get_one(obj_id)
if obj is None:
same_org = True
else:
if has_organization_field(obj):
if obj.organization is None:
same_org = False
else:
same_org = (obj.organization.id == current_user.organization.id)
else:
same_org = True
role_assigned = same_org and current_user.is_authenticated and (self.role_identify + '_' + operation in get_user_roles())
return (is_super_admin()) or (role_assigned)
def handle_view_exception(self, exc):
from sqlalchemy.exc import InvalidRequestError
if isinstance(exc, ValidationError):
flash(as_unicode(exc), category='error')
return True
elif isinstance(exc, InvalidRequestError):
flash(as_unicode(exc), category='error')
return True
return super(ModelViewWithAccess, self).handle_view_exception(exc)
def get_query(self):
if has_organization_field(self.model):
return self.session.query(self.model).filter(self.model.organization == current_user.organization)
else:
return super(ModelViewWithAccess, self).get_query()
def get_count_query(self):
if has_organization_field(self.model):
return self.session.query(func.count('*')).filter(self.model.organization == current_user.organization)
else:
return super(ModelViewWithAccess, self).get_count_query()
def on_model_change(self, form, model, is_created):
if has_organization_field(self.model):
if is_created:
model.organization = current_user.organization
elif model.organization != current_user.organization:
ValidationError(gettext('You are not allowed to change this record'))
def on_model_delete(self, model):
if has_organization_field(model) and model.organization != current_user.organization:
ValidationError(gettext('You are not allowed to delete this record'))
def _handle_view(self, name, **kwargs):
if not self.is_accessible():
if current_user.is_authenticated:
abort(403)
else:
return redirect(url_for('security.login', next=request.url))
def get_model_return_url(self):
from flask_admin.helpers import get_redirect_target
return_url = get_redirect_target() or self.get_url('.index_view')
model_id = get_mdict_item_or_list(request.args, 'id')
model = self.get_one(model_id)
return model, return_url
@expose('/edit/', methods=('GET', 'POST'))
|
MIT License
|
docusign/docusign-python-client
|
docusign_esign/models/text_custom_field.py
|
TextCustomField.required
|
python
|
def required(self):
return self._required
|
Gets the required of this TextCustomField. # noqa: E501
When set to **true**, the signer is required to fill out this tab # noqa: E501
:return: The required of this TextCustomField. # noqa: E501
:rtype: str
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/text_custom_field.py#L169-L177
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class TextCustomField(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'configuration_type': 'str',
'error_details': 'ErrorDetails',
'field_id': 'str',
'name': 'str',
'required': 'str',
'show': 'str',
'value': 'str'
}
attribute_map = {
'configuration_type': 'configurationType',
'error_details': 'errorDetails',
'field_id': 'fieldId',
'name': 'name',
'required': 'required',
'show': 'show',
'value': 'value'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._configuration_type = None
self._error_details = None
self._field_id = None
self._name = None
self._required = None
self._show = None
self._value = None
self.discriminator = None
setattr(self, "_{}".format('configuration_type'), kwargs.get('configuration_type', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('field_id'), kwargs.get('field_id', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('required'), kwargs.get('required', None))
setattr(self, "_{}".format('show'), kwargs.get('show', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
@property
def configuration_type(self):
return self._configuration_type
@configuration_type.setter
def configuration_type(self, configuration_type):
self._configuration_type = configuration_type
@property
def error_details(self):
return self._error_details
@error_details.setter
def error_details(self, error_details):
self._error_details = error_details
@property
def field_id(self):
return self._field_id
@field_id.setter
def field_id(self, field_id):
self._field_id = field_id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
|
MIT License
|
maralla/completor.vim
|
pythonx/completor/utils.py
|
check_subseq
|
python
|
def check_subseq(src, target):
if not src:
return 0
score = i = 0
src, target = src.lower(), target.lower()
src_len, target_len = len(src), len(target)
for index, e in enumerate(target):
if src_len - i > target_len - index:
return
if e != src[i]:
continue
if index == 0:
score = -999
score += index
i += 1
if i == src_len:
return score
|
Check if src is a subsequence of target.
|
https://github.com/maralla/completor.vim/blob/6ca5f498afe5fe9c659751aef54ef7f2fdc62414/pythonx/completor/utils.py#L58-L77
|
import logging
import functools
from ._vim import vim_obj
logger = logging.getLogger('completor')
def ignore_exception(fallback=()):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception(e)
return fallback
return wrapper
return deco
class _highlight(object):
HIGHLIGHT_GROUP_MAP = {
'warn': 'WarningMsg',
'error': 'ErrorMsg'
}
def __init__(self, severity):
self.severity = severity
def _echohl(self, group):
vim_obj.command('echohl {}'.format(group))
def __enter__(self):
self._echohl(self.HIGHLIGHT_GROUP_MAP.get(self.severity, 'Normal'))
def __exit__(self, et, ev, tb):
self._echohl('None')
def echo(message, severity='info'):
with _highlight(severity):
vim_obj.command('echo {!r}'.format(message))
|
MIT License
|
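check_subseq returns None when src is not a subsequence of target, 0 for an empty src, and otherwise a score that sums the matched indices, with a -999 bonus when the match starts at index 0 so that prefix matches sort first. With the definition above in scope, for example:
# values follow directly from the code above
print(check_subseq('', 'anything'))   # 0    empty source always matches
print(check_subseq('ab', 'acb'))      # -997 starts at index 0: -999 + 0 + 2
print(check_subseq('foo', 'barfoo'))  # 12   3 + 4 + 5, no index-0 bonus
print(check_subseq('xyz', 'abc'))     # None not a subsequence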
daxia4444/djangospider
|
djangospider/mycrawl/run.py
|
Start.__init__
|
python
|
def __init__(self,ulr_parse,kind,sche_queue=sche_queue,task_queue=task_queue,failure_queue=failure_queue,success_queue=success_queue):
logger.info("i init Start class")
self.sche_queue=sche_queue
self.task_queue=task_queue
self.failure_queue=failure_queue
self.success_queue=success_queue
self.ulr_parse=ulr_parse
self.kind=kind
for item in self.ulr_parse:
self.sche_queue.put(item)
self.run()
|
init the spider data
url_parse: the list of URLs that will be crawled
kind: three download ways: 1 -> multithread
2 -> tornado async
3 -> twisted async
sche_queue: this queue stores the URL addresses waiting to be crawled
task_queue: when a webpage has been crawled, its HTML content and callback function are stored in this queue
|
https://github.com/daxia4444/djangospider/blob/2fef0852daebff0487d7d9787950e694027ebf85/djangospider/mycrawl/run.py#L39-L59
|
import os
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%H:%M:%S',)
logger= logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
fh=logging.FileHandler(os.path.join(os.getcwd(),"spider.log"))
fh.setLevel(logging.WARNING)
fh.setFormatter(formatter)
logger.addHandler(fh)
import sys
from crawler.thread_urllib import Download_task
from parse.parse_response import Parse_Task
from multiprocessing import Queue
import multiprocessing
from crawler.async_tornado import download_AsyncHTTPClient
from crawler.async_twisted import Twisted_download
sche_queue=Queue(100)
task_queue=Queue(100)
failure_queue=Queue(100)
success_queue=Queue(100)
class Start():
|
Apache License 2.0
|
ngageoint/sarpy
|
sarpy/consistency/cphd_consistency.py
|
per_channel
|
python
|
def per_channel(method):
method.per_channel = True
return method
|
Decorator to mark check methods as being applicable to each CPHD channel
Parameters
----------
method : Callable
Method to mark
Returns
-------
Callable
Marked input `method`
|
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/consistency/cphd_consistency.py#L152-L168
|
__classification__ = "UNCLASSIFIED"
__author__ = "Nathan Bombaci, Valkyrie"
import logging
import argparse
import collections
import functools
import itertools
import numbers
import os
import re
from typing import List
import numpy as np
import numpy.polynomial.polynomial as npp
from sarpy.geometry import geocoords
import sarpy.consistency.consistency as con
import sarpy.consistency.parsers as parsers
import sarpy.io.phase_history.cphd1_elements.CPHD
import sarpy.io.phase_history.cphd1_elements.utils as cphd1_utils
from sarpy.io.phase_history.cphd_schema import get_schema_path
logger = logging.getLogger(__name__)
try:
import pytest
except ImportError:
pytest = None
logger.critical(
'Functionality for CPHD consistency testing cannot proceed WITHOUT the pytest '
'package')
try:
from lxml import etree
except ImportError:
etree = None
pytest = None
logger.critical(
'Functionality for CPHD consistency testing cannot proceed WITHOUT the lxml '
'package')
try:
import shapely.geometry as shg
have_shapely = True
except ImportError:
have_shapely = False
try:
import networkx as nx
have_networkx = True
except ImportError:
have_networkx = False
INVALID_CHAR_REGEX = re.compile(r'\W')
DEFAULT_SCHEMA = get_schema_path(version='1.0.1')
def strip_namespace(root):
for elem in root.iter():
try:
elem.tag = elem.tag.split('}')[-1]
except (AttributeError, TypeError):
pass
nsmap = root.nsmap
nsmap.pop(None, None)
new_root = etree.Element(root.tag, nsmap)
new_root[:] = root[:]
return new_root
def parse_pvp_elem(elem):
if elem.tag == "AddedPVP":
name = elem.find('Name').text
else:
name = elem.tag
offset = int(elem.find('Offset').text)
size = int(elem.find('Size').text)
dtype = cphd1_utils.binary_format_string_to_dtype(elem.find('Format').text)
return name, {"offset": offset,
"size": size,
"dtype": dtype}
def read_header(file_handle):
file_handle.seek(0, 0)
version = file_handle.readline().decode()
assert version.startswith('CPHD/1.0')
header = sarpy.io.phase_history.cphd1_elements.CPHD.CPHDHeader.from_file_object(file_handle)
return {k:getattr(header, k) for k in header._fields if getattr(header, k) is not None}
|
MIT License
|
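per_channel only tags the function object; the consistency checker is then free to discover tagged check methods and invoke them once per channel. The class and channel names below are made up purely to show how such a marker attribute is typically consumed:
def per_channel(method):
    method.per_channel = True
    return method
class Checks:
    @per_channel
    def check_pvp(self, channel):
        print("checking PVP for channel", channel)
    def check_header(self):
        print("checking header once")
checks = Checks()
for name in dir(checks):
    if not name.startswith("check_"):
        continue
    method = getattr(checks, name)
    if getattr(method, "per_channel", False):
        for channel in ("CH1", "CH2"):  # made-up channel identifiers
            method(channel)
    else:
        method()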
paylogic/py2deb
|
setup.py
|
have_environment_marker_support
|
python
|
def have_environment_marker_support():
try:
from pkg_resources import parse_version
from setuptools import __version__
return parse_version(__version__) >= parse_version('0.7.2')
except Exception:
return False
|
Check whether setuptools has support for PEP-426 environment markers.
Based on the ``setup.py`` script of the ``pytest`` package:
https://bitbucket.org/pytest-dev/pytest/src/default/setup.py
|
https://github.com/paylogic/py2deb/blob/28e671231c2dcf7dc7b4963ed75d8cbe4a8778e9/setup.py#L81-L93
|
import codecs
import os
import re
import sys
from setuptools import find_packages, setup
def get_contents(*args):
with codecs.open(get_absolute_path(*args), 'r', 'UTF-8') as handle:
return handle.read()
def get_version(*args):
contents = get_contents(*args)
metadata = dict(re.findall('__([a-z]+)__ = [\'"]([^\'"]+)', contents))
return metadata['version']
def get_install_requires():
install_requires = get_requirements('requirements.txt')
if 'bdist_wheel' not in sys.argv:
if sys.version_info[:2] <= (2, 6) or sys.version_info[:2] == (3, 0):
install_requires.append('importlib')
return sorted(install_requires)
def get_extras_require():
extras_require = {}
if have_environment_marker_support():
expression = ':python_version == "2.6" or python_version == "3.0"'
extras_require[expression] = ['importlib']
return extras_require
def get_absolute_path(*args):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
def get_requirements(*args):
requirements = set()
with open(get_absolute_path(*args)) as handle:
for line in handle:
line = re.sub(r'^#.*|\s#.*', '', line)
if line and not line.isspace():
requirements.add(re.sub(r'\s+', '', line))
return sorted(requirements)
|
MIT License
|
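The check boils down to a single pkg_resources version comparison; get_extras_require in the same setup.py (shown above) only adds the environment-marker keyed requirement when it returns True. In isolation:
from pkg_resources import parse_version
print(parse_version('0.7.2') >= parse_version('0.7.2'))   # True  -> markers usable
print(parse_version('0.6.49') >= parse_version('0.7.2'))  # False -> helper returns False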
mklan/nx-rom-market
|
lib/python3.5/nx/filesystem.py
|
Savedata.unmount
|
python
|
def unmount(self):
_nx.fsdev_unmount_device('save')
|
Unmounts the savedata filesystem.
|
https://github.com/mklan/nx-rom-market/blob/33613d2177b63df9e0568038ffdf1dd91ad334d8/lib/python3.5/nx/filesystem.py#L157-L159
|
import pathlib
import shutil
import _nx
from . import users
SAVEDATA_BASE_PATH = 'save:/'
ROMFS_BASE_PATH = 'romfs:/'
mounted_romfs = None
mounted_savedata = None
class FileSystem:
def __init__(self, base_path: str):
self.base_path = pathlib.Path(base_path)
def open(self, file_path: str, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
return self.base_path.joinpath(file_path).open(mode=mode, buffering=buffering,
encoding=encoding, errors=errors,
newline=newline)
class MountableFileSystem(FileSystem):
def __init__(self, base_path):
super().__init__(base_path)
@property
def is_mounted(self):
raise NotImplementedError
def open(self, file_path: str, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
if not self.is_mounted:
self.mount()
return super().open(file_path, mode=mode, buffering=buffering, encoding=encoding,
errors=errors, newline=newline)
def mount(self):
raise NotImplementedError
def commit(self):
raise NotImplementedError
def unmount(self):
raise NotImplementedError
def __enter__(self):
self.mount()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.commit()
self.unmount()
class RomFS(MountableFileSystem):
def __init__(self, title):
super().__init__(ROMFS_BASE_PATH)
self.title = title
@property
def is_mounted(self):
return self is mounted_romfs
def mount(self):
raise NotImplementedError
def unmount(self):
_nx.fsdev_unmount_device('romfs')
def __exit__(self, exc_type, exc_val, exc_tb):
self.unmount()
class Savedata(MountableFileSystem):
def __init__(self, title, user=None):
super().__init__(SAVEDATA_BASE_PATH)
self.title = title
self.user = user if user is not None else users.active_user
@property
def is_mounted(self):
return self is mounted_savedata
def mount(self):
if self.is_mounted:
return
if self.user is None:
raise RuntimeError("No active user, you need to launch and "
"close a game prior to launching HBL.")
_nx.fs_mount_savedata('save', self.title.id, self.user.id)
global mounted_savedata
mounted_savedata = self
def commit(self):
_nx.fsdev_commit_device('save')
|
MIT License
|
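Savedata.unmount is normally reached through the MountableFileSystem context-manager protocol (mount on __enter__, commit then unmount on __exit__) rather than called directly. The real class needs the _nx homebrew runtime, so the runnable stand-in below only mimics that call sequence:
class FakeSavedata:
    # stand-in that prints the _nx calls Savedata would make
    def mount(self):
        print("fs_mount_savedata('save', ...)")
    def commit(self):
        print("fsdev_commit_device('save')")
    def unmount(self):
        print("fsdev_unmount_device('save')")
    def __enter__(self):
        self.mount()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.commit()
        self.unmount()
with FakeSavedata():
    print("read/write files under save:/ here")
# exiting the block commits and then unmounts, mirroring Savedata above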
assertpy/assertpy
|
assertpy/string.py
|
StringMixin.is_lower
|
python
|
def is_lower(self):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if self.val != self.val.lower():
return self.error('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
return self
|
Asserts that val is a non-empty string and all characters are lowercase (using ``str.lower()``).
Examples:
Usage::
assert_that('foo').is_lower()
Returns:
AssertionBuilder: returns this instance to chain to the next assertion
Raises:
AssertionError: if val is **not** lowercase
|
https://github.com/assertpy/assertpy/blob/c970c6612a80aa10769dc612324630d27019e1b5/assertpy/string.py#L352-L372
|
import sys
import re
import collections
if sys.version_info[0] == 3:
str_types = (str,)
unicode = str
Iterable = collections.abc.Iterable
else:
str_types = (basestring,)
unicode = unicode
Iterable = collections.Iterable
__tracebackhide__ = True
class StringMixin(object):
def is_equal_to_ignoring_case(self, other):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if not isinstance(other, str_types):
raise TypeError('given arg must be a string')
if self.val.lower() != other.lower():
return self.error('Expected <%s> to be case-insensitive equal to <%s>, but was not.' % (self.val, other))
return self
def contains_ignoring_case(self, *items):
if len(items) == 0:
raise ValueError('one or more args must be given')
if isinstance(self.val, str_types):
if len(items) == 1:
if not isinstance(items[0], str_types):
raise TypeError('given arg must be a string')
if items[0].lower() not in self.val.lower():
return self.error('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0]))
else:
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings')
if i.lower() not in self.val.lower():
missing.append(i)
if missing:
return self.error('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (
self.val, self._fmt_items(items), self._fmt_items(missing)))
elif isinstance(self.val, Iterable):
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings')
found = False
for v in self.val:
if not isinstance(v, str_types):
raise TypeError('val items must all be strings')
if i.lower() == v.lower():
found = True
break
if not found:
missing.append(i)
if missing:
return self.error('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (
self.val, self._fmt_items(items), self._fmt_items(missing)))
else:
raise TypeError('val is not a string or iterable')
return self
def starts_with(self, prefix):
if prefix is None:
raise TypeError('given prefix arg must not be none')
if isinstance(self.val, str_types):
if not isinstance(prefix, str_types):
raise TypeError('given prefix arg must be a string')
if len(prefix) == 0:
raise ValueError('given prefix arg must not be empty')
if not self.val.startswith(prefix):
return self.error('Expected <%s> to start with <%s>, but did not.' % (self.val, prefix))
elif isinstance(self.val, Iterable):
if len(self.val) == 0:
raise ValueError('val must not be empty')
first = next(iter(self.val))
if first != prefix:
return self.error('Expected %s to start with <%s>, but did not.' % (self.val, prefix))
else:
raise TypeError('val is not a string or iterable')
return self
def ends_with(self, suffix):
if suffix is None:
raise TypeError('given suffix arg must not be none')
if isinstance(self.val, str_types):
if not isinstance(suffix, str_types):
raise TypeError('given suffix arg must be a string')
if len(suffix) == 0:
raise ValueError('given suffix arg must not be empty')
if not self.val.endswith(suffix):
return self.error('Expected <%s> to end with <%s>, but did not.' % (self.val, suffix))
elif isinstance(self.val, Iterable):
if len(self.val) == 0:
raise ValueError('val must not be empty')
last = None
for last in self.val:
pass
if last != suffix:
return self.error('Expected %s to end with <%s>, but did not.' % (self.val, suffix))
else:
raise TypeError('val is not a string or iterable')
return self
def matches(self, pattern):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if not isinstance(pattern, str_types):
raise TypeError('given pattern arg must be a string')
if len(pattern) == 0:
raise ValueError('given pattern arg must not be empty')
if re.search(pattern, self.val) is None:
return self.error('Expected <%s> to match pattern <%s>, but did not.' % (self.val, pattern))
return self
def does_not_match(self, pattern):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if not isinstance(pattern, str_types):
raise TypeError('given pattern arg must be a string')
if len(pattern) == 0:
raise ValueError('given pattern arg must not be empty')
if re.search(pattern, self.val) is not None:
return self.error('Expected <%s> to not match pattern <%s>, but did.' % (self.val, pattern))
return self
def is_alpha(self):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if not self.val.isalpha():
return self.error('Expected <%s> to contain only alphabetic chars, but did not.' % self.val)
return self
def is_digit(self):
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if not self.val.isdigit():
return self.error('Expected <%s> to contain only digits, but did not.' % self.val)
return self
|
BSD 3-Clause New or Revised License
|
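In use, is_lower chains like any other assertpy assertion because every passing check returns the builder; with the assertpy package installed:
from assertpy import assert_that
assert_that('foo').is_lower()             # passes, returns the builder
assert_that('foo').is_lower().is_alpha()  # chaining works because each check returns self
try:
    assert_that('Foo').is_lower()
except AssertionError as err:
    print(err)  # Expected <Foo> to contain only lowercase chars, but did not.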
aleju/imgaug
|
imgaug/imgaug.py
|
is_np_array
|
python
|
def is_np_array(val):
return isinstance(val, np.ndarray)
|
Check whether a variable is a numpy array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
``True`` if the variable is a numpy array. Otherwise ``False``.
|
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/imgaug.py#L205-L222
|
from __future__ import print_function, division, absolute_import
import math
import numbers
import sys
import os
import types
import functools
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
import cv2
import six
import six.moves as sm
import skimage.draw
import skimage.measure
try:
import numba
except ImportError:
numba = None
ALL = "ALL"
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
IMSHOW_BACKEND_DEFAULT = "matplotlib"
IMRESIZE_VALID_INTERPOLATIONS = [
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
_POOLING_KERNELS_CACHE = {}
_NUMBA_INSTALLED = numba is not None
_UINT8_DTYPE = np.dtype("uint8")
class DeprecationWarning(Warning):
    pass
def warn(msg, category=UserWarning, stacklevel=2):
import warnings
warnings.warn(msg, category=category, stacklevel=stacklevel)
def warn_deprecated(msg, stacklevel=2):
warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
class deprecated(object):
def __init__(self, alt_func=None, behavior="warn", removed_version=None,
comment=None):
self.alt_func = alt_func
self.behavior = behavior
self.removed_version = removed_version
self.comment = comment
def __call__(self, func):
alt_msg = None
if self.alt_func is not None:
alt_msg = "Use ``%s`` instead." % (self.alt_func,)
rmv_msg = None
if self.removed_version is not None:
rmv_msg = "It will be removed in version %s." % (
self.removed_version,)
comment_msg = None
if self.comment is not None and len(self.comment) > 0:
comment_msg = "%s." % (self.comment.rstrip(". "),)
addendum = " ".join([submsg
for submsg
in [alt_msg, rmv_msg, comment_msg]
if submsg is not None])
@functools.wraps(func)
def wrapped(*args, **kwargs):
import inspect
if hasattr(inspect, "getfullargspec"):
arg_names = inspect.getfullargspec(func)[0]
else:
arg_names = inspect.getargspec(func)[0]
if "self" in arg_names or "cls" in arg_names:
main_msg = "Method ``%s.%s()`` is deprecated." % (
args[0].__class__.__name__, func.__name__)
else:
main_msg = "Function ``%s()`` is deprecated." % (
func.__name__,)
msg = (main_msg + " " + addendum).rstrip(" ").replace("``", "`")
if self.behavior == "warn":
warn_deprecated(msg, stacklevel=3)
elif self.behavior == "raise":
raise DeprecationWarning(msg)
return func(*args, **kwargs)
doc = "**Deprecated**. " + addendum
if wrapped.__doc__ is None:
wrapped.__doc__ = doc
else:
wrapped.__doc__ = doc + "\n\n " + wrapped.__doc__
return wrapped
|
MIT License
|
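is_np_array is a bare isinstance check against np.ndarray, so plain lists and numpy scalars both report False:
import numpy as np
def is_np_array(val):
    return isinstance(val, np.ndarray)
print(is_np_array(np.zeros((2, 2), dtype=np.uint8)))  # True
print(is_np_array([[0, 0], [0, 0]]))                  # False - plain list
print(is_np_array(np.float32(1.0)))                   # False - numpy scalar, not an ndarray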
stencila/hub
|
manager/jobs/models.py
|
Job.status_category
|
python
|
def status_category(self) -> Optional[str]:
return JobStatus.category(self.status) if self.status else None
|
Get the status category of the job.
|
https://github.com/stencila/hub/blob/e696c39213156bb43a098f81286197e919379cdf/manager/jobs/models.py#L786-L790
|
import json
import re
from enum import unique
from typing import Dict, List, Optional
import inflect
import shortuuid
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.humanize.templatetags.humanize import ordinal
from django.core import validators
from django.db import models, transaction
from django.http import HttpRequest
from django.utils import timezone
from django.utils.functional import cached_property
from django_celery_beat.models import PeriodicTask
from accounts.models import Account
from manager.helpers import EnumChoice
from users.models import AnonUser, User
class Zone(models.Model):
account = models.ForeignKey(
Account,
on_delete=models.CASCADE,
related_name="zones",
help_text="The account that this zone is linked to.",
)
name = models.CharField(
max_length=256,
validators=[
validators.RegexValidator(
r"^[a-z][a-z0-9\-]*$",
"Name should start with a lowercase letter and only contain lowercase letters, digits and hyphens",
)
],
help_text="The identifier of the queue the job was posted to.",
)
class Meta:
constraints = [
models.UniqueConstraint(fields=["account", "name"], name="unique_name")
]
def __str__(self) -> str:
return "Zone #{0}:{1}".format(self.id, self.name)
class Queue(models.Model):
NAME_REGEX = r"^([a-z][a-z0-9\-]*)(:[0-9])?(:untrusted)?(:interrupt)?$"
name = models.CharField(max_length=512, help_text="The name of the queue.")
zone = models.ForeignKey(
Zone,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="jobs",
help_text="The zone this job is associated with.",
)
priority = models.IntegerField(
default=0, help_text="The relative priority of jobs placed on the queue."
)
untrusted = models.BooleanField(
default=False,
help_text="Whether or not the queue should be sent jobs which run untrusted code.",
)
interrupt = models.BooleanField(
default=False,
help_text="Whether or not the queue should be sent jobs which can not be interupted."
"False (default): jobs should not be interrupted",
)
def __str__(self):
return "Queue #{0}:{1}".format(self.id, self.name)
@classmethod
def get_or_create(cls, account_name, queue_name):
match = re.search(cls.NAME_REGEX, queue_name)
assert match is not None
account = Account.objects.get(name=account_name)
zone, created = Zone.objects.get_or_create(account=account, name=match.group(1))
priority = match.group(2)
priority = int(priority[1:]) if priority else 0
untrusted = match.group(3) is not None
interrupt = match.group(4) is not None
return cls.objects.get_or_create(
name=queue_name,
zone=zone,
priority=priority,
untrusted=untrusted,
interrupt=interrupt,
)
class Worker(models.Model):
queues = models.ManyToManyField(
Queue,
related_name="workers",
help_text="The queues that this worker is listening to.",
)
created = models.DateTimeField(
auto_now_add=True,
help_text="The time that the worker started (time of the first event for the worker).",
)
started = models.DateTimeField(
null=True,
blank=True,
help_text="The time that the worker started (only recorded on a 'worker-online' event).",
)
updated = models.DateTimeField(
null=True,
blank=True,
help_text="The time that the last heatbeat was received for the worker.",
)
finished = models.DateTimeField(
null=True,
blank=True,
help_text="The time that the worker finished (only recorded on a 'worker-offline' event)",
)
hostname = models.CharField(
max_length=512, help_text="The `hostname` of the worker.",
)
utcoffset = models.IntegerField(
null=True, blank=True, help_text="The `utcoffset` of the worker.",
)
pid = models.IntegerField(
null=True, blank=True, help_text="The `pid` of the worker.",
)
freq = models.FloatField(
null=True, blank=True, help_text="The worker's heatbeat frequency (in seconds)",
)
software = models.CharField(
max_length=256,
null=True,
blank=True,
help_text="The name and version of the worker's software.",
)
os = models.CharField(
max_length=64,
null=True,
blank=True,
help_text="Operating system that the worker is running on.",
)
details = models.JSONField(
null=True,
blank=True,
help_text="Details about the worker including queues and stats"
"See https://docs.celeryproject.org/en/stable/userguide/workers.html#statistics",
)
signature = models.CharField(
max_length=512,
null=True,
blank=True,
help_text="The signature of the worker used to identify it. "
"It is possible, but unlikely, that two or more active workers have the same signature.",
)
FLATLINE_HEARTBEATS = 5
@classmethod
@transaction.atomic
def get_or_create(cls, event: dict):
hostname = event.get("hostname")
utcoffset = event.get("utcoffset")
pid = event.get("pid")
freq = event.get("freq")
software = "{}-{}".format(event.get("sw_ident"), event.get("sw_ver"))
os = event.get("sw_sys")
details = event.get("details", {})
signature = "{hostname}|{utcoffset}|{pid}|{freq}|{software}|{os}".format(
hostname=hostname,
utcoffset=utcoffset,
pid=pid,
freq=freq,
software=software,
os=os,
)
try:
return Worker.objects.filter(signature=signature, finished__isnull=True)[0]
except IndexError:
pass
return Worker.objects.create(
hostname=hostname,
utcoffset=utcoffset,
pid=pid,
freq=freq,
software=software,
os=os,
details=details,
signature=signature,
)
@property
def active(self):
if self.finished:
return False
if self.updated:
return (timezone.now() - self.updated).seconds < (
self.freq * Worker.FLATLINE_HEARTBEATS
)
return True
class WorkerHeartbeat(models.Model):
worker = models.ForeignKey(
Worker,
on_delete=models.CASCADE,
related_name="heartbeats",
help_text="The worker that the heartbeat is for.",
)
time = models.DateTimeField(help_text="The time of the heartbeat.")
clock = models.IntegerField(
help_text="The tick number of the worker's monotonic clock",
)
active = models.IntegerField(help_text="The number of active jobs on the worker.",)
processed = models.IntegerField(
help_text="The number of jobs that have been processed by the worker.",
)
load = models.JSONField(
help_text="An array of the system load over the last 1, 5 and 15 minutes. From os.getloadavg().",
)
@unique
class JobMethod(EnumChoice):
parallel = "parallel"
series = "series"
clean = "clean"
archive = "archive"
zip = "zip"
pull = "pull"
push = "push"
extract = "extract"
convert = "convert"
pin = "pin"
register = "register"
compile = "compile"
build = "build"
execute = "execute"
session = "session"
sleep = "sleep"
@classmethod
def is_compound(cls, method: str) -> bool:
return method in [member.value for member in (cls.parallel, cls.series)]
@unique
class JobStatus(EnumChoice):
WAITING = "WAITING"
DISPATCHED = "DISPATCHED"
PENDING = "PENDING"
RECEIVED = "RECEIVED"
STARTED = "STARTED"
RUNNING = "RUNNING"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
CANCELLED = "CANCELLED"
REVOKED = "REVOKED"
TERMINATED = "TERMINATED"
REJECTED = "REJECTED"
RETRY = "RETRY"
@classmethod
def categories(cls) -> Dict[str, List[str]]:
return {
"Pending": [
cls.WAITING.name,
cls.DISPATCHED.name,
cls.PENDING.name,
cls.RECEIVED.name,
cls.RETRY.name,
],
"Running": [cls.STARTED.name, cls.RUNNING.name],
"Finished": [cls.SUCCESS.name],
"Failed": [cls.FAILURE.name, cls.REJECTED.name],
"Cancelled": [cls.CANCELLED.name, cls.REVOKED.name, cls.TERMINATED.name],
}
@classmethod
def category(cls, status: str) -> str:
if status in (
cls.WAITING.name,
cls.DISPATCHED.name,
cls.PENDING.name,
cls.RECEIVED.name,
cls.RETRY.name,
):
return "Pending"
if status in (cls.STARTED.name, cls.RUNNING.name):
return "Running"
if status in (cls.SUCCESS.name,):
return "Finished"
if status in (cls.FAILURE.name, cls.REJECTED.name):
return "Failed"
if status in (cls.CANCELLED.name, cls.REVOKED.name, cls.TERMINATED.name):
return "Cancelled"
return "Other"
@classmethod
def has_ended(cls, status: str) -> bool:
return status in [
member.value
for member in (
cls.SUCCESS,
cls.FAILURE,
cls.CANCELLED,
cls.REVOKED,
cls.TERMINATED,
)
]
@classmethod
def rank(cls, status: str) -> int:
return {
"WAITING": 0,
"DISPATCHED": 1,
"PENDING": 2,
"RECEIVED": 3,
"STARTED": 4,
"RUNNING": 5,
"SUCCESS": 6,
"CANCELLED": 7,
"REVOKED": 8,
"TERMINATED": 9,
"FAILURE": 10,
}.get(status, 0)
@classmethod
def highest(cls, statuses: List[str]) -> str:
ranks = [JobStatus.rank(status) for status in statuses]
max_rank = max(ranks)
max_index = ranks.index(max_rank)
return statuses[max_index]
@classmethod
def icon(cls, status: str) -> str:
icon = "loader"
icon = "play-circle" if status == cls.STARTED.value else icon
icon = "check-circle" if status == cls.SUCCESS.value else icon
icon = (
"x-octagon"
if status
in [member.value for member in (cls.FAILURE, cls.REJECTED, cls.REVOKED,)]
else icon
)
icon = (
"slash"
if status in [member.value for member in (cls.CANCELLED, cls.TERMINATED,)]
else icon
)
icon = "rotate-cw" if status == cls.RETRY.value else icon
return icon
@classmethod
def colour(cls, status: str) -> str:
icon = "info"
icon = "success" if status == cls.SUCCESS.value else icon
icon = (
"danger"
if status
in [member.value for member in (cls.FAILURE, cls.REJECTED, cls.REVOKED,)]
else icon
)
icon = (
"grey-light"
if status in [member.value for member in (cls.CANCELLED, cls.TERMINATED,)]
else icon
)
icon = "warning" if status == cls.RETRY.value else icon
return icon
def generate_job_id():
return shortuuid.uuid()
def generate_job_key():
return shortuuid.ShortUUID().random(length=32)
class Job(models.Model):
id = models.BigAutoField(
primary_key=True,
help_text="An autoincrementing integer to allow selecting jobs in the order they were created.",
)
key = models.CharField(
default=generate_job_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access the job with.",
)
description = models.TextField(
null=True,
blank=True,
help_text="A short description of the job.",
)
project = models.ForeignKey(
"projects.Project",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="jobs",
help_text="The project this job is associated with.",
)
snapshot = models.ForeignKey(
"projects.Snapshot",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="jobs",
help_text="The snapshot that this job is associated with. "
"Usually `session` jobs for the snapshot.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="jobs_created",
help_text="The user who created the job.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the job was created."
)
updated = models.DateTimeField(
auto_now=True, help_text="The time the job was last updated."
)
queue = models.ForeignKey(
Queue,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="jobs",
help_text="The queue that this job was routed to",
)
parent = models.ForeignKey(
"Job",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="children",
help_text="The parent job",
)
began = models.DateTimeField(
null=True, blank=True, help_text="The time the job began."
)
ended = models.DateTimeField(
null=True, blank=True, help_text="The time the job ended."
)
status = models.CharField(
max_length=32,
choices=JobStatus.as_choices(),
null=True,
blank=True,
help_text="The current status of the job.",
)
is_active = models.BooleanField(
default=False, db_index=True, help_text="Is the job active?",
)
method = models.CharField(
max_length=32, choices=JobMethod.as_choices(), help_text="The job method."
)
params = models.JSONField(
null=True, blank=True, help_text="The parameters of the job; a JSON object."
)
result = models.JSONField(
null=True, blank=True, help_text="The result of the job; a JSON value."
)
error = models.JSONField(
null=True,
blank=True,
help_text="Any error associated with the job; a JSON object with type, message etc.",
)
log = models.JSONField(
null=True,
blank=True,
help_text="The job log; a JSON array of log objects, including any errors.",
)
runtime = models.FloatField(
null=True, blank=True, help_text="The running time of the job."
)
urls = models.JSONField(
null=True,
blank=True,
help_text="The URLs of the job on the local network (by protocol); can be used to interact with it.",
)
users = models.ManyToManyField(
User,
blank=True,
help_text="Users who have created or connected to the job; not necessarily currently connected.",
)
anon_users = models.ManyToManyField(
AnonUser,
blank=True,
help_text="Anonymous users who have created or connected to the job.",
)
worker = models.CharField(
max_length=64,
null=True,
blank=True,
help_text="The identifier of the worker that ran the job.",
)
retries = models.IntegerField(
null=True, blank=True, help_text="The number of retries to fulfil the job.",
)
callback_type = models.ForeignKey(
ContentType,
null=True,
blank=True,
on_delete=models.CASCADE,
help_text="The type of the object to call back.",
)
callback_id = models.CharField(
null=True,
blank=True,
max_length=256,
help_text="The id of the object to call back.",
)
callback_method = models.CharField(
null=True,
blank=True,
max_length=128,
help_text="The name of the method to call back.",
)
callback_object = GenericForeignKey("callback_type", "callback_id")
secrets = models.JSONField(
null=True,
blank=True,
help_text="Any secrets (e.g. API tokens) required to perform the job. "
"When the job has ended these should be deleted.",
)
@cached_property
|
Apache License 2.0
|
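status_category just forwards to JobStatus.category, which buckets the Celery-style status names into the five display categories defined in categories(); the mapping can be exercised without a database, assuming the JobStatus enum from the models module above is importable (Django settings configured):
for status in ("PENDING", "RUNNING", "SUCCESS", "REJECTED", "TERMINATED", "UNKNOWN"):
    print(status, "->", JobStatus.category(status))
# PENDING -> Pending, RUNNING -> Running, SUCCESS -> Finished,
# REJECTED -> Failed, TERMINATED -> Cancelled, UNKNOWN -> Other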
dpkp/kafka-python
|
kafka/metrics/metrics_reporter.py
|
AbstractMetricsReporter.close
|
python
|
def close(self):
raise NotImplementedError
|
Called when the metrics repository is closed.
|
https://github.com/dpkp/kafka-python/blob/f0a57a6a20a3049dc43fbf7ad9eab9635bd2c0b0/kafka/metrics/metrics_reporter.py#L55-L57
|
from __future__ import absolute_import
import abc
class AbstractMetricsReporter(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def init(self, metrics):
raise NotImplementedError
@abc.abstractmethod
def metric_change(self, metric):
raise NotImplementedError
@abc.abstractmethod
def metric_removal(self, metric):
raise NotImplementedError
@abc.abstractmethod
def configure(self, configs):
raise NotImplementedError
@abc.abstractmethod
|
Apache License 2.0
|
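A concrete reporter only has to fill in the five abstract hooks; the printing subclass below is a hypothetical sketch (not part of kafka-python) and assumes the kafka-python package is installed:
from kafka.metrics.metrics_reporter import AbstractMetricsReporter
class PrintingReporter(AbstractMetricsReporter):
    # hypothetical reporter that just prints each callback
    def init(self, metrics):
        print("starting with %d metrics" % len(metrics))
    def metric_change(self, metric):
        print("metric changed:", metric)
    def metric_removal(self, metric):
        print("metric removed:", metric)
    def configure(self, configs):
        self.configs = configs
    def close(self):
        print("reporter closed")
reporter = PrintingReporter()
reporter.configure({})
reporter.init([])
reporter.close()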
tdda/tdda
|
tdda/rexpy/rexpy.py
|
Extractor.rle2re
|
python
|
def rle2re(self, rles, tagged=False, as_re=True):
Cats = self.Cats
parts = []
for (c, freq) in rles:
desc = Cats[c].re_string if as_re else c
part = desc + ('{%d}' % freq if freq > 1 else '')
parts.append(('(%s)' % part) if tagged else part)
return poss_term_re(''.join(parts))
|
Convert run-length-encoded code string to regular expression
|
https://github.com/tdda/tdda/blob/2148de2b042cc34ad33a66df3349c6d5e4a1500b/tdda/rexpy/rexpy.py#L962-L972
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import random
import re
import string
import sys
from collections import Counter, defaultdict, namedtuple, OrderedDict
from pprint import pprint
from tdda import __version__
isPython2 = sys.version_info[0] < 3
str_type = unicode if isPython2 else str
bytes_type = str if isPython2 else bytes
INT_ARRAY = b'i' if sys.version_info[0] < 3 else 'i'
UNESCAPES = '''!"%',/:;<=>@_` '''
USAGE = re.sub(r'^(.*)Python API.*$', '', __doc__.replace('Usage::', 'Usage:'))
MIN_MERGE_SIMILARITY = 0.5
TERMINATE = True
N_ALIGNMENT_LEVELS = 1
MAX_GROUPS = 99
MAX_VRLE_RANGE = 2
VARIABLE_LENGTH_FRAGS = False
VERBOSITY = 0
MAX_PATTERNS = None
MIN_DIFF_STRINGS_PER_PATTERN = 1
MIN_STRINGS_PER_PATTERN = 1
USE_SAMPLING = False
RE_FLAGS = re.UNICODE | re.DOTALL
DIALECTS = ['perl']
class SIZE(object):
if USE_SAMPLING:
DO_ALL = 100
else:
DO_ALL = 100000000
DO_ALL_EXCEPTIONS = 4000
N_PER_LENGTH = 64
MAX_SAMPLED_ATTEMPTS = 2
MAX_PUNC_IN_GROUP = 5
MAX_STRINGS_IN_GROUP = 10
nCalls = 0
memo = {}
def cre(rex):
global nCalls, memo
nCalls += 1
c = memo.get(rex)
if c:
return c
else:
memo[rex] = c = re.compile(rex, RE_FLAGS)
return c
def terminated_cre(expr):
return cre('^%s$' % expr)
def terminated_re(expr):
return '^%s$' % expr
if TERMINATE:
poss_term_cre = terminated_cre
poss_term_re = terminated_re
else:
def poss_term_re(expr):
return expr
def poss_term_cre(expr):
return cre(expr)
class CODE(object):
ANY = '?'
PUNC = '.'
class Category(object):
def __init__(self, name, code, re_string):
self.name = name
self.code = code
self.re_string = re_string
self.re_single = poss_term_cre(re_string)
self.re_multiple = poss_term_cre(re_string + '+')
UNICHRS = True
UNIC = 'Ḉ'
COARSEST_ALPHANUMERIC_CODE = UNIC if UNICHRS else 'C'
class Categories(object):
escapableCodes = '.*?'
def __init__(self, extra_letters=None, full_escape=False, dialect=None):
if extra_letters:
assert all(L in '_.-' for L in extra_letters)
extra_letters = ''.join(e for e in '_.-'
if e in extra_letters)
el_re = extra_letters
el_re_exc = '' if '_' in extra_letters else '_'
else:
el_re = ''
el_re_exc = '_'
el_re_inc = (extra_letters or '').replace('_', '')
punctuation = self.PunctuationChars(el_re)
self.dialect = dialect
self.extra_letters = extra_letters or ''
self.full_escape = full_escape
self.LETTER = Category('LETTER', 'A', '[A-Z]')
self.letter = Category('letter', 'a', '[a-z]')
self.Letter = Category('Letter', 'L', '[A-Za-z]')
self.ULetter = Category('ULetter', 'Ḹ', r'[^\W0-9_]')
if extra_letters:
self.LETTER_ = Category('LETTER_', 'B', '[A-Z%s]' % el_re)
self.letter_ = Category('letter_', 'b', '[a-z%s]' % el_re)
self.Letter_ = Category('Letter_', 'M', '[A-Za-z%s]' % el_re)
if extra_letters == '_':
self.ULetter_ = Category('ULetter_', 'Ṃ', r'[^\W0-9]')
else:
p = u_alpha_numeric_re(el_re_inc, el_re_exc, digits=False,
dialect=dialect)
self.ULetter_ = Category('ULetter_', 'Ṃ', p)
ExtraLetterGroups = ['LETTER_', 'letter_', 'Letter_'] + (
['ULetter_'] if UNICHRS else []
)
else:
self.ULetter_ = Category('ULetter_', 'Ṃ', r'[^\W0-9_]')
ExtraLetterGroups = []
self.Digit = Category('Digit', 'D', r'\d')
self.hex = Category('hex', 'h', '[0-9a-f]')
self.HEX = Category('HEX', 'H', '[0-9A-F]')
self.Hex = Category('Hex', 'X', '[0-9a-fA-F]')
self.ALPHANUMERIC = Category('ALPHANUMERIC', 'N', '[A-Z0-9%s]' % el_re)
self.alphanumeric = Category('alphanumeric', 'n', '[a-z0-9%s]' % el_re)
self.AlphaNumeric = Category('AlphaNumeric', 'C',
'[A-Za-z0-9%s]' % el_re)
self.UAlphaNumeric = Category('UAlphaNumeric', 'Ḉ',
u_alpha_numeric_re(el_re_inc, el_re_exc,
dialect=dialect))
self.Whitespace = Category('Whitespace', ' ', r'\s')
self.Punctuation = Category('Punctuation', CODE.PUNC,
escaped_bracket(punctuation,
dialect=dialect))
self.Other = Category('Other', '*', r'[^!-~\s]')
self.Any = Category('Any', CODE.ANY, '.')
self.SpecificCoarseCats = [self.UAlphaNumeric if UNICHRS
else self.AlphaNumeric,
self.Whitespace,
self.Punctuation]
self.AllCoarseCats = self.SpecificCoarseCats + [self.Other]
self.IncreasinglyGeneralAlphanumerics = [
'Digit',
'LETTER', 'letter', 'Letter',
] + (
['ULetter'] if UNICHRS else []
) + ExtraLetterGroups + [
'HEX', 'hex', 'Hex',
'ALPHANUMERIC', 'alphanumeric', 'AlphaNumeric',
] + (
['UAlphaNumeric'] if UNICHRS else []
)
def PunctuationChars(self, el_re):
specials = re.compile(r'[A-Za-z0-9\s%s]' % el_re, RE_FLAGS)
return [chr(c) for c in range(32, 127) if not re.match(specials,
chr(c))]
def build_cat_map(self):
self.code2cat = {}
for k in self.__dict__:
cat = self.__dict__[k]
code = getattr(cat, 'code', None)
if code:
self.code2cat[code] = cat
def __getitem__(self, k):
if not hasattr(self, 'code2cat'):
self.build_cat_map()
return self.code2cat[k]
@classmethod
def escape_code(cls, code):
return escape(code, full=False) if code in cls.escapableCodes else code
def escape(self, s, full=None):
return escape(s, full=self.full_escape if full is None else full)
class Fragment(namedtuple('Fragment', 're group')):
    pass
class Coverage(namedtuple('Coverage', 'n n_uniq incr incr_uniq index')):
    pass
class Extractor(object):
def __init__(self, examples, extract=True, tag=False, extra_letters=None,
full_escape=False,
remove_empties=False, strip=False,
variableLengthFrags=VARIABLE_LENGTH_FRAGS,
specialize=False,
max_patterns=MAX_PATTERNS,
min_diff_strings_per_pattern=MIN_DIFF_STRINGS_PER_PATTERN,
min_strings_per_pattern=MIN_STRINGS_PER_PATTERN,
seed=None, dialect=None, verbose=VERBOSITY):
self.verbose = verbose
self.example_freqs = Counter()
if USE_SAMPLING:
self.by_length = defaultdict(list)
self.n_stripped = 0
self.n_empties = 0
self.n_nulls = 0
self.remove_empties = remove_empties
self.strip = strip
self.variableLengthFrags = variableLengthFrags
self.specialize = specialize
self.tag = tag
self.clean(examples)
self.results = None
self.warnings = []
self.n_too_many_groups = 0
self.Cats = Categories(self.thin_extras(extra_letters),
full_escape=full_escape, dialect=dialect)
self.max_patterns = max_patterns
self.min_diff_strings_per_pattern = min_diff_strings_per_pattern
self.min_strings_per_pattern = min_strings_per_pattern
if extract:
self.extract()
def extract(self):
if len(self.example_freqs) == 0:
self.results = None
if len(self.example_freqs) <= SIZE.DO_ALL:
self.results = self.batch_extract(self.example_freqs.keys())
else:
examples = self.sample(SIZE.N_PER_LENGTH)
attempt = 1
failures = []
while attempt <= SIZE.MAX_SAMPLED_ATTEMPTS + 1:
if self.verbose:
print('Pass %d' % attempt)
print('Examples: %s ... %s' % (examples[:5],
examples[-5:]))
self.results = self.batch_extract(examples)
failures = self.find_non_matches()
if self.verbose:
print('REs:', self.results.rex)
print('Failures (%d): %s' % (len(failures),
failures[:5]))
if len(failures) == 0:
break
elif (len(failures) <= SIZE.DO_ALL_EXCEPTIONS
or attempt > SIZE.MAX_SAMPLED_ATTEMPTS):
examples.extend(failures)
else:
examples.extend(random.sample(failures,
SIZE.DO_ALL_EXCEPTIONS))
attempt += 1
self.add_warnings()
def add_warnings(self):
if self.n_too_many_groups:
self.warnings.append('%d string%s assigned to .{m,n} for needing '
'"too many" groups.'
% (self.n_too_many_groups,
's' if self.n_too_many_groups > 1 else ''))
def thin_extras(self, extra_letters):
if not extra_letters or len(extra_letters) == 1:
return extra_letters
keep = []
for L in extra_letters:
if any(L in example for example in self.example_freqs):
keep.append(L)
return ''.join(keep) if keep else None
def clean(self, examples):
isdict = isinstance(examples, dict)
for s in examples:
n = examples[s] if isdict else 1
if s is None:
self.n_nulls += 1
else:
stripped = s.strip() if self.strip else s
L = len(stripped)
if self.remove_empties and L == 0:
self.n_empties += n
else:
self.example_freqs[stripped] += n
if len(stripped) != len(s):
self.n_stripped += n
for k in [k for (k, n) in self.example_freqs.items() if n == 0]:
del self.example_freqs[k]
if self.verbose > 1:
print('Examples:')
pprint(self.example_freqs)
print()
def batch_extract(self, examples):
rles = [self.run_length_encode_coarse_classes(s) for s in examples]
rle_freqs = Counter()
for r in rles:
rle_freqs[r] += 1
vrles = to_vrles(rle_freqs.keys())
vrle_freqs = Counter()
refined = []
for r in vrles:
vrle_freqs[r] += 1
grouped = self.refine_groups(r, self.example_freqs)
refined.append(grouped)
merged = self.merge_patterns(refined)
if self.specialize:
merged = self.specialize_patterns(merged)
mergedrex = [self.vrle2re(m, tagged=self.tag) for m in merged]
mergedfrags = [self.vrle2refrags(m) for m in merged]
return ResultsSummary(rles, rle_freqs, vrles, vrle_freqs,
merged, mergedrex, mergedfrags,
extractor=self)
def specialize(self, patterns):
return patterns
def coarse_classify(self, s):
return ''.join(self.coarse_classify_char(c) for c in s)
def coarse_classify_char(self, c):
Cats = self.Cats
for cat in Cats.SpecificCoarseCats:
if re.match(cat.re_single, c):
return cat.code
assert re.match(Cats.Other.re_single, c)
return Cats.Other.code
def run_length_encode_coarse_classes(self, s):
rle = run_length_encode(self.coarse_classify(s))
if len(rle) <= MAX_GROUPS:
return rle
else:
self.n_too_many_groups += 1
return run_length_encode(CODE.ANY * len(s))
def merge_patterns(self, patterns):
if len(patterns) == 1:
return patterns
patterns = self.sort_by_length(patterns)
level = 0
parts = [patterns]
n_parts = len(parts)
DO_ALIGNMENT = True
while level < N_ALIGNMENT_LEVELS and DO_ALIGNMENT:
parts = self.alignment_step(parts, level)
if len(parts) == n_parts:
level += 1
else:
n_parts = len(parts)
patterns = self.join_parts(parts)
return patterns
def alignment_step(self, parts, level):
new_parts = []
n_parts = len(parts)
for part in parts:
if level == 0:
new_parts.extend(self.merge_fixed_omnipresent_at_pos(part))
elif level == 1:
new_parts.extend(self.merge_fixed_only_present_at_pos(part))
else:
raise ValueError('Level out of range (%d)' % level)
if self.verbose:
print('\nOUT:')
print(self.aligned_parts(new_parts))
return new_parts
def merge_fixed_omnipresent_at_pos(self, patterns):
lstats = length_stats(patterns)
if lstats.max_length <= 1:
return [patterns]
frags = set()
for frag in patterns[0]:
if len(frag) == 4:
frags.add(frag)
frags = list(frags)
for pattern in patterns[1:]:
i = 0
while i < len(frags):
if not frags[i] in pattern:
del frags[i]
else:
i += 1
if not frags:
return [patterns]
leftPos = {frag: Counter() for frag in frags}
rightPos = {frag: Counter() for frag in frags}
for pattern in patterns:
n = len(pattern)
for i, frag in enumerate(pattern):
if frag in frags:
leftPos[frag][i] += 1
if not lstats.all_same_length:
rightPos[frag][n - i] += 1
nPatterns = len(patterns)
leftFixed = get_omnipresent_at_pos(leftPos, nPatterns)
if leftFixed:
return left_parts(patterns, leftFixed)
rightFixed = get_omnipresent_at_pos(rightPos, nPatterns,
verbose=self.verbose)
if rightFixed:
return right_parts(patterns, rightFixed)
return [patterns]
def merge_fixed_only_present_at_pos(self, patterns):
lstats = length_stats(patterns)
if lstats.max_length <= 1:
return [patterns]
frags = set()
for pattern in patterns:
for frag in pattern:
if len(frag) == 4:
frags.add(frag)
if not frags:
return [patterns]
leftPos = {frag: Counter() for frag in frags}
rightPos = {frag: Counter() for frag in frags}
for pattern in patterns:
n = len(pattern)
for i, frag in enumerate(pattern):
if frag in frags:
leftPos[frag][i] += 1
if not lstats.all_same_length:
rightPos[frag][n - i] += 1
nPatterns = len(patterns)
leftFixed = get_only_present_at_pos(leftPos)
if leftFixed:
if self.verbose:
print('LEFT FOUND!', leftFixed)
return left_parts(patterns, leftFixed)
rightFixed = get_only_present_at_pos(rightPos, verbose=self.verbose)
if rightFixed:
if self.verbose:
print('RIGHT FOUND!', rightFixed)
return right_parts(patterns, rightFixed)
return [patterns]
def join_parts(self, parts):
if not parts:
return []
out = [[] for i in range(len(parts[0]))]
for part in parts:
for i, row in enumerate(part):
if row:
out[i].extend(row)
return out
def sort_by_length(self, patterns):
if patterns:
M = max(len(p) for p in patterns if p is not None) + 1
return list(sorted(patterns,
key=lambda p: len(p) if p is not None else M))
else:
return []
def pad(self, p, q):
if self.verbose:
print(self.vrle2re(self.despecify(p), True))
print(self.vrle2re(self.despecify(q), True))
print()
return p
def despecify(self, pattern):
return list(self.despecify_frag(frag) for frag in pattern)
def despecify_frag(self, frag):
r, m, M = frag[:3]
if m == M == 1:
return frag
else:
return (r, 1, None) if len(frag) == 3 else (r, 1, None, 'fixed')
def similarity(self, p, q):
return 1
def sample(self, nPerLength):
        lengths = sorted(self.by_length.keys())
examples = []
for L in lengths:
x = self.by_length[L]
if len(self.by_length[L]) <= nPerLength:
examples.extend(x)
else:
examples.extend(random.sample(x, nPerLength))
return examples
def find_non_matches(self):
        failures = list(self.example_freqs.keys())
if self.results:
for r in self.results.rex:
cr = cre(r)
i = len(failures) - 1
while i >= 0:
f = failures[i]
if re.match(cr, f):
del failures[i]
i -= 1
return failures
def pattern_matches(self):
compiled = [cre(r) for r in self.results.rex]
results = OrderedDict()
for x in self.example_freqs.keys():
for i, r in enumerate(self.results.rex):
cr = cre(r)
if re.match(cr, x):
try:
results[i].append(x)
except:
results[i] = [x]
break
else:
print('Example "%s" did not match any pattern' % x)
return results
def analyse_groups(self, pattern, examples):
regex = cre(self.vrle2re(pattern, tagged=True))
n_groups = len(pattern)
group_chars = [set([]) for i in range(n_groups)]
group_strings = [set([]) for i in range(n_groups)]
group_rlefcs = [None] * n_groups
group_rlecs = [None] * n_groups
n_strings = [0] * n_groups
for example in examples:
m = re.match(regex, example)
if m:
f = group_map_function(m, n_groups)
for i in range(n_groups):
g = m.group(f(i + 1))
if n_strings[i] <= SIZE.MAX_STRINGS_IN_GROUP:
group_strings[i].add(g)
n_strings[i] = len(group_strings[i])
group_chars[i] = group_chars[i].union(set(list(g)))
(group_rlefcs[i],
group_rlecs[i]) = self.rle_fc_c(g, pattern[i],
group_rlefcs[i],
group_rlecs[i])
if self.verbose >= 2:
print('Fine Class VRLE:', group_rlefcs)
print(' Char VRLE:', group_rlecs)
return zip(group_chars, group_strings,
group_rlefcs, group_rlecs,
pattern)
def refine_groups(self, pattern, examples):
ga = self.analyse_groups(pattern, examples)
out = []
Cats = self.Cats
n_groups = len(pattern)
for group, (chars, strings, rlefc, rlec, fragment) in enumerate(ga):
(c, m, M) = fragment
char_str = ''.join(sorted(chars))
fixed = False
refined = None
if len(strings) == 1:
refined = self.Cats.escape(list(strings)[0])
m = M = 1
fixed = True
elif len(chars) == 1:
refined = self.Cats.escape(list(chars)[0])
fixed = True
elif c == COARSEST_ALPHANUMERIC_CODE:
if rlec:
if self.verbose >= 2:
print('SAME CHARS: %s' % rlec)
out.extend(plusify_vrles(rlec))
continue
elif rlefc:
if self.verbose >= 2:
print('SAME FINE CLASSES: %s' % rlefc)
if n_groups + len(rlefc) - 1 <= MAX_GROUPS:
out.extend(plusify_vrles(rlefc))
n_groups += len(rlefc) - 1
continue
for k in Cats.IncreasinglyGeneralAlphanumerics:
cat = getattr(Cats, k)
if type(cat) == tuple:
print('>>>', cat)
code = cat.code
if re.match(cat.re_multiple, char_str):
refined = Cats.escape_code(code)
break
else:
refined = c
elif (c == CODE.PUNC
and len(chars) <= SIZE.MAX_PUNC_IN_GROUP):
refined = escaped_bracket(char_str, dialect=self.Cats.dialect)
fixed = True
else:
refined = c
if fixed:
out.append((refined, m, M, 'fixed'))
else:
out.append((refined, m, M))
return out
def rle_fc_c(self, s, pattern, rlefc_in, rlec_in):
if (pattern[0] != COARSEST_ALPHANUMERIC_CODE
or (rlefc_in == False and rlec_in == False)):
return (False, False)
rlefc = []
rlec = []
last_fc = None
last_c = None
for c in s:
fc = self.fine_class(c)
if fc == last_fc:
nfc += 1
else:
if last_fc:
rlefc.append((last_fc, nfc))
last_fc = fc
nfc = 1
if c == last_c:
nc += 1
else:
if last_c:
rlec.append((last_c, nc))
last_c = c
nc = 1
if last_c:
rlefc.append((last_fc, nfc))
rlec.append((last_c, nc))
v = self.variableLengthFrags
return (expand_or_falsify_vrle(rlefc, rlefc_in, variableLength=v),
expand_or_falsify_vrle(rlec, rlec_in, fixed=True,
variableLength=v))
def fine_class(self, c):
cats = self.Cats
if c.isdigit():
return cats.Digit.code
elif 'a' <= c <= 'z':
return cats.letter.code
elif 'A' <= c <= 'Z':
return cats.LETTER.code
elif c in cats.extra_letters or not UNICHRS:
return cats.LETTER_.code
else:
return cats.ULetter_.code
def fragment2re(self, fragment, tagged=False, as_re=True):
(c, m, M) = fragment[:3]
fixed = len(fragment) > 3
Cats = self.Cats
regex = c if (fixed or not as_re) else Cats[c].re_string
if (m is None or m == 0) and M is None:
part = regex + '*'
elif M is None:
part = regex + '+'
elif m == M == 1:
part = regex
elif m == M == 2 and len(regex) == 1:
part = regex + regex
elif m == M:
part = regex + ('{%d}' % m)
else:
part = regex + ('?' if m == 0 and M == 1 else ('{%d,%s}' % (m, M)))
return capture_group(part) if (tagged and not fixed) else part
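    # Illustrative aside (not part of the original module): with a default
    # Categories instance and no extra letters, fragment2re maps VRLE
    # fragments to regex snippets roughly as follows --
    #   ('D', 1, 3)            -> r'\d{1,3}'
    #   ('D', 1, None)         -> r'\d+'
    #   (r'\-', 1, 1, 'fixed') -> r'\-'   (4-tuples are treated as literal text)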
def vrle2re(self, vrles, tagged=False, as_re=True):
parts = [self.fragment2re(frag, tagged=tagged, as_re=as_re)
for frag in vrles]
if self.n_stripped > 0:
ws = [r'\s*']
parts = ws + parts + ws
return poss_term_re(''.join(parts))
def vrle2refrags(self, vrles):
if self.n_stripped > 0:
return ([Fragment(r'\s*', True)]
+ [Fragment(self.fragment2re(frag, tagged=False,
as_re=True),
len(frag) < 4)
for frag in vrles]
+ [Fragment(r'\s*', True)])
else:
return [Fragment(self.fragment2re(frag, tagged=False, as_re=True),
len(frag) < 4)
for frag in vrles]
|
MIT License
|
flexget/flexget
|
flexget/components/series/db.py
|
Season.completed
|
python
|
def completed(self):
if not self.releases:
return False
return any(release.downloaded for release in self.releases)
|
Return True if the season has any releases marked as downloaded.
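As a quick illustration of the property's behaviour, here is a minimal, self-contained sketch with plain objects standing in for the SQLAlchemy models (names are illustrative only, not the real schema):

class FakeRelease:
    def __init__(self, downloaded):
        self.downloaded = downloaded

def season_completed(releases):
    # mirrors Season.completed: an empty release list means "not completed"
    if not releases:
        return False
    return any(release.downloaded for release in releases)

print(season_completed([]))                                        # False
print(season_completed([FakeRelease(False), FakeRelease(True)]))   # True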
|
https://github.com/flexget/flexget/blob/e625eb09324a9d6be4cfb42601c6af4628b2226a/flexget/components/series/db.py#L152-L158
|
import re
from datetime import datetime, timedelta
from functools import total_ordering
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
from loguru import logger
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Unicode,
and_,
delete,
desc,
func,
select,
update,
)
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.orm import backref, relation
from flexget import db_schema, plugin
from flexget.components.series.utils import normalize_series_name
from flexget.event import event, fire_event
from flexget.manager import Session
from flexget.utils.database import quality_property, with_session
from flexget.utils.sqlalchemy_utils import (
create_index,
drop_tables,
table_add_column,
table_columns,
table_exists,
table_schema,
)
from flexget.utils.tools import parse_episode_identifier
if TYPE_CHECKING:
from flexget.components.parsing.parsers.parser_common import SeriesParseResult
from flexget.utils.qualities import Quality
SCHEMA_VER = 14
logger = logger.bind(name='series.db')
Base = db_schema.versioned_base('series', SCHEMA_VER)
class NormalizedComparator(Comparator):
def operate(self, op, other):
if isinstance(other, list):
other = [normalize_series_name(o) for o in other]
else:
other = normalize_series_name(other)
return op(self.__clause_element__(), other)
class Series(Base):
__tablename__ = 'series'
id = Column(Integer, primary_key=True)
_name = Column('name', Unicode)
_name_normalized = Column('name_lower', Unicode, index=True, unique=True)
identified_by = Column(String)
begin_episode_id = Column(
Integer, ForeignKey('series_episodes.id', name='begin_episode_id', use_alter=True)
)
begin = relation(
'Episode',
uselist=False,
primaryjoin="Series.begin_episode_id == Episode.id",
foreign_keys=[begin_episode_id],
post_update=True,
backref='begins_series',
)
episodes = relation(
'Episode',
backref='series',
cascade='all, delete, delete-orphan',
primaryjoin='Series.id == Episode.series_id',
)
in_tasks = relation(
'SeriesTask',
backref=backref('series', uselist=False),
cascade='all, delete, delete-orphan',
)
alternate_names = relation(
'AlternateNames', backref='series', cascade='all, delete, delete-orphan'
)
seasons = relation('Season', backref='series', cascade='all, delete, delete-orphan')
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
self._name_normalized = normalize_series_name(value)
@name.comparator
def name(self):
return NormalizedComparator(self._name_normalized)
@property
def name_normalized(self):
return self._name_normalized
def __str__(self):
return '<Series(id=%s,name=%s)>' % (self.id, self.name)
def __repr__(self):
return str(self).encode('ascii', 'replace')
def episodes_for_season(self, season_num):
return len(
[
episode
for episode in self.episodes
if episode.season == season_num and episode.downloaded_releases
]
)
@property
def completed_seasons(self):
return [season.season for season in self.seasons if season.completed]
class Season(Base):
__tablename__ = 'series_seasons'
id = Column(Integer, primary_key=True)
identifier = Column(String)
identified_by = Column(String)
season = Column(Integer)
series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
releases = relation('SeasonRelease', backref='season', cascade='all, delete, delete-orphan')
is_season = True
@property
|
MIT License
|
tanghaibao/jcvi
|
jcvi/formats/sam.py
|
coverage
|
python
|
def coverage(args):
p = OptionParser(coverage.__doc__)
p.add_option(
"--format",
default="bigwig",
choices=("bedgraph", "bigwig", "coverage"),
help="Output format",
)
p.add_option("--nosort", default=False, action="store_true", help="Do not sort BAM")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
format = opts.format
if opts.nosort:
logging.debug("BAM sorting skipped")
else:
bamfile = index([bamfile, "--fasta={0}".format(fastafile)])
pf = bamfile.rsplit(".", 2)[0]
sizesfile = Sizes(fastafile).filename
cmd = "genomeCoverageBed -ibam {0} -g {1}".format(bamfile, sizesfile)
if format in ("bedgraph", "bigwig"):
cmd += " -bg"
bedgraphfile = pf + ".bedgraph"
sh(cmd, outfile=bedgraphfile)
if format == "bedgraph":
return bedgraphfile
bigwigfile = pf + ".bigwig"
cmd = "bedGraphToBigWig {0} {1} {2}".format(bedgraphfile, sizesfile, bigwigfile)
sh(cmd)
return bigwigfile
coveragefile = pf + ".coverage"
if need_update(fastafile, coveragefile):
sh(cmd, outfile=coveragefile)
gcf = GenomeCoverageFile(coveragefile)
fw = must_open(opts.outfile, "w")
for seqid, cov in gcf.iter_coverage_seqid():
print("\t".join((seqid, "{0:.1f}".format(cov))), file=fw)
fw.close()
|
%prog coverage fastafile bamfile
Calculate coverage for the BAM file. The BAM file will be sorted unless
--nosort is given.
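A hedged usage sketch (assumes jcvi is installed and that samtools, bedtools and bedGraphToBigWig are on PATH; the file names are placeholders):

from jcvi.formats.sam import coverage

# sorts and indexes the BAM (unless --nosort), writes a bedgraph next to it,
# converts that to bigwig, and returns the bigwig path
bigwigfile = coverage(["ref.fasta", "aln.bam", "--format=bigwig"])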
|
https://github.com/tanghaibao/jcvi/blob/3b161796234670ce1c4894974eaeb590d35cf2a2/jcvi/formats/sam.py#L438-L490
|
import os
import os.path as op
import sys
import logging
from collections import defaultdict
from itertools import groupby
from jcvi.formats.base import LineFile, must_open
from jcvi.formats.fasta import Fasta
from jcvi.formats.sizes import Sizes
from jcvi.utils.cbook import fill
from jcvi.assembly.base import Astat
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
Popen,
PIPE,
need_update,
sh,
mkdir,
glob,
popen,
get_abs_path,
)
class SamLine(object):
def __init__(self, row):
args = row.strip().split("\t")
self.qname = args[0]
self.flag = int(args[1])
self.rname = args[2]
self.pos = args[3]
self.mapq = args[4]
self.cigar = args[5]
self.mrnm = args[6]
self.mpos = args[7]
self.isize = args[8]
self.seq = args[9]
self.qual = args[10]
self.extra = args[11:]
def __str__(self):
return "\t".join(
str(x)
for x in (
self.qname,
self.flag,
self.rname,
self.pos,
self.mapq,
self.cigar,
self.mrnm,
self.mpos,
self.isize,
self.seq,
self.qual,
"\t".join(self.extra),
)
)
@property
def orientation(self):
return "-" if self.flag & 0x10 == 0 else "+"
def update_readname(self):
if self.flag & 0x40 == 0:
tag = "/1"
elif self.flag & 0x80 == 0:
tag = "/2"
else:
tag = ""
self.qname += tag
@property
def pairline(self):
qpos = self.cigar.split("H", 1)[0]
return "%s:%s\t%s:%s" % (self.qname, qpos, self.rname, self.pos)
class Sam(LineFile):
def __init__(self, filename, callback=None):
super(Sam, self).__init__(filename)
fp = open(filename)
for row in fp:
if row[0] == "@":
continue
s = SamLine(row)
if callback:
callback(s)
def output_bam(cmd, outfile, cpus=8):
bam = outfile.endswith(".bam")
if not bam:
return cmd + " > {0}".format(outfile)
outcmd, mflag = ("samtools view -bS", "-@ {0}".format(cpus))
cmd += " | {0} {1} - > {2}".format(outcmd, mflag, outfile)
return cmd
class GenomeCoverageLine(object):
def __init__(self, row):
args = row.split()
self.seqid = args[0]
self.depth = int(args[1])
self.positions = int(args[2])
self.length = int(args[3])
self.freq = float(args[4])
class GenomeCoverageFile(LineFile):
def __init__(self, filename):
super(GenomeCoverageFile, self).__init__(filename)
fp = open(filename)
for row in fp:
self.append(GenomeCoverageLine(row))
def iter_coverage_seqid(self):
for seqid, lines in groupby(self, key=lambda x: x.seqid):
lines = list(lines)
length = lines[0].length
counts = 0
for r in lines:
counts += r.depth * r.positions
yield seqid, counts * 1.0 / length
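# Illustrative aside (not part of the original module): iter_coverage_seqid
# turns genomeCoverageBed histogram rows into a length-weighted mean depth
# per sequence.  For rows of (depth, positions, length) such as
#   (0, 50, 100) and (10, 50, 100)
# the yielded coverage is (0*50 + 10*50) / 100 = 5.0.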
def get_prefix(readfile, dbfile):
rdpf = op.basename(readfile).replace(".gz", "").rsplit(".", 1)[0]
dbpf = op.basename(dbfile).split(".")[0]
return ".".join((rdpf, dbpf))
def get_samfile(
readfile, dbfile, bam=False, mapped=False, unmapped=False, bowtie=False
):
prefix = get_prefix(readfile, dbfile)
ext = ".bam" if bam else ".sam"
samfile = prefix + ext
ext = ".fastq" if bowtie else ext
mapped = (prefix + ".mapped" + ext) if mapped else None
unmapped = (prefix + ".unmapped" + ext) if unmapped else None
return samfile, mapped, unmapped
def get_minibam(bamfile, region, overwrite=True):
xregion = region.replace(":", "_").replace("-", "_").replace(",", "")
minibamfile = op.basename(bamfile).replace(".bam", ".{}.bam".format(xregion))
baifile = minibamfile + ".bai"
if op.exists(baifile):
sh("rm {}".format(baifile))
if not overwrite and op.exists(minibamfile):
logging.error("Output name exists: `{}`".format(minibamfile))
return
cmd = "samtools view {} {} -b".format(bamfile, region)
cmd += " -o {0}".format(minibamfile)
sh(cmd)
sh("samtools index {0}".format(minibamfile))
return minibamfile
def get_minibam_bed(bamfile, bedfile, minibam=None):
pf = op.basename(bedfile).split(".")[0]
minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
minisamfile = minibam.replace(".bam", ".sam")
baifile = minibamfile + ".bai"
if op.exists(baifile):
sh("rm {}".format(baifile))
cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
sh(cmd)
cmd = "cat {}".format(bedfile)
cmd += " | perl -lane 'print \"$F[0]:$F[1]-$F[2]\"'"
cmd += " | xargs -n1 -t -I \{\}"
cmd += " samtools view {}".format(bamfile)
cmd += " \{\} >> " + minisamfile
sh(cmd)
cmd = "samtools view {} -b".format(minisamfile)
cmd += " | samtools sort -"
cmd += " -o {0}".format(minibamfile)
sh(cmd)
sh("samtools index {0}".format(minibamfile))
return minibamfile
def main():
actions = (
("append", "append or prepend string to read names"),
("bed", "convert bam files to bed"),
("fastq", "convert bam files to paired fastq"),
("pair", "parse sam file and get pairs"),
("pairs", "print paired-end reads from BAM file"),
("chimera", "parse sam file from `bwasw` and list multi-hit reads"),
("noclip", "remove clipped reads from bam"),
("ace", "convert sam file to ace"),
("consensus", "convert bam alignments to consensus FASTA"),
("fpkm", "calculate FPKM values from BAM file"),
("coverage", "calculate depth for BAM file"),
("vcf", "call SNPs on a set of bam files"),
("mapped", "extract mapped/unmapped reads from samfile"),
("count", "count the number of reads mapped using htseq"),
("merge", "merge bam files"),
("index", "convert to bam, sort and then index"),
("mini", "extract mini-bam for a single region"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def fastq(args):
p = OptionParser(fastq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, pf = args
singletons = pf + ".se.fastq"
a = pf + ".read1.fastq"
b = pf + ".read2.fastq"
cmd = "samtools collate -uOn 128 {} tmp-prefix".format(bamfile)
cmd += " | samtools fastq -s {} -1 {} -2 {} -".format(singletons, a, b)
sh(cmd)
if os.stat(singletons).st_size == 0:
os.remove(singletons)
return a, b
def mini(args):
p = OptionParser(mini.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, region = args
get_minibam(bamfile, region)
def noclip(args):
p = OptionParser(noclip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(bamfile,) = args
noclipbam = bamfile.replace(".bam", ".noclip.bam")
cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
sh(cmd)
sh("samtools index {}".format(noclipbam))
def append(args):
p = OptionParser(append.__doc__)
p.add_option("--prepend", help="Prepend string to read names")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(bamfile,) = args
prepend = opts.prepend
icmd = "samtools view -h {0}".format(bamfile)
bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
p = Popen(ocmd, stdin=PIPE)
for row in popen(icmd):
if row[0] == "@":
print(row.strip(), file=p.stdin)
else:
s = SamLine(row)
if prepend:
s.qname = prepend + "_" + s.qname
else:
s.update_readname()
print(s, file=p.stdin)
def bed(args):
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
bedfile = args[0]
bamfiles = args[1:]
for bamfile in bamfiles:
cmd = "bamToBed -i {0}".format(bamfile)
sh(cmd, outfile=bedfile, append=True)
def merge(args):
from jcvi.apps.grid import MakeManager
p = OptionParser(merge.__doc__)
p.set_sep(sep="_", help="Separator to group per prefix")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
merged_bams = args[0]
bamdirs = args[1:]
mkdir(merged_bams)
bams = []
for x in bamdirs:
bams += glob(op.join(x, "*.bam"))
bams = [x for x in bams if "nsorted" not in x]
logging.debug("Found a total of {0} BAM files.".format(len(bams)))
sep = opts.sep
key = lambda x: op.basename(x).split(sep)[0]
bams.sort(key=key)
mm = MakeManager()
for prefix, files in groupby(bams, key=key):
files = sorted(list(files))
nfiles = len(files)
source = " ".join(files)
target = op.join(merged_bams, op.basename(files[0]))
if nfiles == 1:
source = get_abs_path(source)
cmd = "ln -s {0} {1}".format(source, target)
mm.add("", target, cmd)
else:
cmd = "samtools merge -@ 8 {0} {1}".format(target, source)
mm.add(files, target, cmd, remove=True)
mm.write()
def count(args):
p = OptionParser(count.__doc__)
p.add_option("--type", default="exon", help="Only count feature type")
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, gtf = args
cpus = opts.cpus
pf = bamfile.split(".")[0]
countfile = pf + ".count"
if not need_update(bamfile, countfile):
return
nsorted = pf + "_nsorted"
nsortedbam, nsortedsam = nsorted + ".bam", nsorted + ".sam"
if need_update(bamfile, nsortedsam):
cmd = "samtools sort -@ {0} -n {1} {2}".format(cpus, bamfile, nsorted)
sh(cmd)
cmd = "samtools view -@ {0} -h {1}".format(cpus, nsortedbam)
sh(cmd, outfile=nsortedsam)
if need_update(nsortedsam, countfile):
cmd = "htseq-count --stranded=no --minaqual=10"
cmd += " -t {0}".format(opts.type)
cmd += " {0} {1}".format(nsortedsam, gtf)
sh(cmd, outfile=countfile)
|
BSD 2-Clause Simplified License
|
v1k45/python-qbittorrent
|
qbittorrent/client.py
|
Client.pause_all
|
python
|
def pause_all(self):
return self._post('torrents/pause', data={'hashes': 'all'})
|
Pause all torrents.
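A hedged usage sketch (assumes python-qbittorrent is installed and a qBittorrent Web UI is reachable; the URL and credentials are placeholders):

from qbittorrent import Client

qb = Client('http://127.0.0.1:8080/')
qb.login('admin', 'adminadmin')
qb.pause_all()   # POSTs torrents/pause with hashes=all
qb.logout()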
|
https://github.com/v1k45/python-qbittorrent/blob/52b45aeeda383e810f1c5e44e6d0c7c1fdfff257/qbittorrent/client.py#L441-L445
|
import requests
import json
class LoginRequired(Exception):
def __str__(self):
return 'Please login first.'
class Client(object):
def __init__(self, url, verify=True):
if not url.endswith('/'):
url += '/'
self.url = url + 'api/v2/'
self.verify = verify
session = requests.Session()
prefs_url = self.url + 'app/preferences'
check_prefs = session.get(prefs_url, verify=self.verify)
if check_prefs.status_code == 200:
self._is_authenticated = True
self.session = session
elif check_prefs.status_code == 404:
self._is_authenticated = False
raise RuntimeError("""
This wrapper only supports qBittorrent applications
with version higher than 4.1.x.
Please use the latest qBittorrent release.
""")
else:
self._is_authenticated = False
def _get(self, endpoint, **kwargs):
return self._request(endpoint, 'get', **kwargs)
def _post(self, endpoint, data, **kwargs):
return self._request(endpoint, 'post', data, **kwargs)
def _request(self, endpoint, method, data=None, **kwargs):
final_url = self.url + endpoint
if not self._is_authenticated:
raise LoginRequired
kwargs['verify'] = self.verify
if method == 'get':
request = self.session.get(final_url, **kwargs)
else:
request = self.session.post(final_url, data, **kwargs)
request.raise_for_status()
request.encoding = 'utf_8'
if len(request.text) == 0:
data = json.loads('{}')
else:
try:
data = json.loads(request.text)
except ValueError:
data = request.text
return data
def login(self, username='admin', password='admin'):
self.session = requests.Session()
login = self.session.post(self.url + 'auth/login',
data={'username': username,
'password': password},
verify=self.verify)
if login.text == 'Ok.':
self._is_authenticated = True
else:
return login.text
def logout(self):
response = self._get('auth/logout')
self._is_authenticated = False
return response
@property
def qbittorrent_version(self):
return self._get('app/version')
@property
def api_version(self):
return self._get('app/webapiVersion')
def shutdown(self):
return self._get('app/shutdown')
def get_default_save_path(self):
return self._get('app/defaultSavePath')
def get_log(self, **params):
return self._get('log/main', params=params)
def torrents(self, **filters):
params = {}
for name, value in filters.items():
name = 'filter' if name == 'status' else name
params[name] = value
return self._get('torrents/info', params=params)
def get_torrent(self, infohash):
return self._get('torrents/properties?hash=' + infohash.lower())
def get_torrent_trackers(self, infohash):
return self._get('torrents/trackers?hash=' + infohash.lower())
def get_torrent_webseeds(self, infohash):
return self._get('torrents/webseeds?hash=' + infohash.lower())
def get_torrent_files(self, infohash):
return self._get('torrents/files?hash=' + infohash.lower())
def get_torrent_piece_states(self, infohash):
return self._get('torrents/pieceStates?hash=' + infohash.lower())
def get_torrent_piece_hashes(self, infohash):
return self._get('torrents/pieceHashes?hash=' + infohash.lower())
@property
def global_transfer_info(self):
return self._get('transfer/info')
@property
def preferences(self):
prefs = self._get('app/preferences')
class Proxy(Client):
def __init__(self, url, prefs, auth, session):
self.url = url
self.prefs = prefs
self._is_authenticated = auth
self.session = session
def __getitem__(self, key):
return self.prefs[key]
def __setitem__(self, key, value):
kwargs = {key: value}
return self.set_preferences(**kwargs)
def __call__(self):
return self.prefs
return Proxy(self.url, prefs, self._is_authenticated, self.session)
def sync_main_data(self, rid=0):
return self._get('sync/maindata', params={'rid': rid})
def sync_peers_data(self, infohash, rid=0):
params = {'hash': infohash.lower(), 'rid': rid}
return self._get('sync/torrentPeers', params=params)
def download_from_link(self, link, **kwargs):
old_arg_map = {'save_path': 'savepath'}
options = kwargs.copy()
for old_arg, new_arg in old_arg_map.items():
if options.get(old_arg) and not options.get(new_arg):
options[new_arg] = options[old_arg]
if isinstance(link, list):
options['urls'] = "\n".join(link)
else:
options['urls'] = link
dummy_file = {'_dummy': (None, '_dummy')}
return self._post('torrents/add', data=options, files=dummy_file)
def download_from_file(self, file_buffer, **kwargs):
if isinstance(file_buffer, list):
torrent_files = {}
for i, f in enumerate(file_buffer):
torrent_files.update({'torrents%s' % i: f})
else:
torrent_files = {'torrents': file_buffer}
data = kwargs.copy()
if data.get('save_path'):
data.update({'savepath': data['save_path']})
return self._post('torrents/add', data=data, files=torrent_files)
def add_trackers(self, infohash, trackers):
data = {'hash': infohash.lower(),
'urls': trackers}
return self._post('torrents/addTrackers', data=data)
def set_torrent_location(self, infohash_list, location):
data = self._process_infohash_list(infohash_list)
data['location'] = location
return self._post('torrents/setLocation', data=data)
def set_torrent_name(self, infohash, name):
data = {'hash': infohash.lower(),
'name': name}
return self._post('torrents/rename', data=data)
@staticmethod
def _process_infohash_list(infohash_list):
if isinstance(infohash_list, list):
data = {'hashes': '|'.join([h.lower() for h in infohash_list])}
else:
data = {'hashes': infohash_list.lower()}
return data
def pause(self, infohash):
return self._post('torrents/pause', data={'hashes': infohash.lower()})
|
MIT License
|
nguy/artview
|
artview/components/select_region.py
|
SelectRegion.update_points
|
python
|
def update_points(self):
func = self.VpathInteriorFunc.value
if func is not None:
points = func(self.paths)
if points is not None:
self.Vpoints.change(points)
|
Create a points object from the paths list.
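The interior function supplied through VpathInteriorFunc is expected to map the drawn paths to the points they enclose. A minimal geometric stand-in (hypothetical; the real ARTview functions also carry radar metadata) might look like:

import numpy as np
from matplotlib.path import Path

def points_inside(paths, xy):
    # keep the (x, y) rows that fall inside any of the drawn polygons
    mask = np.zeros(len(xy), dtype=bool)
    for path in paths:
        mask |= path.contains_points(xy)
    return xy[mask]

square = Path([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
grid = np.array([[0.5, 0.5], [2.0, 2.0]])
print(points_inside([square], grid))   # only the point inside the square survives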
|
https://github.com/nguy/artview/blob/9b522f61054b51979b24150f7f668a05741e92dd/artview/components/select_region.py#L199-L205
|
import numpy as np
import pyart
import os
from matplotlib.path import Path
from matplotlib.lines import Line2D
import csv
from ..core import (Variable, Component, common, VariableChoose,
componentsList, QtWidgets, QtCore)
from ..core.points import Points
class SelectRegion(Component):
Vpoints = None
VplotAxes = None
VpathInteriorFunc = None
Vgatefilter = None
Vradar = None
Vfield = None
@classmethod
def guiStart(self, parent=None):
kwargs, independent = common._SimplePluginStart("SelectRegion").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, VplotAxes=None, VpathInteriorFunc=None, Vfield=None,
name="SelectRegion", parent=None):
super(SelectRegion, self).__init__(name=name, parent=parent)
self.Vpoints = Variable(None)
self.Vgatefilter = Variable(None)
self.Vradar = Variable(None)
if VplotAxes is None:
self.VplotAxes = Variable(None)
else:
self.VplotAxes = VplotAxes
self.fig = None
if VpathInteriorFunc is None:
self.VpathInteriorFunc = Variable(None)
else:
self.VpathInteriorFunc = VpathInteriorFunc
if Vfield is None:
self.Vfield = Variable("")
else:
self.Vfield = Vfield
self.sharedVariables = {
"VplotAxes": self.NewPlotAxes,
"Vgatefilter": None,
"Vradar": None,
"VpathInteriorFunc": self.NewPathInteriorFunc,
"Vfield": None,
"Vpoints": None}
self.connectAllVariables()
self.columns = ("X", "Y", "Azimuth", "Range", "Value",
"Az Index", "R Index")
self._initialize_SelectRegion_vars()
self.CreateSelectRegionWidget()
self.NewPlotAxes(None, False)
self.show()
def _initialize_SelectRegion_vars(self):
self.previous_point = []
self.start_point = []
self.end_point = []
self.line = None
self.verts = [[]]
self.polys = [[]]
self.paths = []
def motion_notify_callback(self, event):
if event.inaxes:
ax = event.inaxes
x, y = event.xdata, event.ydata
if event.button is None and self.line is not None:
self.line.set_data([self.previous_point[0], x],
[self.previous_point[1], y])
self.fig.canvas.draw()
elif event.button == 1:
line = Line2D([self.previous_point[0], x],
[self.previous_point[1], y])
self.previous_point = [x, y]
self.verts[-1].append([x, y])
self.polys[-1].append(ax.add_line(line))
self.fig.canvas.draw()
def button_press_callback(self, event):
db_support = 'dblclick' in dir(event)
if event.inaxes:
x, y = event.xdata, event.ydata
ax = event.inaxes
if event.button == 1 and (not db_support or not event.dblclick):
if self.line is None:
self.line = Line2D([x, x], [y, y], marker='o')
self.start_point = [x, y]
self.previous_point = self.start_point
self.verts[-1].append([x, y])
self.polys[-1].append(ax.add_line(self.line))
self.fig.canvas.draw()
else:
self.line = Line2D([self.previous_point[0], x],
[self.previous_point[1], y],
marker='o')
self.previous_point = [x, y]
self.verts[-1].append([x, y])
self.polys[-1].append(event.inaxes.add_line(self.line))
self.fig.canvas.draw()
elif ((event.button == 1 and db_support and event.dblclick) or
event.button == 3 and self.line is not None):
self.line.set_data(
[self.previous_point[0], self.start_point[0]],
[self.previous_point[1], self.start_point[1]])
self.verts[-1].append(self.start_point)
self.fig.canvas.draw()
self.line = None
path = Path(self.verts[-1])
self.paths.append(path)
self.verts.append([])
self.polys.append([])
self.update_points()
|
BSD 3-Clause New or Revised License
|
google-research/disentanglement_lib
|
disentanglement_lib/methods/semi_supervised/semi_supervised_utils.py
|
make_labeller
|
python
|
def make_labeller(labels,
dataset,
random_state,
labeller_fn=gin.REQUIRED):
return labeller_fn(labels, dataset, random_state)
|
Wrapper that creates the labeller function.
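A hedged calling sketch, with a hypothetical labeller passed explicitly so that no gin configuration is needed (the real labellers are gin-configured elsewhere in the library):

import numpy as np

def identity_labeller(labels, dataset, random_state):
    # hypothetical labeller: return the labels unchanged plus per-factor sizes
    sizes = [int(labels[:, i].max()) + 1 for i in range(labels.shape[1])]
    return labels, sizes

labels = np.array([[0, 1], [2, 0]])
factors, factor_sizes = make_labeller(labels, dataset=None,
                                      random_state=np.random.RandomState(0),
                                      labeller_fn=identity_labeller)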
|
https://github.com/google-research/disentanglement_lib/blob/86a644d4ed35c771560dc3360756363d35477357/disentanglement_lib/methods/semi_supervised/semi_supervised_utils.py#L101-L106
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import gin.tf.external_configurables
import gin.tf
def sample_supervised_data(supervised_seed,
ground_truth_data,
num_labelled_samples):
supervised_random_state = np.random.RandomState(supervised_seed)
sampled_factors = ground_truth_data.sample_factors(num_labelled_samples,
supervised_random_state)
sampled_observations = ground_truth_data.sample_observations_from_factors(
sampled_factors, supervised_random_state)
sampled_factors, factor_sizes = make_labeller(sampled_factors,
ground_truth_data,
supervised_random_state)
return sampled_observations, sampled_factors, factor_sizes
def train_test_split(observations, labels, num_labelled_samples,
train_percentage):
assert observations.shape[0] == num_labelled_samples, "Wrong observations shape."
num_labelled_samples_train = int(
math.ceil(num_labelled_samples * train_percentage))
num_labelled_samples_test = num_labelled_samples - num_labelled_samples_train
observations_train = observations[:num_labelled_samples_train, :, :, :]
observations_test = observations[num_labelled_samples_train:, :, :, :]
labels_train = labels[:num_labelled_samples_train, :]
labels_test = labels[num_labelled_samples_train:, :]
assert labels_test.shape[0] == num_labelled_samples_test, "Wrong size test."
return observations_train, labels_train, observations_test, labels_test
@gin.configurable("labeller", blacklist=["labels", "dataset"])
|
Apache License 2.0
|
heyman/locust
|
locust/util/cache.py
|
memoize
|
python
|
def memoize(timeout, dynamic_timeout=False):
cache = {"timeout":timeout}
def decorator(func):
def wrapper(*args, **kwargs):
start = time()
if (not "time" in cache) or (start - cache["time"] > cache["timeout"]):
cache["result"] = func(*args, **kwargs)
cache["time"] = time()
if dynamic_timeout and cache["time"] - start > cache["timeout"]:
cache["timeout"] *= 2
return cache["result"]
def clear_cache():
if "time" in cache:
del cache["time"]
if "result" in cache:
del cache["result"]
wrapper.clear_cache = clear_cache
return wrapper
return decorator
|
Memoization decorator with support for a timeout.
If dynamic_timeout is set, the cache timeout is doubled whenever the cached
function takes longer to run than the current timeout.
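A hedged usage sketch (assumes this locust fork is importable; otherwise paste the decorator itself). Note that the cache holds a single result, so arguments are ignored while it is warm:

from time import sleep
from locust.util.cache import memoize

@memoize(timeout=10)
def slow_sum(a, b):
    sleep(0.1)
    return a + b

print(slow_sum(1, 2))    # computed and cached
print(slow_sum(1, 2))    # served from the cache for up to 10 seconds
slow_sum.clear_cache()   # force recomputation on the next call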
|
https://github.com/heyman/locust/blob/4baeb1d5ab9c291a9a48798b7d16c86184b3e831/locust/util/cache.py#L6-L33
|
from __future__ import absolute_import
from time import time
|
MIT License
|
dogoncouch/logdissect
|
logdissect/filters/source.py
|
FilterModule.__init__
|
python
|
def __init__(self, args=None):
self.name = "source"
self.desc = "match a log source"
if args:
args.add_argument('--source', action='append', dest='source',
help='match a log source')
|
Initialize the log source filter module.
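A hedged sketch of how the module registers its option on an argparse parser (assumes logdissect is installed):

import argparse
from logdissect.filters.source import FilterModule

parser = argparse.ArgumentParser()
FilterModule(args=parser)                        # adds the --source option
opts = parser.parse_args(['--source', 'myhost'])
print(opts.source)                               # ['myhost'], since action='append'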
|
https://github.com/dogoncouch/logdissect/blob/4fbb96a1717d277bbfdabff37ffecfe5f9c02464/logdissect/filters/source.py#L26-L33
|
from logdissect.filters.type import FilterModule as OurModule
class FilterModule(OurModule):
|
MIT License
|
openxaiproject/neural-conversation-models-response-evaluation
|
SSREM/configs.py
|
str2bool
|
python
|
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
|
Convert a string to a boolean.
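Typical use is as an argparse type, so flexible boolean spellings are accepted on the command line (a short sketch assuming the helper above is in scope):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=str2bool, default=True)
print(parser.parse_args(['--cuda', 'no']).cuda)   # False
print(str2bool('Y'))                              # True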
|
https://github.com/openxaiproject/neural-conversation-models-response-evaluation/blob/72c2e71f0a5068733762078dfbb5f1405ea0be00/SSREM/configs.py#L22-L29
|
import os
import argparse
from datetime import datetime
from pathlib import Path
import pprint
from torch import optim
import platform
import torch.nn as nn
project_dir = Path(__file__).resolve().parent.parent
optimizer_dict = {'RMSprop': optim.RMSprop, 'Adam': optim.Adam}
plt_sys = platform.system()
rnn_dict = {'lstm': nn.LSTM, 'gru': nn.GRU}
if "Windows" in plt_sys:
save_dir = Path(f"D:/git/conversation-metrics/results/")
elif "Linux" in plt_sys:
username = Path.home().name
save_dir = Path(f'/home/{username}/git/conversation-metrics/results/')
|
MIT License
|
google-research/valan
|
r2r/image_encoder.py
|
ImageEncoder._dense_pooling
|
python
|
def _dense_pooling(self, image_features):
batch_size = image_features.shape[0]
flat_image_features = tf.reshape(image_features, [batch_size, -1])
v_t = self._dense_pooling_layer(flat_image_features)
return v_t
|
Flattens and projects all views of pano features into LSTM hidden dim.
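A hedged shape sketch of the dense-pooling path (the shapes below are assumptions for illustration, not the model's real dimensions):

import tensorflow.compat.v2 as tf

image_features = tf.random.normal([2, 36, 64])    # [batch, pano views, feature dim]
flat = tf.reshape(image_features, [2, -1])        # -> [2, 36 * 64]
v_t = tf.keras.layers.Dense(512, name='dense_pooling')(flat)   # -> [2, 512]
print(v_t.shape)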
|
https://github.com/google-research/valan/blob/9fc6e38f411e6cb76408bf033cdc056ace980973/r2r/image_encoder.py#L119-L124
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
class ImageEncoder(tf.keras.layers.Layer):
def __init__(self,
attention_space_size,
num_lstm_units,
num_hidden_layers=1,
l2_scale=0.0,
dropout=0.0,
concat_context=False,
layernorm=False,
mode=None,
name=None,
use_attention_pooling=True):
super(ImageEncoder, self).__init__(name=name if name else 'image_encoder')
self._use_attention_pooling = use_attention_pooling
if self._use_attention_pooling:
self._projection_hidden_layer = tf.keras.layers.Dense(
attention_space_size, name='project_hidden')
self._projection_image_feature = tf.keras.layers.Dense(
attention_space_size, name='project_feature')
else:
self._dense_pooling_layer = tf.keras.layers.Dense(
num_lstm_units, name='dense_pooling')
self._cells = []
for layer_id in range(num_hidden_layers):
self._cells.append(
tf.keras.layers.LSTMCell(
num_lstm_units,
kernel_regularizer=tf.keras.regularizers.l2(l2_scale),
recurrent_regularizer=tf.keras.regularizers.l2(l2_scale),
name='lstm_layer_{}'.format(layer_id)))
self.history_context_encoder = tf.keras.layers.StackedRNNCells(self._cells)
self.attention = tf.keras.layers.Attention(use_scale=True, name='attention')
self._use_layernorm = layernorm
self._context_dropout = tf.keras.layers.Dropout(dropout)
self._context_layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self._hidden_layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self._image_layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self._concat_context = concat_context
self._dense = tf.keras.layers.Dense(num_lstm_units, activation='tanh')
if dropout > 0.0 and not mode:
raise ValueError(
'`mode` must be set to train/eval/predict when using dropout.')
self._is_training = True if mode == 'train' else False
def _attention_pooling(self, image_features, current_lstm_state):
previous_step_lstm_output = current_lstm_state[-1][0]
hidden_state = tf.expand_dims(previous_step_lstm_output, axis=1)
x = self._projection_hidden_layer(hidden_state)
if self._use_layernorm:
x = self._hidden_layernorm(x)
y = self._projection_image_feature(image_features)
if self._use_layernorm:
y = self._image_layernorm(y)
v_t = self.attention([x, y])
v_t = tf.squeeze(v_t, axis=1)
return v_t
|
Apache License 2.0
|
4dnucleome/partseg
|
package/PartSegCore/analysis/measurement_calculation.py
|
MeasurementResult.get_global_parameters
|
python
|
def get_global_parameters(self):
if FILE_NAME_STR in self._data_dict:
name = self._data_dict[FILE_NAME_STR]
res = [name]
iterator = iter(self._data_dict.keys())
try:
next(iterator)
except StopIteration:
pass
else:
res = []
iterator = iter(self._data_dict.keys())
for el in iterator:
per_comp = self._type_dict[el][0]
val = self._data_dict[el]
if per_comp != PerComponent.Yes:
res.append(val)
return res
|
Get only the parameters which are not 'PerComponent.Yes'.
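A behavioural sketch with plain dicts standing in for MeasurementResult (the real class also tracks units and component columns): entries whose PerComponent flag is Yes are skipped, and the file name, when present, comes first:

data = {"File name": "cells.tif", "Volume": 12.0, "Diameter": [3.0, 4.0]}
per_component = {"File name": False, "Volume": False, "Diameter": True}

globals_only = [value for key, value in data.items() if not per_component[key]]
print(globals_only)   # ['cells.tif', 12.0]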
|
https://github.com/4dnucleome/partseg/blob/f6bb1bb02c006f2e009e873a0e3bad87469cc90e/package/PartSegCore/analysis/measurement_calculation.py#L189-L207
|
from collections import OrderedDict
from enum import Enum
from functools import reduce
from math import pi
from typing import (
Any,
Callable,
Dict,
Generator,
Iterator,
List,
MutableMapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import numpy as np
import pandas as pd
import SimpleITK
from mahotas.features import haralick
from scipy.spatial.distance import cdist
from sympy import symbols
from PartSegCore.segmentation.restartable_segmentation_algorithms import LowerThresholdAlgorithm
from PartSegImage import Image
from .. import autofit as af
from ..algorithm_describe_base import AlgorithmProperty, Register, ROIExtractionProfile
from ..channel_class import Channel
from ..class_generator import enum_register
from ..mask_partition_utils import BorderRim, MaskDistanceSplit
from ..roi_info import ROIInfo
from ..universal_const import UNIT_SCALE, Units
from .calculate_pipeline import calculate_segmentation_step
from .measurement_base import AreaType, Leaf, MeasurementEntry, MeasurementMethodBase, Node, PerComponent
NO_COMPONENT = -1
class CorrelationEnum(str, Enum):
pearson = "Pearson correlation coefficient"
manders = "Mander's overlap coefficient"
intensity = "Intensity correlation quotient"
spearman = "Spearman rank correlation"
def __str__(self):
return self.value
class ProhibitedDivision(Exception):
pass
class SettingsValue(NamedTuple):
function: Callable
help_message: str
arguments: Optional[dict]
is_component: bool
default_area: Optional[AreaType] = None
class ComponentsInfo(NamedTuple):
roi_components: np.ndarray
mask_components: np.ndarray
components_translation: Dict[int, List[int]]
def empty_fun(_a0=None, _a1=None):
    pass
MeasurementValueType = Union[float, List[float], str]
MeasurementResultType = Tuple[MeasurementValueType, str]
MeasurementResultInputType = Tuple[MeasurementValueType, str, Tuple[PerComponent, AreaType]]
FILE_NAME_STR = "File name"
class MeasurementResult(MutableMapping[str, MeasurementResultType]):
def __init__(self, components_info: ComponentsInfo):
self.components_info = components_info
self._data_dict = OrderedDict()
self._units_dict: Dict[str, str] = {}
self._type_dict: Dict[str, Tuple[PerComponent, AreaType]] = {}
self._units_dict["Mask component"] = ""
self._units_dict["Segmentation component"] = ""
def __str__(self):
return "".join(
f"{key}: {val}; type {self._type_dict[key]}, units {self._units_dict[key]}\n"
for key, val in self._data_dict.items()
)
def __setitem__(self, k: str, v: MeasurementResultInputType) -> None:
self._data_dict[k] = v[0]
self._units_dict[k] = v[1]
self._type_dict[k] = v[2]
def __delitem__(self, v: str) -> None:
del self._data_dict[v]
del self._units_dict[v]
del self._type_dict[v]
def __getitem__(self, k: str) -> MeasurementResultType:
return self._data_dict[k], self._units_dict[k]
def __len__(self) -> int:
return len(self._data_dict)
def __iter__(self) -> Iterator[str]:
return iter(self._data_dict)
def to_dataframe(self) -> pd.DataFrame:
data = self.get_separated()
columns = [
f"{label} ({units})" if units else label for label, units in zip(self.get_labels(), self.get_units())
]
df = pd.DataFrame(data, columns=columns)
return df.astype({"Segmentation component": int}).set_index("Segmentation component")
def set_filename(self, path_fo_file: str):
self._data_dict[FILE_NAME_STR] = path_fo_file
self._type_dict[FILE_NAME_STR] = PerComponent.No, AreaType.ROI
self._units_dict[FILE_NAME_STR] = ""
self._data_dict.move_to_end(FILE_NAME_STR, False)
def get_component_info(self) -> Tuple[bool, bool]:
has_mask_components = any((x == PerComponent.Yes and y != AreaType.ROI for x, y in self._type_dict.values()))
has_segmentation_components = any(
(x == PerComponent.Yes and y == AreaType.ROI for x, y in self._type_dict.values())
)
return has_mask_components, has_segmentation_components
def get_labels(self, expand=True) -> List[str]:
if not expand:
return list(self.keys())
has_mask_components, has_segmentation_components = self.get_component_info()
labels = list(self._data_dict.keys())
index = 1 if FILE_NAME_STR in self._data_dict else 0
if has_mask_components:
labels.insert(index, "Mask component")
if has_segmentation_components:
labels.insert(index, "Segmentation component")
return labels
def get_units(self) -> List[str]:
return [self._units_dict[x] for x in self.get_labels()]
def get_global_names(self):
labels = list(self._data_dict.keys())
return [x for x in labels if self._type_dict[x][0] != PerComponent.Yes]
|
BSD 3-Clause New or Revised License
|
petrochukm/hparams
|
hparams/hparams.py
|
configurable
|
python
|
def configurable(function: _ConfiguredFunction = None) -> _ConfiguredFunction:
if not function:
return configurable
function_signature = _get_function_signature(function)
function_parameters = list(_get_function_parameters(function).values())
function_default_kwargs = _get_function_default_kwargs(function)
def _get_configuration():
return _configuration[function_signature] if function_signature in _configuration else {}
@wraps(function)
def decorator(*args, **kwargs):
global _configuration
_resolve_skipped()
config = _get_configuration()
if function_signature not in _configuration:
warnings.warn('@configurable: No config for `%s`. ' % (function_signature,))
args, kwargs = _merge_args(function_parameters, args, kwargs, config,
function_default_kwargs, function_signature)
[a._raise() for a in itertools.chain(args, kwargs.values()) if isinstance(a, _HParam)]
return function(*args, **kwargs)
def get_configured_partial():
return partial(decorator, **_get_configuration())
decorator.get_configured_partial = get_configured_partial
decorator._configurable = True
if hasattr(function, '__code__'):
_code_to_function[function.__code__] = function
else:
logger.warning(
'@configurable: `%s` does not have a `__code__` attribute; '
'therefore, this cannot verify if `HParams` are injected. '
'This should only affect Python `builtins`.', function_signature)
return cast(_ConfiguredFunction, decorator)
|
Decorator that enables configuring module arguments.
It lets one set the arguments of a module via a global configuration. The decorator
also stores the parameters the decorated function was called with.
Args:
None
Returns:
(callable): Decorated function
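A hedged end-to-end sketch (assumes the package is importable as hparams with these exports, as in its README):

from hparams import HParam, HParams, add_config, configurable

@configurable
def train(batch_size=HParam(int)):
    return batch_size

add_config({train: HParams(batch_size=32)})
print(train())   # 32, injected from the global configuration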
|
https://github.com/petrochukm/hparams/blob/4c3288375a8521ec3d706677fc1b2a83384dade7/hparams/hparams.py#L549-L610
|
from functools import lru_cache
from functools import partial
from functools import wraps
from importlib import import_module
from pathlib import Path
from typing import cast
from typing import get_type_hints
import inspect
import itertools
import logging
import pprint
import sys
import traceback
import typing
import warnings
from typeguard import check_type
logger = logging.getLogger(__name__)
pretty_printer = pprint.PrettyPrinter()
class HParams(dict):
pass
_HParamReturnType = typing.TypeVar('_HParamReturnType')
class _HParam():
def __init__(self, type_=typing.Any):
stack = traceback.extract_stack(limit=3)[-3]
self.type = type_
self.error_message = 'The parameter set to `HParam` at %s:%s must be configured.' % (
stack.filename, stack.lineno)
for attribute in [
'__contains__', '__hash__', '__len__', '__call__', '__add__', '__sub__', '__mul__',
'__floordiv__', '__div__', '__mod__', '__pow__', '__lshift__', '__rshift__',
'__and__', '__xor__', '__or__', '__iadd__', '__isub__', '__imul__', '__idiv__',
'__ifloordiv__', '__imod__', '__ipow__', '__ilshift__', '__irshift__', '__iand__',
'__ixor__', '__ior__', '__neg__', '__pos__', '__abs__', '__invert__', '__complex__',
'__int__', '__long__', '__float__', '__oct__', '__hex__', '__lt__', '__le__',
'__eq__', '__ne__', '__ge__', '__gt__', '__cmp__', '__round__', '__getitem__',
'__setitem__', '__delitem__', '__iter__', '__reversed__', '__copy__', '__deepcopy__'
]:
setattr(self.__class__, attribute, self._raise)
def _raise(self, *args, **kwargs):
raise ValueError(self.error_message)
def __getattribute__(self, name):
if name in ['error_message', '_raise', '__dict__', '__class__', 'type']:
return super().__getattribute__(name)
self._raise()
def HParam(type_=typing.Any) -> _HParamReturnType:
return cast(_HParamReturnType, _HParam(type_=type_))
@lru_cache()
def _get_function_signature(func):
try:
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
absolute_filename = Path(inspect.getfile(func))
relative_filename = None
for path in sys.path:
try:
new_filename = str(absolute_filename.relative_to(Path(path).absolute()))
if relative_filename is None:
relative_filename = new_filename
elif len(new_filename) > len(relative_filename):
relative_filename = new_filename
except ValueError:
pass
filename = str(relative_filename if relative_filename is not None else absolute_filename)
return filename.replace('/', '.')[:-3] + '.' + func.__qualname__
except TypeError:
return '#' + func.__qualname__
@lru_cache()
def _get_function_path(func):
if hasattr(func, '__qualname__'):
return inspect.getmodule(func).__name__ + '.' + func.__qualname__
else:
return inspect.getmodule(func).__name__
@lru_cache()
def _get_function_parameters(func):
return inspect.signature(func).parameters
@lru_cache()
def _get_function_default_kwargs(func):
return {
k: v.default
for k, v in _get_function_parameters(func).items()
if v.default is not inspect.Parameter.empty
}
def _function_has_keyword_parameters(func, kwargs):
parameters = _get_function_parameters(func)
has_var_keyword = any([
parameter.kind == inspect.Parameter.VAR_KEYWORD for parameter in list(parameters.values())
])
type_hints = get_type_hints(func)
for kwarg in kwargs.keys():
if not has_var_keyword and (kwarg not in parameters or
parameters[kwarg].kind == inspect.Parameter.VAR_POSITIONAL):
raise TypeError('Function `%s` does not accept configured parameter `%s`.' %
(_get_function_signature(func), kwarg))
try:
if (kwarg in parameters and parameters[kwarg].default is not inspect.Parameter.empty and
isinstance(parameters[kwarg].default, _HParam)):
check_type(kwarg, kwargs[kwarg], parameters[kwarg].default.type)
except TypeError:
raise TypeError('Function `%s` requires parameter `%s` to be of type `%s`.' %
(_get_function_signature(func), kwarg, parameters[kwarg].default.type))
try:
if kwarg in type_hints:
check_type(kwarg, kwargs[kwarg], type_hints[kwarg])
except TypeError:
raise TypeError('Function `%s` requires parameter `%s` to be of type `%s`.' %
(_get_function_signature(func), kwarg, type_hints[kwarg]))
_skip_resolution = {}
def _get_skip_resolution():
return _skip_resolution
def _resolve_configuration_helper(dict_, keys):
if not isinstance(dict_, HParams) and isinstance(dict_, dict):
return_ = {}
if len(dict_) == 0:
raise TypeError('Failed to find `HParams` object along path `%s`.' % '.'.join(keys))
for key in dict_:
resolved = _resolve_configuration_helper(dict_[key], keys[:] + [key])
if len(set(resolved.keys()) & set(return_.keys())) > 0:
raise TypeError('Function `%s` was defined twice in configuration.' %
'.'.join(keys + [key]))
return_.update(resolved)
return return_
elif not isinstance(dict_, HParams) and not isinstance(dict_, dict):
raise TypeError('Failed to find `HParams` object along path `%s`.' % '.'.join(keys))
trace = []
for i in range(1, len(keys)):
try:
module_path = '.'.join(keys[:i])
if _is_lazy_resolution:
attribute = sys.modules.get(module_path, None)
if attribute is None:
_skip_resolution[tuple(keys[:])] = dict_
return {}
else:
attribute = import_module(module_path)
for j, key in enumerate(keys[i:]):
if key[0] == '<' and key[-1] == '>':
logger.warning('Skipping checks for `%s`, this cannot import `%s`.',
'.'.join(keys), key)
signature = (_get_function_signature(attribute) + '.' + '.'.join(keys[i:][j:]))
return {signature: dict_}
else:
attribute = getattr(attribute, key)
if hasattr(attribute, '_configurable'):
_function_has_keyword_parameters(attribute.__wrapped__, dict_)
return {_get_function_signature(attribute.__wrapped__): dict_}
else:
trace.append('`%s` is not decorated with `configurable`.' % '.'.join(keys))
except ImportError:
trace.append('ImportError: Failed to run `import %s`.' % module_path)
except AttributeError:
trace.append('AttributeError: `%s` not found in `%s`.' % (key, '.'.join(keys[:i + j])))
trace.reverse()
warnings.warn('Skipping configuration for `%s` because this ' % '.'.join(keys) +
'failed to find a `configurable` decorator for that configuration.\n' +
'Attempts (most recent attempt last):\n %s' % ('\n '.join(trace),))
return {}
def _resolve_configuration(dict_):
return _resolve_configuration_helper(dict_, [])
def _resolve_skipped():
global _skip_resolution
copy_skip_resolution = _skip_resolution.copy()
_skip_resolution = {}
for keys, dict_ in copy_skip_resolution.items():
resolved = _resolve_configuration_helper(dict_, list(keys))
_add_resolved_config(resolved)
def _add_resolved_config(resolved):
for key in resolved:
if key in _configuration:
_configuration[key].update(resolved[key])
else:
_configuration[key] = resolved[key]
def _parse_configuration_helper(dict_, parsed_dict):
if not isinstance(dict_, dict) or isinstance(dict_, HParams):
return
for key in dict_:
if not (inspect.ismodule(key) or isinstance(key, str) or callable(key)):
raise TypeError('Key `%s` must be a string, module, or callable.' % key)
split = (key if isinstance(key, str) else _get_function_path(key)).split('.')
past_parsed_dict = parsed_dict
for i, split_key in enumerate(split):
if split_key == '':
raise TypeError('Improper key format `%s`.' % key)
if i == len(split) - 1 and (not isinstance(dict_[key], dict) or
isinstance(dict_[key], HParams)):
if split_key in parsed_dict:
raise TypeError('This key `%s` is a duplicate.' % key)
parsed_dict[split_key] = dict_[key]
else:
if split_key not in parsed_dict:
parsed_dict[split_key] = {}
parsed_dict = parsed_dict[split_key]
_parse_configuration_helper(dict_[key], parsed_dict)
parsed_dict = past_parsed_dict
return parsed_dict
def _parse_configuration(dict_):
return _parse_configuration_helper(dict_, {})
_configuration = {}
def add_config(config):
if len(config) == 0:
return
parsed = _parse_configuration(config)
resolved = _resolve_configuration(parsed)
_add_resolved_config(resolved)
def log_config():
logger.info('Global configuration:\n%s', pretty_printer.pformat(_configuration))
def get_config():
if _is_lazy_resolution and len(_skip_resolution) > 0:
logger.warning(
'There are unresolved configurations because lazy resolution was set to `True`; '
'therefore, this will only return a partial config.')
return _configuration
def clear_config():
global _configuration
_configuration = {}
_is_lazy_resolution = False
def set_lazy_resolution(bool_):
global _is_lazy_resolution
_is_lazy_resolution = bool_
_resolve_skipped()
def _merge_args(parameters, args, kwargs, config_kwargs, default_kwargs, print_name):
merged_kwargs = default_kwargs.copy()
merged_kwargs.update(config_kwargs)
for i, arg in enumerate(args):
if i >= len(parameters):
raise TypeError('Too many arguments (%d > %d) passed.' % (len(args), len(parameters)))
if parameters[i].kind == inspect.Parameter.VAR_POSITIONAL:
break
if (parameters[i].kind == inspect.Parameter.POSITIONAL_ONLY or
parameters[i].kind == inspect.Parameter.POSITIONAL_OR_KEYWORD):
if parameters[i].name in merged_kwargs:
value = merged_kwargs[parameters[i].name]
if parameters[i].name in config_kwargs or isinstance(value, _HParam):
warnings.warn(
'@configurable: Overwriting configured argument `%s=%s` in module `%s` '
'with `%s`.' % (parameters[i].name, str(value), print_name, arg))
del merged_kwargs[parameters[i].name]
for key, value in kwargs.items():
if key in config_kwargs or (key in merged_kwargs and
isinstance(merged_kwargs[key], _HParam)):
warnings.warn('@configurable: Overwriting configured argument `%s=%s` in module `%s` '
'with `%s`.' % (key, str(merged_kwargs[key]), print_name, value))
merged_kwargs.update(kwargs)
return args, merged_kwargs
def profile_func(frame, event, arg):
if (event != 'call' or not hasattr(frame, 'f_code') or not hasattr(frame, 'f_back') or
not hasattr(frame.f_back, 'f_code') or frame.f_code not in _code_to_function):
return
function = _code_to_function[frame.f_code]
last_filename = frame.f_back.f_code.co_filename
if not (__file__ == last_filename and frame.f_back.f_code.co_name == 'decorator'):
warnings.warn(
'@configurable: The decorator was not executed immediately before `%s` at (%s:%s); '
'therefore, its `HParams` may not have been injected. ' %
(_get_function_signature(function), last_filename, frame.f_back.f_lineno))
sys.setprofile(profile_func)
_code_to_function = {}
_ConfiguredFunction = typing.TypeVar('_ConfiguredFunction', bound=typing.Callable[..., typing.Any])
|
MIT License
|
google/ml-fairness-gym
|
core.py
|
Metric._extract_history
|
python
|
def _extract_history(self, env):
history = env._get_history()
if flags.FLAGS.validate_history:
self._validate_history(history)
if self._realign_fn is not None:
return self._realign_fn(history)
return history
|
Gets and validates a history from an environment.
|
https://github.com/google/ml-fairness-gym/blob/5b1cd336b844059aa4e4426b54d1f0e6b8c4c7e9/core.py#L510-L517
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum
from typing import Any, Callable, Dict, List, Mapping, Optional, Text, Tuple, TypeVar, Union
from absl import flags
from absl import logging
import attr
import gin
import gym
from gym.utils import seeding
import gym.utils.json_utils
import more_itertools
import networkx as nx
import numpy as np
from recsim.simulator import recsim_gym
import simplejson as json
RANDOM_STATE_KEY = '__random_state__'
flags.DEFINE_bool(
'validate_history', False,
'If True, metrics check the validity of the history when measuring. '
'Can be turned off to save computation.')
class NotInitializedError(Exception):
pass
class InvalidObservationError(Exception):
pass
class InvalidRewardError(Exception):
pass
class BadFeatureFnError(Exception):
pass
class InvalidHistoryError(Exception):
pass
class EpisodeDoneError(Exception):
pass
class NotReproducibleError(Exception):
pass
def validate_reward(reward):
if reward is None:
return True
try:
float(reward)
except TypeError:
raise InvalidRewardError
class GymEncoder(json.JSONEncoder):
def default(self, obj):
try:
return obj.to_jsonable()
except AttributeError:
pass
if callable(obj):
return {'callable': obj.__name__}
if isinstance(obj, (bool, np.bool_)):
return int(obj)
if isinstance(obj, enum.Enum):
return {'__enum__': str(obj)}
if isinstance(obj, recsim_gym.RecSimGymEnv):
return 'RecSimGym'
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
if isinstance(obj, (bool, np.bool_)):
return str(obj)
if isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
if isinstance(obj, nx.Graph):
return nx.readwrite.json_graph.node_link_data(obj)
if isinstance(obj, np.random.RandomState):
state = obj.get_state()
return {
RANDOM_STATE_KEY:
(state[0], state[1].tolist(), state[2], state[3], state[4])
}
if isinstance(obj, Params) or isinstance(obj, State):
return obj.asdict()
return json.JSONEncoder.default(self, obj)
def to_json(dictionary, sort_keys=True, **kw):
return json.dumps(dictionary, cls=GymEncoder, sort_keys=sort_keys, **kw)
@attr.s(cmp=False)
class State(object):
asdict = attr.asdict
def to_json(self):
return to_json(self)
def __eq__(self, other):
return self.to_json() == other.to_json()
def __ne__(self, other):
return self.to_json() != other.to_json()
ActionType = Any
@attr.s
class HistoryItem(object):
state = attr.ib()
action = attr.ib()
def to_jsonable(self):
return attr.astuple(self)
def __iter__(self):
return iter(attr.astuple(self, recurse=False))
HistoryType = List[HistoryItem]
@gin.configurable
@attr.s
class Params(object):
asdict = attr.asdict
ParamsType = TypeVar('ParamsType', bound=Params)
class RewardFn(object):
def __call__(self, observation):
raise NotImplementedError
DEFAULT_GROUP = np.ones(1)
NO_GROUP = np.zeros(1)
DEFAULT_GROUP_SPACE = gym.spaces.MultiBinary(1)
class StateUpdater(object):
def update(self, state, action):
raise NotImplementedError
class NoUpdate(StateUpdater):
def update(self, state, action):
del state, action
class FairnessEnv(gym.Env):
observable_state_vars = {}
action_space = None
group_membership_var = None
assert (not group_membership_var or
(group_membership_var in observable_state_vars))
def __init__(self,
params = None,
initialize_observation_space = True):
self.history = []
self.state = None
self.reward_fn = None
if initialize_observation_space:
self.observation_space = gym.spaces.Dict(self.observable_state_vars)
self.initial_params = copy.deepcopy(params)
def get_group_identifier(observation):
return observation.get(self.group_membership_var, DEFAULT_GROUP)
self.group_identifier_fn = get_group_identifier
def step(
self,
action):
if self.state is None:
raise NotInitializedError(
'State is None. State must be initialized before taking a step.'
' If using core.FairnessEnv, subclass and implement necessary methods.'
)
if not self.action_space.contains(action):
raise gym.error.InvalidAction('Invalid action: %s' % action)
self._update_history(self.state, action)
self.state = self._step_impl(self.state, action)
observation = self._get_observable_state()
logging.debug('Observation: %s.', observation)
logging.debug('Observation space: %s.', self.observation_space)
assert self.observation_space.contains(
observation
), 'Observation %s is not contained in self.observation_space' % observation
reward = self.reward_fn(observation) if self.reward_fn is not None else 0
return observation, reward, self._is_done(), {}
def seed(self, seed = None):
rng, seed = seeding.np_random(seed)
self.state.rng = rng
return [seed]
def reset(self):
self._reset_history()
return self._get_observable_state()
def set_scalar_reward(self, reward_fn):
self.reward_fn = reward_fn
def serialize_history(self):
sanitized_history = [(json.loads(history_item.state.to_json()),
history_item.action) for history_item in self.history]
return json.dumps(
{
'environment': repr(self.__class__),
'history': sanitized_history
},
cls=GymEncoder,
sort_keys=True)
def _step_impl(self, state, action):
raise NotImplementedError
def _get_observable_state(self):
return {
var_name: np.array(getattr(self.state, var_name))
for var_name in self.observable_state_vars
}
def _get_reward(self):
return
def _is_done(self):
return False
def _get_history(self):
return self.history
def _get_state(self):
return copy.deepcopy(self.state)
def _update_history(self, state, action):
self.history.append(HistoryItem(state=copy.deepcopy(state), action=action))
def _set_history(self, history):
self.history = history
def _reset_history(self):
self.history = []
def _set_state(self, state):
self.state = state
return self
class Metric(object):
def __init__(self,
environment,
realign_fn = None):
self._environment = copy.deepcopy(environment)
self._environment_setter = self._environment._set_state
self._realign_fn = realign_fn
def _simulate(self, state, action):
env = self._environment_setter(state)
env.step(action)
simulated_state = env._get_state()
return simulated_state
def _validate_history(self, history):
history = copy.deepcopy(history)
for idx, (step, next_step) in enumerate(more_itertools.pairwise(history)):
simulated_state = self._simulate(step.state, step.action)
if simulated_state != next_step.state:
raise ValueError('Invalid history at step %d %s != %s' %
(idx, step, next_step))
|
Apache License 2.0
|
tum-pbs/phiflow
|
phi/math/backend/_profile.py
|
profile
|
python
|
def profile(backends=None, trace=True, subtract_trace_time=True, save: str or None = None) -> Profile:
backends = BACKENDS if backends is None else backends
prof = Profile(trace, backends, subtract_trace_time)
restore_data = _start_profiling(prof, backends)
try:
yield prof
finally:
_stop_profiling(prof, *restore_data)
if save is not None:
prof.save(save)
|
To be used in `with` statements, `with math.backend.profile() as prof: ...`.
Creates a `Profile` for the code executed within the context by tracking calls to the `backends` and optionally tracing the call.
Args:
backends: List of backends to profile, `None` to profile all.
trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function.
subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times
save: (Optional) File path to save the profile to. This will call `Profile.save()`.
Returns:
Created `Profile`
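Example (a hedged sketch of the API described above; the saved file name and the `min_duration` threshold are illustrative assumptions):
from phi import math
# Any phi.math work executed inside the context is recorded, one entry per backend call.
with math.backend.profile(trace=True, subtract_trace_time=True, save='profile.json') as prof:
    ...  # run the phi.math code to be profiled here
prof.print(min_duration=1e-4)  # print recorded calls that took at least 0.1 ms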
|
https://github.com/tum-pbs/phiflow/blob/4a85f8a5029aa4e30a791daa659f2c8e1536e37e/phi/math/backend/_profile.py#L472-L494
|
import inspect
import json
from contextlib import contextmanager
from time import perf_counter
from typing import Optional, Callable
from ._backend import Backend, BACKENDS, _DEFAULT
class BackendCall:
def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name):
self._start = start
self._stop = stop
self._backend = backend
self._function_name = function_name
self._args = {"Backend": backend.name}
def __repr__(self):
return f"{1000 * self._duration:.2f} ms {self._function_name}"
def print(self, include_parents, depth, min_duration, code_col, code_len):
if self._duration >= min_duration:
print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}")
@property
def _name(self):
return repr(self)
@property
def _duration(self):
return self._stop - self._start
def trace_json_events(self, include_parents) -> list:
backend_index = self._backend._index
name = self._function_name
return [
{
'name': name,
'ph': 'X',
'pid': 1,
'tid': backend_index+1,
'ts': int(round(self._start * 1000000)),
'dur': int(round((self._stop - self._start) * 1000000)),
'args': self._args
}
]
def call_count(self) -> int:
return 1
def add_arg(self, key, value):
assert key not in self._args
self._args[key] = value
class ExtCall:
def __init__(self,
parent: 'ExtCall' or None,
name: str,
level: int,
function: str,
code_context: list or None,
file_name: str,
line_number: int):
self._parent = parent
if parent is None:
self._parents = ()
else:
self._parents = parent._parents + (parent,)
self._children = []
self._converted = False
self._name = name
self._level = level
self._function = function
self._code_context = code_context
self._file_name = file_name
self._line_number = line_number
def common_call(self, stack: list):
if self._parent is None:
return self
if len(stack) < self._level:
return self._parent.common_call(stack)
for i in range(self._level - 1):
if self._parents[i+1]._function != stack[-1-i].function:
return self._parents[i]
return self
def add(self, child):
self._children.append(child)
@staticmethod
def determine_name(info):
fun = info.function
if 'self' in info.frame.f_locals:
if fun == '__init__':
return f"{type(info.frame.f_locals['self']).__name__}()"
return f"{type(info.frame.f_locals['self']).__name__}.{fun}"
if 'phi/math' in info.filename or 'phi\\math' in info.filename:
return f"math.{fun}"
else:
return fun
@property
def _start(self):
return self._children[0]._start
@property
def _stop(self):
return self._children[-1]._stop
@property
def _duration(self):
return sum(c._duration for c in self._children)
def call_count(self) -> int:
return sum(child.call_count() for child in self._children)
def __repr__(self):
if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({self._level})"
else:
context = self._code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def _closest_non_trivial_parent(self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if self._level > backtrack + 1:
call: ExtCall = self._parents[-backtrack-1]
return call._code_context[0].strip(), call._file_name, call._function, call._line_number
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if self._level > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X",
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, "", 0, "", "", "", -1)
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time
self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(call._level, len(stack)):
stack_frame = stack[len(stack) - i - 1]
name = ExtCall.determine_name(stack_frame)
sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno)
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self):
self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}},
{'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}},
] + [
{'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}}
for i, backend in enumerate(self._backends)
]
if self._trace:
if len(self._root._children) > 0:
data.extend(self._root.trace_json_events())
else:
data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], []))
with open(json_file, 'w') as file:
json.dump(data, file)
save_trace = save
def _children_to_properties(self):
children = self._root.children_to_properties()
for name, child in children.items():
setattr(self, name, child)
def add_external_message(self, message: str):
self._messages.append(message)
@contextmanager
def retime(self):
self._retime_index = 0
restore_data = _start_profiling(self, self._backends)
try:
yield None
finally:
_stop_profiling(self, *restore_data)
assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, "
self._retime_index = -1
@contextmanager
def _accumulate_average(self, n):
self._retime_index = 0
self._accumulating = True
restore_data = _start_profiling(self, self._backends)
try:
yield None
finally:
_stop_profiling(self, *restore_data)
assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, "
self._retime_index = -1
for call in self._backend_calls:
call._start /= n
call._stop /= n
self._accumulating = False
def _format_values(values: dict, backend):
def format_val(value):
if isinstance(value, str):
return f'"{value}"'
if isinstance(value, (int, float, complex, bool)):
return value
if isinstance(value, (tuple, list)):
return str([format_val(v) for v in value])
try:
shape = backend.shape(value)
dtype = backend.dtype(value)
try:
shape = (int(dim) if dim is not None else '?' for dim in shape)
except Exception:
pass
return f"{tuple(shape)}, {dtype}"
except BaseException:
return str(value)
lines = [f"{key}: {format_val(val)}" for key, val in values.items()]
return "\n".join(lines)
class ProfilingBackend:
def __init__(self, prof: Profile, backend: Backend, index: int):
self._backend = backend
self._profile = prof
self._index = index
self.name = backend.name
self.combine_types = backend.combine_types
self.auto_cast = backend.auto_cast
self.is_tensor = backend.is_tensor
self.is_available = backend.is_available
self.shape = backend.shape
self.staticshape = backend.staticshape
self.ndims = backend.ndims
self.dtype = backend.dtype
self.expand_dims = backend.expand_dims
self.reshape = backend.reshape
self.supports = backend.supports
for item_name in dir(backend):
item = getattr(backend, item_name)
if callable(item) and not hasattr(self, item_name):
def context(item=item, item_name=item_name, profiling_backend=self):
def call_fun(*args, **kwargs):
start = perf_counter()
result = item(*args, **kwargs)
stop = perf_counter()
prof._add_call(BackendCall(start, stop, profiling_backend, item_name), args, kwargs, result)
return result
return call_fun
setattr(self, item_name, context())
def call(self, f: Callable, *args, name=None):
start = perf_counter()
result = f(*args)
self._backend.block_until_ready(result)
stop = perf_counter()
self._profile._add_call(BackendCall(start, stop, self, name), args, {}, result)
return result
def __repr__(self):
return f"profile[{self._backend}]"
def __enter__(self):
_DEFAULT.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
_DEFAULT.pop(-1)
def __eq__(self, other):
return other is self or other is self._backend
def __hash__(self):
return hash(self._backend)
_PROFILE = []
@contextmanager
|
MIT License
|
cuthbertlab/music21-tools
|
trecento/capua.py
|
ruleOne
|
python
|
def ruleOne(note1, nonCapuaHarmony, capuaHarmony):
if nonCapuaHarmony == 'dissonance' and capuaHarmony == 'perfect cons':
note1.editorial.fictaColor = betterColor
elif nonCapuaHarmony == 'perfect cons' and capuaHarmony == 'dissonance':
note1.editorial.fictaColor = worseColor
|
Colors a note based on the rule dissonance -> perfect cons is better,
perfect cons -> dissonance is worse.
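A hedged example of the rule in isolation (assumes music21 is installed and that ruleOne is called from within this capua module; the note choice is illustrative):
from music21 import note
n = note.Note('F4')
ruleOne(n, 'dissonance', 'perfect cons')   # Capua's ficta improves the harmony
print(n.editorial.fictaColor)              # 'green', i.e. betterColor
ruleOne(n, 'perfect cons', 'dissonance')   # Capua's ficta makes the harmony worse
print(n.editorial.fictaColor)              # 'red', i.e. worseColor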
|
https://github.com/cuthbertlab/music21-tools/blob/78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee/trecento/capua.py#L789-L799
|
import unittest
from music21 import exceptions21
from . import cadencebook
from music21 import stream
from music21 import pitch
from music21 import interval
from music21 import environment
_MOD = 'trecento.capua.py'
environLocal = environment.Environment(_MOD)
RULE_ONE = 1
RULE_TWO = 2
RULE_THREE = 4
RULE_FOUR_A = 8
RULE_FOUR_B = 16
class CapuaException(exceptions21.Music21Exception):
pass
def applyCapuaToScore(thisWork):
for thisPart in thisWork.parts:
applyCapuaToStream(thisPart.flat.notes.stream())
def applyCapuaToCadencebookWork(thisWork):
for thisSnippet in thisWork.snippets:
applyCapuaToScore(thisSnippet)
def applyCapuaToStream(thisStream):
for n in thisStream.notes:
if hasattr(n.editorial, 'ficta'):
n.editorial.pmfcFicta = n.editorial.ficta
clearAccidental(n)
clearFicta(thisStream)
capuaRuleOne(thisStream)
capuaRuleTwo(thisStream)
capuaRuleThree(thisStream)
capuaRuleFourB(thisStream)
def capuaRuleOne(srcStream):
numChanged = 0
ssn = srcStream.flat.notesAndRests
for i in range(len(ssn) - 2):
n1 = ssn[i]
n2 = ssn[i + 1]
n3 = ssn[i + 2]
if n1.isRest or n2.isRest or n3.isRest:
continue
i1 = interval.notesToInterval(n1, n2)
i2 = interval.notesToInterval(n2, n3)
if (n1.pitch.accidental is not None
or n3.pitch.accidental is not None):
continue
if n2.step == 'A' or n2.step == 'D':
continue
if (i1.directedName == 'M-2'
and i2.directedName == 'M2'):
numChanged += 1
if 'capuaRuleNumber' in n2.editorial:
n2.editorial.capuaRuleNumber += RULE_ONE
else:
n2.editorial.capuaRuleNumber = RULE_ONE
if n2.pitch.accidental is not None and n2.pitch.accidental.name == 'flat':
n2.editorial.savedAccidental = n2.pitch.accidental
n2.pitch.accidental = None
n2.editorial.ficta = pitch.Accidental('natural')
n2.editorial.capuaFicta = pitch.Accidental('natural')
n1.style.color = 'blue'
n2.style.color = 'forestGreen'
n3.style.color = 'blue'
else:
n2.editorial.ficta = pitch.Accidental('sharp')
n2.editorial.capuaFicta = pitch.Accidental('sharp')
n1.style.color = 'blue'
n2.style.color = 'ForestGreen'
n3.style.color = 'blue'
return numChanged
def capuaRuleTwo(srcStream):
numChanged = 0
ssn = srcStream.flat.notesAndRests
for i in range(len(ssn) - 3):
n1 = ssn[i]
n2 = ssn[i + 1]
n3 = ssn[i + 2]
n4 = ssn[i + 3]
if (n1.isRest
or n2.isRest
or n3.isRest
or n4.isRest):
continue
i1 = interval.notesToInterval(n1, n2)
i2 = interval.notesToInterval(n2, n3)
i3 = interval.notesToInterval(n3, n4)
if (n1.pitch.accidental is not None
or n2.pitch.accidental is not None
or n4.pitch.accidental is not None):
continue
if n3.step == 'A' or n3.step == 'D':
continue
if (i1.directedName == 'M2'
and i2.directedName == 'm2'
and i3.directedName == 'M2'):
numChanged += 1
if 'capuaRuleNumber' in n3.editorial:
n3.editorial.capuaRuleNumber += RULE_TWO
else:
n3.editorial.capuaRuleNumber = RULE_TWO
if n3.pitch.accidental is not None and n3.pitch.accidental.name == 'flat':
n3.editorial.savedAccidental = n3.pitch.accidental
n3.pitch.accidental = None
n3.editorial.ficta = pitch.Accidental('natural')
n3.editorial.capuaFicta = pitch.Accidental('natural')
n1.style.color = 'purple'
n2.style.color = 'purple'
n3.style.color = 'ForestGreen'
n4.style.color = 'purple'
else:
n3.editorial.ficta = pitch.Accidental('sharp')
n3.editorial.capuaFicta = pitch.Accidental('sharp')
n1.style.color = 'purple'
n2.style.color = 'purple'
n3.style.color = 'ForestGreen'
n4.style.color = 'purple'
return numChanged
def capuaRuleThree(srcStream):
numChanged = 0
ssn = srcStream.flat.notesAndRests
for i in range(len(ssn) - 2):
n1 = ssn[i]
n2 = ssn[i + 1]
n3 = ssn[i + 2]
if n1.isRest or n2.isRest or n3.isRest:
continue
i1 = interval.notesToInterval(n1, n2)
i2 = interval.notesToInterval(n2, n3)
if (n1.pitch.accidental is not None
or n2.pitch.accidental is not None
or n3.pitch.accidental is not None):
continue
if n2.step == 'A' or n2.step == 'D':
continue
if (i1.directedName == 'M-3'
and i2.directedName == 'M2'):
numChanged += 1
if 'capuaRuleNumber' in n2.editorial:
n2.editorial.capuaRuleNumber += RULE_THREE
else:
n2.editorial.capuaRuleNumber = RULE_THREE
n2.editorial.ficta = pitch.Accidental('sharp')
n2.editorial.capuaFicta = pitch.Accidental('sharp')
n1.style.color = 'DeepPink'
n2.style.color = 'ForestGreen'
n3.style.color = 'DeepPink'
return numChanged
def capuaRuleFourA(srcStream):
numChanged = 0
ssn = srcStream.flat.notesAndRests
for i in range(len(ssn) - 2):
n1 = ssn[i]
n2 = ssn[i + 1]
n3 = ssn[i + 2]
if n1.isRest or n2.isRest or n3.isRest:
continue
i1 = interval.notesToInterval(n1, n2)
i2 = interval.notesToInterval(n2, n3)
if (n1.pitch.accidental is not None
or n2.pitch.accidental is not None
or n3.pitch.accidental is not None):
continue
if n2.step == 'A' or n2.step == 'D':
continue
if i1.directedName == 'm-3' and i2.directedName == 'M-2':
numChanged += 1
if 'capuaRuleNumber' in n2.editorial:
n2.editorial.capuaRuleNumber += RULE_FOUR_A
else:
n2.editorial.capuaRuleNumber = RULE_FOUR_A
n2.editorial.ficta = pitch.Accidental('flat')
n2.editorial.capuaFicta = pitch.Accidental('flat')
n1.style.color = 'orange'
n2.style.color = 'ForestGreen'
n3.style.color = 'orange'
return numChanged
def capuaRuleFourB(srcStream):
numChanged = 0
ssn = srcStream.flat.notesAndRests
for i in range(len(ssn) - 2):
n1 = ssn[i]
n2 = ssn[i + 1]
n3 = ssn[i + 2]
if n1.isRest or n2.isRest or n3.isRest:
continue
i1 = interval.notesToInterval(n1, n2)
i2 = interval.notesToInterval(n2, n3)
if (n1.pitch.accidental is not None
or n3.pitch.accidental is not None):
continue
if n2.step == 'A' or n2.step == 'D':
continue
if i1.directedName == 'm3' and i2.directedName == 'M2':
numChanged += 1
if 'capuaRuleNumber' in n2.editorial:
n2.editorial.capuaRuleNumber += RULE_FOUR_B
else:
n2.editorial.capuaRuleNumber = RULE_FOUR_B
if n2.pitch.accidental is not None and n2.pitch.accidental.name == 'flat':
n2.editorial.savedAccidental = n2.pitch.accidental
n2.pitch.accidental = None
n2.editorial.ficta = pitch.Accidental('natural')
n2.editorial.capuaFicta = pitch.Accidental('natural')
n1.style.color = 'orange'
n2.style.color = 'green'
n3.style.color = 'orange'
else:
n2.editorial.ficta = pitch.Accidental('sharp')
n2.editorial.capuaFicta = pitch.Accidental('sharp')
n1.style.color = 'orange'
n2.style.color = 'green'
n3.style.color = 'orange'
return numChanged
def clearFicta(srcStream1):
for n2 in srcStream1.flat.notes:
if hasattr(n2.editorial, 'ficta'):
n2.editorial.savedFicta = n2.editorial.ficta
n2.editorial.ficta = None
def restoreFicta(srcStream1):
for n2 in srcStream1:
if hasattr(n2.editorial, 'savedFicta') and n2.editorial.savedFicta is not None:
n2.editorial.ficta = n2.editorial.savedFicta
n2.editorial.savedFicta = None
def clearAccidental(note1):
if note1.pitch.accidental is not None:
note1.editorial.savedAccidental = note1.pitch.accidental
note1.pitch.accidental = None
def restoreAccidental(note1):
if hasattr(note1.editorial, 'savedAccidental'):
note1.pitch.accidental = note1.editorial.savedAccidental
note1.editorial.savedAccidental = None
def fictaToAccidental(note1):
if note1.editorial.ficta is not None:
if note1.pitch.accidental is not None:
clearAccidental(note1)
note1.pitch.accidental = note1.editorial.ficta
def pmfcFictaToAccidental(note1):
if (hasattr(note1.editorial, 'pmfcFicta')
and note1.editorial.pmfcFicta is not None):
clearAccidental(note1)
note1.pitch.accidental = note1.editorial.pmfcFicta
def capuaFictaToAccidental(note1):
if (hasattr(note1.editorial, 'capuaFicta')
and note1.editorial.capuaFicta is not None):
clearAccidental(note1)
note1.pitch.accidental = note1.editorial.capuaFicta
def evaluateRules(srcStream1, srcStream2):
bothCount = evaluateCapuaTwoStreams(srcStream1, srcStream2)
return bothCount
def evaluateCapuaOnesrcStream(srcStream1, srcStream2):
applyCapuaToStream(srcStream1)
for note1 in srcStream1:
capuaFictaToAccidental(note1)
srcStream1Count = compareOnesrcStream(srcStream1, srcStream2, 'capua1srcStream')
for note1 in srcStream1:
restoreAccidental(note1)
return srcStream1Count
def evaluateCapuaTwoStreams(srcStream1, srcStream2):
applyCapuaToStream(srcStream1)
applyCapuaToStream(srcStream2)
for note1 in srcStream1:
capuaFictaToAccidental(note1)
for note2 in srcStream2:
capuaFictaToAccidental(note2)
srcStream1Count = compareOnesrcStream(srcStream1, srcStream2, 'capua2srcStream')
srcStream2Count = compareOnesrcStream(srcStream2, srcStream1, 'capua2srcStream')
for note1 in srcStream1:
restoreAccidental(note1)
for note2 in srcStream2:
restoreAccidental(note2)
bothCount = {
'srcStream1Count': srcStream1Count,
'srcStream2Count': srcStream2Count,
}
return bothCount
def evaluateEditorsFicta(srcStream1, srcStream2):
for note1 in srcStream1:
pmfcFictaToAccidental(note1)
for note2 in srcStream2:
pmfcFictaToAccidental(note2)
editorProfile = compareOnesrcStream(srcStream1, srcStream2, 'editor')
for note1 in srcStream1:
restoreAccidental(note1)
for note2 in srcStream2:
restoreAccidental(note2)
return editorProfile
def evaluateWithoutFicta(srcStream1, srcStream2):
clearFicta(srcStream1)
clearFicta(srcStream2)
noneProfile1 = compareOnesrcStream(srcStream1, srcStream2, '')
restoreFicta(srcStream1)
restoreFicta(srcStream2)
return noneProfile1
PerfectCons = ['P1', 'P5', 'P8']
ImperfCons = ['m3', 'M3', 'm6', 'M6']
Others = ['m2', 'M2', 'A2', 'd3', 'A3', 'd4', 'P4', 'A4', 'd5', 'A5', 'd6',
'A6', 'd7', 'm7', 'M7', 'A7']
PERFCONS = 1
IMPERFCONS = 2
OTHERS = 3
def compareThreeFictas(srcStream1, srcStream2):
srcStream1.attachIntervalsBetweenStreams(srcStream2)
srcStream2.attachIntervalsBetweenStreams(srcStream1)
for note1 in srcStream1.notes:
if hasattr(note1.editorial.harmonicInterval, 'name'):
note1.editorial.normalHarmonicInterval = note1.editorial.harmonicInterval.name
if 'pmfcFicta' in note1.editorial:
pmfcFictaToAccidental(note1)
note1.editorial.harmonicInterval.reinit()
if hasattr(note1.editorial.harmonicInterval, 'name'):
note1.editorial.pmfcHarmonicInterval = note1.editorial.harmonicInterval.name
restoreAccidental(note1)
else:
if hasattr(note1.editorial.harmonicInterval, 'name'):
note1.editorial.pmfcHarmonicInterval = note1.editorial.harmonicInterval.name
if 'capuaFicta' in note1.editorial:
capuaFictaToAccidental(note1)
note1.editorial.harmonicInterval.reinit()
if hasattr(note1.editorial.harmonicInterval, 'name'):
note1.editorial.capuaHarmonicInterval = note1.editorial.harmonicInterval.name
restoreAccidental(note1)
else:
if hasattr(note1.editorial.harmonicInterval, 'name'):
note1.editorial.capuaHarmonicInterval = note1.editorial.harmonicInterval.name
def compareSrcStreamCapuaToEditor(srcStream1):
totalDict = {
'totalNotes': 0,
'pmfcAlt': 0,
'capuaAlt': 0,
'pmfcNotCapua': 0,
'capuaNotPmfc': 0,
'pmfcAndCapua': 0,
}
for note1 in srcStream1.flat.notesAndRests:
thisDict = compareNoteCapuaToEditor(note1)
for thisKey in thisDict:
totalDict[thisKey] += thisDict[thisKey]
return totalDict
def compareNoteCapuaToEditor(note1):
statsDict = {
'totalNotes': 0,
'pmfcAlt': 0,
'capuaAlt': 0,
'pmfcNotCapua': 0,
'capuaNotPmfc': 0,
'pmfcAndCapua': 0,
}
if note1.isRest:
return statsDict
statsDict['totalNotes'] += 1
if 'pmfcFicta' in note1.editorial and 'capuaFicta' in note1.editorial:
statsDict['pmfcAlt'] += 1
statsDict['capuaAlt'] += 1
statsDict['pmfcAndCapua'] += 1
elif 'pmfcFicta' in note1.editorial:
statsDict['pmfcAlt'] += 1
statsDict['pmfcNotCapua'] += 1
elif 'capuaFicta' in note1.editorial:
statsDict['capuaAlt'] += 1
statsDict['capuaNotPmfc'] += 1
return statsDict
def compareOnesrcStream(srcStream1, srcStream2, fictaType='editor'):
perfectConsCount = 0
imperfConsCount = 0
othersCount = 0
srcStream1.attachIntervalsBetweenStreams(srcStream2)
srcStream2.attachIntervalsBetweenStreams(srcStream1)
for note1 in srcStream1.notes:
hasFicta = False
interval1 = note1.editorial.harmonicInterval
if interval1 is None:
continue
if note1.editorial.ficta is not None:
hasFicta = True
iType = getIntervalType(interval1)
if hasFicta and fictaType == 'editor':
environLocal.printDebug('found ficta of Editor type')
note1.editorial.editorFictaHarmony = iType
note1.editorial.editorFictaInterval = interval1
elif hasFicta and fictaType == 'capua1srcStream':
environLocal.printDebug('found ficta of capua1srcStream type')
note1.editorial.capua1FictaHarmony = iType
note1.editorial.capua1FictaInterval = interval1
elif hasFicta and fictaType == 'capua2srcStream':
environLocal.printDebug('found ficta of capua2srcStream type')
note1.editorial.capua2FictaHarmony = iType
note1.editorial.capua2FictaInterval = interval1
else:
note1.editorial.noFictaHarmony = iType
if iType == 'perfect cons':
perfectConsCount += 1
elif iType == 'imperfect cons':
imperfConsCount += 1
elif iType == 'dissonance':
othersCount += 1
else:
raise CapuaException('Hmmm.... I thought we already trapped this for errors...')
return [perfectConsCount, imperfConsCount, othersCount]
def getIntervalType(interval1):
if interval1 is None:
return None
elif interval1.diatonic is None:
return None
elif interval1.diatonic.name in PerfectCons:
return 'perfect cons'
elif interval1.diatonic.name in ImperfCons:
return 'imperfect cons'
elif interval1.diatonic.name in Others:
return 'dissonance'
else:
raise CapuaException(
'Wow! The first ' + interval1.niceName
+ ' I have ever seen in 14th century music! Go publish! (or check for errors...)'
)
betterColor = 'green'
worseColor = 'red'
neutralColor = 'blue'
def colorCapuaFicta(srcStream1, srcStream2, oneOrBoth='both'):
srcStream1.attachIntervalsBetweenStreams(srcStream2)
srcStream2.attachIntervalsBetweenStreams(srcStream1)
capuaCount = evaluateRules(srcStream1, srcStream2)
environLocal.printDebug('Capua count: %r' % capuaCount)
noFictaCount = evaluateWithoutFicta(srcStream1, srcStream2)
environLocal.printDebug('No ficta count: %r' % noFictaCount)
for note1 in srcStream1:
colorNote(note1, oneOrBoth)
for note2 in srcStream2:
colorNote(note2, oneOrBoth)
def colorNote(note1, oneOrBoth='both'):
if 'capua2FictaHarmony' not in note1.editorial:
return
elif oneOrBoth == 'one':
capuaHarmony = note1.editorial.capua1FictaHarmony
elif oneOrBoth == 'both':
capuaHarmony = note1.editorial.capua2FictaHarmony
else:
raise CapuaException('Please specify "one" or "both" for the variable "oneOrBoth".')
nonCapuaHarmony = note1.editorial.noFictaHarmony
ruleOne(note1, nonCapuaHarmony, capuaHarmony)
|
BSD 3-Clause New or Revised License
|
jggatc/pyjsdl
|
surface.py
|
Surface.toDataURL
|
python
|
def toDataURL(self, datatype=None):
if not datatype:
return self.canvas.toDataURL()
else:
return self.canvas.toDataURL(datatype)
|
Return surface data as a base64 data string.
Optional datatype to set the data format; defaults to 'image/png'.
Implemented with HTML5 Canvas toDataURL method.
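A hedged usage sketch (only meaningful in the browser environment pyjsdl targets; the surface contents and import path are illustrative):
from pyjsdl.surface import Surface  # assumed import in the pyjs/transpiled environment
surface = Surface((32, 32))
surface.fill((255, 0, 0))                   # illustrative: a solid red surface
png_url = surface.toDataURL()               # 'data:image/png;base64,...'
jpeg_url = surface.toDataURL('image/jpeg')  # explicit MIME type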
|
https://github.com/jggatc/pyjsdl/blob/c274ce2bc2099be4eeb2886a349fb3130231f307/surface.py#L315-L324
|
from pyjsdl.pyjsobj import HTML5Canvas
from pyjsdl.rect import Rect, rectPool
from pyjsdl.color import Color
from __pyjamas__ import JS
import sys
if sys.version_info < (3,):
from pyjsdl.util import _range as range
__docformat__ = 'restructuredtext'
class Surface(HTML5Canvas):
def __init__(self, size, *args, **kwargs):
self.width = int(size[0])
self.height = int(size[1])
HTML5Canvas.__init__(self, self.width, self.height)
HTML5Canvas.resize(self, self.width, self.height)
self._display = None
self._super_surface = None
self._offset = (0,0)
self._colorkey = None
self._stroke_style = None
self._fill_style = None
self._nonimplemented_methods()
def __str__(self):
s = "<%s(%dx%d)>"
return s % (self.__class__.__name__, self.width, self.height)
def __repr__(self):
return self.__str__()
def get_size(self):
return (self.width, self.height)
def get_width(self):
return self.width
def get_height(self):
return self.height
def resize(self, width, height):
self.width = int(width)
self.height = int(height)
HTML5Canvas.resize(self, self.width, self.height)
def get_rect(self, **attr):
rect = Rect(0, 0, self.width, self.height)
for key in attr:
setattr(rect, key, attr[key])
return rect
def copy(self):
surface = Surface((self.width,self.height))
surface.drawImage(self.canvas, 0, 0)
return surface
def subsurface(self, rect):
if rect in ('t', 'f'):
if not self._super_surface:
return
if rect == 't':
self.drawImage(self._super_surface.canvas, self._offset[0], self._offset[1], self.width, self.height, 0, 0, self.width, self.height)
else:
self._super_surface.drawImage(self.canvas, self._offset[0], self._offset[1])
return
if hasattr(rect, 'width'):
_rect = rect
else:
_rect = Rect(rect)
surf_rect = self.get_rect()
if not surf_rect.contains(_rect):
raise ValueError('subsurface outside surface area')
surface = self.getSubimage(_rect.x, _rect.y, _rect.width, _rect.height)
surface._super_surface = self
surface._offset = (_rect.x,_rect.y)
surface._colorkey = self._colorkey
return surface
def getSubimage(self, x, y, width, height):
surface = Surface((width,height))
surface.drawImage(self.canvas, x, y, width, height, 0, 0, width, height)
return surface
def blit(self, surface, position, area=None):
if not area:
rect = rectPool.get(position[0],position[1],surface.width,surface.height)
self.impl.canvasContext.drawImage(surface.canvas, rect.x, rect.y)
else:
rect = rectPool.get(position[0],position[1],area[2], area[3])
self.impl.canvasContext.drawImage(surface.canvas, area[0], area[1], area[2], area[3], rect.x, rect.y, area[2], area[3])
if self._display:
surface_rect = self._display._surface_rect
else:
surface_rect = self.get_rect()
changed_rect = surface_rect.clip(rect)
rectPool.append(rect)
return changed_rect
def _blits(self, surfaces):
ctx = self.impl.canvasContext
for surface, rect in surfaces:
ctx.drawImage(surface.canvas, rect.x, rect.y)
def _blit_clear(self, surface, rect_list):
ctx = self.impl.canvasContext
for r in rect_list:
ctx.drawImage(surface.canvas, r.x,r.y,r.width,r.height, r.x,r.y,r.width,r.height)
def set_colorkey(self, color, flags=None):
if self._colorkey:
self.replace_color((0,0,0,0),self._colorkey)
self._colorkey = None
if color:
self._colorkey = Color(color)
self.replace_color(self._colorkey)
return None
def get_colorkey(self):
if self._colorkey:
return ( self._colorkey.r,
self._colorkey.g,
self._colorkey.b,
self._colorkey.a )
else:
return None
def _getPixel(self, imagedata, index):
return JS("imagedata.data[@{{index}}];")
def _setPixel(self, imagedata, index, dat):
data = str(dat)
JS("imagedata.data[@{{index}}]=@{{data}};")
return
def replace_color(self, color, new_color=None):
pixels = self.impl.getImageData(0,0,self.width,self.height)
if hasattr(color, 'a'):
color1 = color
else:
color1 = Color(color)
if new_color is None:
alpha_zero = True
else:
if hasattr(new_color, 'a'):
color2 = new_color
else:
color2 = Color(new_color)
alpha_zero = False
if alpha_zero:
r1,g1,b1,a1 = color1.r, color1.g, color1.b, color1.a
a2 = 0
for i in range(0, len(pixels.data), 4):
if ( self._getPixel(pixels,i) == r1 and
self._getPixel(pixels,i+1) == g1 and
self._getPixel(pixels,i+2) == b1 and
self._getPixel(pixels,i+3) == a1 ):
self._setPixel(pixels, i+3, a2)
else:
r1,g1,b1,a1 = color1.r, color1.g, color1.b, color1.a
r2,g2,b2,a2 = color2.r, color2.g, color2.b, color2.a
for i in range(0, len(pixels.data), 4):
if ( self._getPixel(pixels,i) == r1 and
self._getPixel(pixels,i+1) == g1 and
self._getPixel(pixels,i+2) == b1 and
self._getPixel(pixels,i+3) == a1 ):
self._setPixel(pixels, i, r2)
self._setPixel(pixels, i+1, g2)
self._setPixel(pixels, i+2, b2)
self._setPixel(pixels, i+3, a2)
self.impl.putImageData(pixels,0,0,0,0,self.width,self.height)
return None
def get_at(self, pos):
pixel = self.impl.getImageData(pos[0], pos[1], 1, 1)
return Color([self._getPixel(pixel,i) for i in (0,1,2,3)])
def set_at(self, pos, color):
if self._fill_style != color:
self._fill_style = color
if hasattr(color, 'a'):
_color = color
else:
_color = Color(color)
self.setFillStyle(_color)
self.fillRect(pos[0], pos[1], 1, 1)
return None
def fill(self, color=None, rect=None):
if color is None:
HTML5Canvas.fill(self)
return
if color:
if self._fill_style != color:
self._fill_style = color
if hasattr(color, 'a'):
self.setFillStyle(color)
else:
self.setFillStyle(Color(color))
if not rect:
_rect = Rect(0, 0, self.width, self.height)
else:
if self._display:
surface_rect = self._display._surface_rect
else:
surface_rect = self.get_rect()
if hasattr(rect, 'width'):
_rect = surface_rect.clip( rect )
else:
_rect = surface_rect.clip( Rect(rect) )
if not _rect.width or not _rect.height:
return _rect
self.fillRect(_rect.x, _rect.y, _rect.width, _rect.height)
else:
_rect = Rect(0, 0, self.width, self.height)
self.clear()
return _rect
def get_parent(self):
return self._super_surface
def get_offset(self):
return self._offset
|
MIT License
|
nasa-ammos/ait-core
|
ait/core/dmc.py
|
toJulian
|
python
|
def toJulian(dt=None):
if dt is None:
dt = datetime.datetime.utcnow()
if dt.month < 3:
year = dt.year - 1
month = dt.month + 12
else:
year = dt.year
month = dt.month
A = int(year / 100.0)
B = 2 - A + int(A / 4.0)
C = ( (dt.second / 60.0 + dt.minute) / 60.0 + dt.hour ) / 24.0
jd = int(365.25 * (year + 4716))
jd += int(30.6001 * (month + 1)) + dt.day + B - 1524.5 + C
return jd
|
Converts a Python datetime to a Julian date, using the formula from
Meeus (1991). This formula is reproduced in D.A. Vallado (2004).
See:
D.A. Vallado, Fundamentals of Astrodynamics and Applications, p. 187
http://books.google.com/books?id=PJLlWzMBKjkC&lpg=PA956&vq=187&pg=PA187
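As a quick sanity check (hedged; assumes ait.core is importable), the J2000.0 epoch, 2000-01-01 12:00 UT, should evaluate to Julian date 2451545.0:
import datetime
from ait.core import dmc
j2000 = datetime.datetime(2000, 1, 1, 12, 0, 0)  # J2000.0 epoch
print(dmc.toJulian(j2000))  # 2451545.0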
|
https://github.com/nasa-ammos/ait-core/blob/40717498c20358303521cbd94e9bb2528b7a1f09/ait/core/dmc.py#L178-L203
|
import calendar
import datetime
import math
import os.path
import pickle
import requests
import ait.core
from ait.core import log
GPS_Epoch = datetime.datetime(1980, 1, 6, 0, 0, 0)
TICs = [ ]
TwoPi = 2 * math.pi
DOY_Format = '%Y-%jT%H:%M:%SZ'
ISO_8601_Format = '%Y-%m-%dT%H:%M:%SZ'
RFC3339_Format = '%Y-%m-%dT%H:%M:%S.%fZ'
_DEFAULT_FILE_NAME = 'leapseconds.dat'
LeapSeconds = None
def getTimestampUTC():
utc = datetime.datetime.utcnow()
ts_sec = calendar.timegm( utc.timetuple() )
ts_usec = utc.microsecond
return ts_sec, ts_usec
def getUTCDatetimeDOY(days=0, hours=0, minutes=0, seconds=0):
return (datetime.datetime.utcnow() +
datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)).strftime(DOY_Format)
def tic():
global TICs
begin = datetime.datetime.now()
TICs.append(begin)
def toc():
end = datetime.datetime.now()
return totalSeconds( end - TICs.pop() ) if len(TICs) else None
def toGPSWeekAndSecs(timestamp=None):
if timestamp is None:
timestamp = datetime.datetime.utcnow()
leap = LeapSeconds.get_GPS_offset_for_date(timestamp)
secsInWeek = 604800
delta = totalSeconds(timestamp - GPS_Epoch) + leap
seconds = delta % secsInWeek
week = int( math.floor(delta / secsInWeek) )
return (week, seconds)
def toGPSSeconds(timestamp):
delta = timestamp - GPS_Epoch
return (delta.days * 24 * 3600) + delta.seconds
def toGMST(dt=None):
if dt is None or type(dt) is datetime.datetime:
jd = toJulian(dt)
else:
jd = dt
tUT1 = (jd - 2451545.0) / 36525.0
gmst = 67310.54841 + (876600 * 3600 + 8640184.812866) * tUT1
gmst += 0.093104 * tUT1**2
gmst -= 6.2e-6 * tUT1**3
gmst /= 240.
gmst = math.radians(gmst) % TwoPi
if gmst < 0:
gmst += TwoPi
return gmst
|
MIT License
|
iclrandd/blackstone
|
blackstone/pipeline/abbreviations.py
|
contains
|
python
|
def contains(str, set: Set[str]):
return any([c in str for c in set])
|
Check whether sequence str contains ANY of the items in set.
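For example (a hedged sketch; the helper simply wraps any(), and the sample strings are illustrative):
from blackstone.pipeline.abbreviations import contains  # assumed importable as packaged
print(contains("LTL (Shipping) Ltd", {"(", ")"}))  # True: at least one bracket appears
print(contains("plc", {"(", ")"}))                 # False: no bracket appears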
|
https://github.com/iclrandd/blackstone/blob/4dadee00bc1f9bc3b44ba93f2d03b5e8a40516aa/blackstone/pipeline/abbreviations.py#L92-L94
|
from typing import Tuple, List, Optional, Set, Dict
from collections import defaultdict
from spacy.tokens import Span, Doc
from spacy.matcher import Matcher
def find_abbreviation(
long_form_candidate: Span, short_form_candidate: Span
) -> Tuple[Span, Optional[Span]]:
long_form = " ".join([x.text for x in long_form_candidate])
short_form = " ".join([x.text for x in short_form_candidate])
long_index = len(long_form) - 1
short_index = len(short_form) - 1
while short_index >= 0:
current_char = short_form[short_index].lower()
if not current_char.isalnum():
short_index -= 1
continue
while (
(long_index >= 0 and long_form[long_index].lower() != current_char)
or
(
short_index == 0
and long_index > 0
and long_form[long_index - 1].isalnum()
)
):
long_index -= 1
if long_index < 0:
return short_form_candidate, None
long_index -= 1
short_index -= 1
long_index = max(long_index, 0)
word_lengths = 0
starting_index = None
for i, word in enumerate(long_form_candidate):
word_lengths += len(word)
if word_lengths > long_index:
starting_index = i
break
return short_form_candidate, long_form_candidate[starting_index:]
|
Apache License 2.0
|
tunein/maestro
|
maestro/actions/create_alias.py
|
create_alias_action
|
python
|
def create_alias_action(name, alias, dry_run=False, publish=False):
print('Checking to see if Lambda exists')
if check(name):
print('Lambda found! Attempting to create alias %s' % alias)
alias_creation(lambda_name=name, new_alias=alias, dry_run=dry_run, publish=publish)
else:
print('Lambda not found!')
sys.exit(1)
|
Creates an alias for the given function: first we check whether the lambda exists, then we create the alias
args:
name: name of the lambda
alias: name of the alias we're creating
dry_run: boolean; if True, no action is taken and the steps are only printed
publish: boolean, if yes we publish a new version for this alias
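A hedged usage sketch (the lambda and alias names are hypothetical; AWS credentials must be configured):
from maestro.actions.create_alias import create_alias_action
create_alias_action(name='my-service', alias='staging', dry_run=True)   # preview only, no changes
create_alias_action(name='my-service', alias='staging', publish=True)   # create the alias and publish a version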
|
https://github.com/tunein/maestro/blob/789205fdbe85242189c50e407445c57ca916e42c/maestro/actions/create_alias.py#L9-L28
|
import os
import sys
from maestro.providers.aws.check_existence import check
from maestro.providers.aws.alias import alias_creation
|
Apache License 2.0
|
yongyitang92/monet-features
|
feature_reader.py
|
Data_loader.repeat_last
|
python
|
def repeat_last(self, tensor, axis, new_size):
tensor = tf.convert_to_tensor(tensor)
shape = tf.unstack(tf.shape(tensor))
pad_shape = shape[:]
pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
shape[axis] = tf.minimum(shape[axis], new_size)
shape = tf.stack(shape)
tensor_reshape = tf.reshape(tensor, [-1, self.feature_size])
repeat_last = tf.tile(tensor_reshape[-1:, :], [pad_shape[axis], 1])
resized = tf.concat([
tf.slice(tensor, tf.zeros_like(shape), shape),
repeat_last
], axis)
new_shape = tensor.get_shape().as_list()
new_shape[axis] = new_size
resized.set_shape(new_shape)
return resized
|
Truncates or pads a tensor to new_size on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using the last feature.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
Returns:
The resized tensor.
|
https://github.com/yongyitang92/monet-features/blob/09775bd8008b9b0064c205d158f7d4fc96f487ec/feature_reader.py#L111-L148
|
import os
import tensorflow as tf
from tensorflow import flags
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
tf.flags.DEFINE_string("data_dir", './train_features',
"Directory to read tfrecoreds from")
class Data_loader(object):
def __init__(self, filenames_, max_frames=32, num_classes=400, feature_size=1024):
self.feature_keys = ['rgb_feature', 'flow_feature', 'Monet_flow_feature']
self.feature_size = feature_size
self.num_classes = num_classes
self.max_frames = max_frames
dataset = tf.data.TFRecordDataset(filenames_)
dataset = dataset.map(self.parser_fn, num_parallel_calls=16)
dataset = dataset.repeat(1)
dataset = dataset.batch(1)
self.dataset = dataset.prefetch(2)
def parser_fn(self, example_proto):
contexts, features = tf.parse_single_sequence_example(
example_proto,
context_features={"num_frames": tf.FixedLenFeature(
[], tf.int64),
"num_feature": tf.FixedLenFeature(
[], tf.int64),
"label_index": tf.FixedLenFeature(
[], tf.int64),
"label_name": tf.FixedLenFeature(
[], tf.string),
"video": tf.FixedLenFeature(
[], tf.string)},
sequence_features={
feature_name: tf.FixedLenSequenceFeature([], dtype=tf.string)
for feature_name in self.feature_keys
})
labels = (tf.cast(tf.sparse_to_dense(contexts["label_index"], (self.num_classes,), 1,
validate_indices=False), tf.int32))
num_feature_type = len(self.feature_keys)
feature_matrices = [None] * num_feature_type
num_feature = -1
for feature_index in range(num_feature_type):
feature_matrix, num_frames_in_this_feature = self.get_video_matrix(features[self.feature_keys[feature_index]], subsample=False)
feature_matrices[feature_index] = feature_matrix
if num_feature == -1:
num_feature = num_frames_in_this_feature
rgb_feature = feature_matrices[0]
flow_feature = feature_matrices[1]
monet_flow_feature = feature_matrices[2]
num_feature = tf.minimum(num_feature, self.max_frames)
index_ones = tf.ones([num_feature, 1])
index_zeros = tf.zeros([self.max_frames - num_feature, 1])
feature_mask = tf.concat([index_ones, index_zeros], 0)
return rgb_feature, flow_feature, monet_flow_feature, labels, num_feature, feature_mask, contexts
def get_video_matrix(self, features, subsample=True):
if subsample:
decoded_features = tf.reshape(tf.decode_raw(features, tf.float32),
[-1, self.feature_size])
decoded_features = decoded_features[::8, :]
else:
decoded_features = tf.reshape(tf.decode_raw(features, tf.float32),
[-1, self.feature_size])
num_frames = tf.minimum(tf.shape(decoded_features)[0], self.max_frames)
feature_matrix = self.repeat_last(decoded_features, 0, self.max_frames)
return feature_matrix, num_frames
|
Apache License 2.0
|
quantumiracle/reinforcement_learning_for_traffic_light_control
|
7.ddpg_for_linear/common/cmd_util.py
|
make_mujoco_env
|
python
|
def make_mujoco_env(env_id, seed, reward_scale=1.0):
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env
|
Create a wrapped, monitored gym.Env for MuJoCo.
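A hedged usage sketch (the environment id is illustrative; requires gym with MuJoCo support and mpi4py, run from the directory containing the common package):
from common.cmd_util import make_mujoco_env  # module path as laid out in this repository
env = make_mujoco_env('HalfCheetah-v2', seed=0, reward_scale=1.0)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()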
|
https://github.com/quantumiracle/reinforcement_learning_for_traffic_light_control/blob/464c17ba25ebcb49f78d6cdcc96d7fe3764d7508/7.ddpg_for_linear/common/cmd_util.py#L44-L58
|
import os
try:
from mpi4py import MPI
except ImportError:
MPI = None
import gym
from gym.wrappers import FlattenDictWrapper
import logger
from common import set_global_seeds
from common.atari_wrappers import make_atari, wrap_deepmind
from common.vec_env.subproc_vec_env import SubprocVecEnv
from common.vec_env.dummy_vec_env import DummyVecEnv
from common.retro_wrappers import RewardScaler
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0):
if wrapper_kwargs is None: wrapper_kwargs = {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
def make_env(rank):
def _thunk():
env = make_atari(env_id) if env_type == 'atari' else gym.make(env_id)
env.seed(seed + 10000*mpi_rank + rank if seed is not None else None)
env = Monitor(env,
logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)),
allow_early_resets=True)
if env_type == 'atari': return wrap_deepmind(env, **wrapper_kwargs)
elif reward_scale != 1: return RewardScaler(env, reward_scale)
else: return env
return _thunk
set_global_seeds(seed)
if num_env > 1: return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
else: return DummyVecEnv([make_env(start_index)])
|
Apache License 2.0
|
zlalanne/home-assistant-config
|
config/homeassistant/custom_components/hacs/repositories/repository.py
|
HacsRepository.ref
|
python
|
def ref(self):
if self.status.selected_tag is not None:
if self.status.selected_tag == self.information.default_branch:
return self.information.default_branch
return "tags/{}".format(self.status.selected_tag)
if self.releases.releases:
return "tags/{}".format(self.versions.available)
return self.information.default_branch
|
Return the ref.
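A comment-only illustration of the branch logic above, with invented values:
# Hypothetical: selected_tag="1.2.0", default_branch="master" -> "tags/1.2.0"
# selected_tag="master", default_branch="master"              -> "master"
# no selected_tag, releases exist, available="1.3.0"          -> "tags/1.3.0"
# otherwise                                                   -> default branch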
|
https://github.com/zlalanne/home-assistant-config/blob/446947a8f3b9b8bd1597815d701572a515658626/config/homeassistant/custom_components/hacs/repositories/repository.py#L141-L151
|
import pathlib
import json
import os
import tempfile
import zipfile
from integrationhelper import Validate, Logger
from aiogithubapi import AIOGitHubException
from .manifest import HacsManifest
from ..helpers.misc import get_repository_name
from ..hacsbase import Hacs
from ..hacsbase.backup import Backup
from ..handler.download import async_download_file, async_save_file
from ..helpers.misc import version_left_higher_then_right
RERPOSITORY_CLASSES = {}
def register_repository_class(cls):
RERPOSITORY_CLASSES[cls.category] = cls
return cls
class RepositoryVersions:
available = None
available_commit = None
installed = None
installed_commit = None
class RepositoryStatus:
hide = False
installed = False
last_updated = None
new = True
selected_tag = None
show_beta = False
track = True
updated_info = False
first_install = True
class RepositoryInformation:
additional_info = None
authors = []
category = None
default_branch = None
description = ""
state = None
full_name = None
file_name = None
javascript_type = None
homeassistant_version = None
last_updated = None
uid = None
stars = 0
info = None
name = None
topics = []
class RepositoryReleases:
last_release = None
last_release_object = None
last_release_object_downloads = None
published_tags = []
objects = []
releases = False
class RepositoryPath:
local = None
remote = None
class RepositoryContent:
path = None
files = []
objects = []
single = False
class HacsRepository(Hacs):
def __init__(self):
self.content = RepositoryContent()
self.content.path = RepositoryPath()
self.information = RepositoryInformation()
self.repository_object = None
self.status = RepositoryStatus()
self.state = None
self.manifest = {}
self.repository_manifest = HacsManifest.from_dict({})
self.validate = Validate()
self.releases = RepositoryReleases()
self.versions = RepositoryVersions()
self.pending_restart = False
self.logger = None
@property
def pending_upgrade(self):
if self.status.installed:
if self.status.selected_tag is not None:
if self.status.selected_tag == self.information.default_branch:
if self.versions.installed_commit != self.versions.available_commit:
return True
return False
if self.display_installed_version != self.display_available_version:
return True
return False
@property
def config_flow(self):
if self.manifest:
if self.information.full_name == "hacs/integration":
return False
return self.manifest.get("config_flow", False)
return False
@property
|
MIT License
|
nelpy/nelpy
|
nelpy/core/_eventarray.py
|
BaseEventArray.label
|
python
|
def label(self):
if self._label is None:
logging.warning("label has not yet been specified")
return self._label
|
Label pertaining to the source of the event series.
|
https://github.com/nelpy/nelpy/blob/43d07f3652324f8b89348a21fde04019164ab536/nelpy/core/_eventarray.py#L319-L323
|
__all__ = ['EventArray',
'BinnedEventArray',
'SpikeTrainArray',
'BinnedSpikeTrainArray']
import numpy as np
import copy
import numbers
import logging
from abc import ABC, abstractmethod
from .. import core
from .. import utils
from .. import version
from ..utils_.decorators import keyword_deprecation, keyword_equivalence
from . import _accessors
class BaseEventArray(ABC):
__aliases__ = {}
__attributes__ = ["_fs", "_series_ids", "_series_labels", "_series_tags", "_label"]
def __init__(self, *, fs=None, series_ids=None, series_labels=None,
series_tags=None, label=None, empty=False, abscissa=None, ordinate=None, **kwargs):
self.__version__ = version.__version__
self.type_name = self.__class__.__name__
if abscissa is None:
abscissa = core.Abscissa()
if ordinate is None:
ordinate = core.Ordinate()
self._abscissa = abscissa
self._ordinate = ordinate
series_label = kwargs.pop('series_label', None)
if series_label is None:
series_label = 'series'
self._series_label = series_label
if empty:
for attr in self.__attributes__:
exec("self." + attr + " = None")
self._abscissa.support = type(self._abscissa.support)(empty=True)
self.loc = _accessors.ItemGetterLoc(self)
self.iloc = _accessors.ItemGetterIloc(self)
return
self._fs = None
self.fs = fs
if series_ids is None:
series_ids = list(range(1,self.n_series + 1))
series_ids = np.array(series_ids, ndmin=1)
if series_labels is None:
series_labels = series_ids
series_labels = np.array(series_labels, ndmin=1)
self.series_ids = series_ids
self.series_labels = series_labels
self._series_tags = series_tags
self.label = label
self.loc = _accessors.ItemGetterLoc(self)
self.iloc = _accessors.ItemGetterIloc(self)
def __renew__(self):
self.loc = _accessors.ItemGetterLoc(self)
self.iloc = _accessors.ItemGetterIloc(self)
def __repr__(self):
address_str = " at " + str(hex(id(self)))
return "<BaseEventArray" + address_str + ">"
@abstractmethod
@keyword_equivalence(this_or_that={'n_intervals':'n_epochs'})
def partition(self, ds=None, n_intervals=None):
return
@abstractmethod
def isempty(self):
return
@abstractmethod
def n_series(self):
return
@property
def n_intervals(self):
        """(int) The number of underlying intervals."""
        if self.isempty:
            return 0
return self._abscissa.support.n_intervals
@property
def series_ids(self):
return self._series_ids
@series_ids.setter
def series_ids(self, val):
if len(val) != self.n_series:
raise TypeError("series_ids must be of length n_series")
elif len(set(val)) < len(val):
raise TypeError("duplicate series_ids are not allowed")
else:
try:
series_ids = [int(id) for id in val]
except TypeError:
raise TypeError("series_ids must be int-like")
self._series_ids = series_ids
@property
def series_labels(self):
if self._series_labels is None:
logging.warning("series labels have not yet been specified")
return self.series_ids
return self._series_labels
@series_labels.setter
def series_labels(self, val):
if len(val) != self.n_series:
raise TypeError("labels must be of length n_series")
else:
try:
labels = [str(label) for label in val]
except TypeError:
raise TypeError("labels must be string-like")
self._series_labels = labels
@property
def series_tags(self):
if self._series_tags is None:
logging.warning("series tags have not yet been specified")
return self._series_tags
@property
def support(self):
return self._abscissa.support
@support.setter
def support(self, val):
if isinstance(val, type(self._abscissa.support)):
self._abscissa.support = val
elif isinstance(val, (tuple, list)):
prev_domain = self._abscissa.domain
self._abscissa.support = type(self._abscissa.support)([val[0], val[1]])
self._abscissa.domain = prev_domain
else:
raise TypeError('support must be of type {}'.format(str(type(self._abscissa.support))))
self._restrict_to_interval(self._abscissa.support)
@property
def domain(self):
return self._abscissa.domain
@domain.setter
def domain(self, val):
if isinstance(val, type(self._abscissa.support)):
self._abscissa.domain = val
elif isinstance(val, (tuple, list)):
self._abscissa.domain = type(self._abscissa.support)([val[0], val[1]])
else:
raise TypeError('support must be of type {}'.format(str(type(self._abscissa.support))))
self._restrict_to_interval(self._abscissa.support)
@property
def fs(self):
return self._fs
@fs.setter
def fs(self, val):
if self._fs == val:
return
try:
if val <= 0:
raise ValueError("sampling rate must be positive")
except:
raise TypeError("sampling rate must be a scalar")
self._fs = val
@property
|
MIT License
|
energypathways/energypathways
|
model_building_tools/gen_classes/json_loader.py
|
Scenario._load_measures
|
python
|
def _load_measures(self, tree):
for key, subtree in tree.iteritems():
if key.lower() == 'sensitivities':
self._load_sensitivities(subtree)
elif isinstance(subtree, dict):
self._load_measures(subtree)
elif key in self.MEASURE_CATEGORIES and isinstance(subtree, list):
for measure in subtree:
try:
bucket_id = self._bucket_lookup[key][measure]
except KeyError:
print("{} scenario wants to use {} {} but no such measure was found in the database.".format(self._id, key, measure))
continue
if measure in self._measures[key][bucket_id]:
raise ValueError("Scenario uses {} {} more than once.".format(key, measure))
self._measures[key][bucket_id].append(measure)
elif not isinstance(subtree, basestring):
raise ValueError("Encountered an uninterpretable non-string leaf node while loading the scenario. "
"The node is '{}: {}'".format(key, subtree))
|
Finds all measures in the Scenario by recursively scanning the parsed json and organizes them by type
(i.e. table) and a "bucket_id", which is a tuple of subsector/node id and technology id. If the particular
measure applies to a whole subsector/node rather than a specific technology, the second member of the
bucket_id tuple will be None.
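To make the bucket grouping concrete, a hypothetical scenario fragment and the resulting _measures layout (the measure ids and bucket ids below are invented for illustration):
# Hypothetical input subtree passed to _load_measures:
#   {"Demand": {"DemandEnergyEfficiencyMeasures": [12, 47]}}
# Assuming the bucket lookup maps measure 12 -> (subsector 3, technology 9)
# and measure 47 -> (subsector 3, None), the call would leave:
#   self._measures["DemandEnergyEfficiencyMeasures"][(3, 9)] == [12]
#   self._measures["DemandEnergyEfficiencyMeasures"][(3, None)] == [47]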
|
https://github.com/energypathways/energypathways/blob/0fb0ead475b6395f6b07fc43fe6c85826ee47d0f/model_building_tools/gen_classes/json_loader.py#L262-L287
|
import json
import os
import logging
import itertools
from collections import defaultdict
import psycopg2
import pandas as pd
import pdb
con = None
cur = None
def init_db(pg_host, pg_database, pg_user, pg_password=None):
global con, cur
conn_str = "host='%s' dbname='%s' user='%s'" % (pg_host, pg_database, pg_user)
if pg_password:
conn_str += " password='%s'" % pg_password
con = psycopg2.connect(conn_str)
cur = con.cursor()
def make_unique(names):
d = defaultdict(int)
for id, name in names.iteritems():
d[name] += 1
for id, name in names.iteritems():
if d[name] > 1:
names[id] = names[id] + ' ({})'.format(id)
return names
table_by_name = {}
def get_name_for_id(table_name, id, fix_dupes=True):
try:
names = table_by_name[table_name]
except KeyError:
query = 'select id, name from "{}"'.format(table_name)
cur.execute(query)
names = pd.Series({tup[0] : tup[1] for tup in cur.fetchall()})
table_by_name[table_name] = names
if len(names) != len(names.unique()):
if fix_dupes:
names = make_unique(names)
if id in names:
return names[id]
print("Id {} was not found for table {}".format(id, table_name))
return None
def read_foreign_keys():
from postgres import ForeignKeyQuery
cur.execute(ForeignKeyQuery)
rows = cur.fetchall()
columns = ['table_name', 'column_name', 'foreign_table_name', 'foreign_column_name']
df = pd.DataFrame(data=rows, columns=columns)
return df
def sql_read_table(table_name, column_names='*', return_unique=False, return_iterable=False, **filters):
if not isinstance(column_names, basestring):
column_names = ', '.join(column_names)
distinct = 'DISTINCT ' if return_unique else ''
query = 'SELECT ' + distinct + column_names + ' FROM "%s"' % table_name
if len(filters):
datatypes = sql_get_datatype(table_name, filters.keys())
list_of_filters = ['"' + col + '"=' + fix_sql_query_type(fil, datatypes[col]) if fil is not None else '"' + col + '"is' + 'NULL' for col, fil in filters.items()]
if list_of_filters:
query = query + " where " + " and ".join(list_of_filters)
cur.execute(query)
data = [tup[0] if len(tup) == 1 else tup for tup in cur.fetchall()]
else:
data = [None]
else:
cur.execute(query)
data = [tup[0] if len(tup) == 1 else tup for tup in cur.fetchall()]
if len(data) == 0 or data == [None]:
return [] if return_iterable else None
elif len(data) == 1:
return data if return_iterable else data[0]
else:
return data
def sql_get_datatype(table_name, column_names):
if isinstance(column_names, basestring):
column_names = [column_names]
cur.execute("select column_name, data_type from INFORMATION_SCHEMA.COLUMNS where table_name = %s and table_schema = 'public';", (table_name,))
table_info = cur.fetchall()
return dict([tup for tup in table_info if tup[0] in column_names])
def fix_sql_query_type(string, sqltype):
if sqltype == 'INTEGER':
return str(string)
else:
return "'" + str(string) + "'"
class Scenario():
MEASURE_CATEGORIES = ("DemandEnergyEfficiencyMeasures",
"DemandFlexibleLoadMeasures",
"DemandFuelSwitchingMeasures",
"DemandServiceDemandMeasures",
"DemandSalesShareMeasures",
"DemandStockMeasures",
"BlendNodeBlendMeasures",
"SupplyExportMeasures",
"SupplySalesMeasures",
"SupplySalesShareMeasures",
"SupplyStockMeasures",
"CO2PriceMeasures")
PARENT_COLUMN_NAMES = ('parent_id', 'subsector_id', 'supply_node_id', 'primary_node_id', 'import_node_id',
'demand_tech_id', 'demand_technology_id', 'supply_tech_id', 'supply_technology_id')
def __init__(self, scenario_id):
self._id = scenario_id
self._bucket_lookup = self._load_bucket_lookup()
scenario_file = scenario_id if scenario_id.endswith('.json') else scenario_id + '.json'
with open(scenario_file) as scenario_data:
scenario = json.load(scenario_data)
assert len(scenario) == 1, "More than one scenario found at top level in {}: {}".format(
                scenario_file, ", ".join(scenario.keys())
)
self.scenario = scenario
self._name = scenario.keys()[0]
self._measures = {category: defaultdict(list) for category in self.MEASURE_CATEGORIES}
self._sensitivities = defaultdict(dict)
self._load_measures(scenario)
@staticmethod
def _index_col(measure_table):
if measure_table.startswith('Demand'):
return 'subsector_id'
elif measure_table == "BlendNodeBlendMeasures":
return 'blend_node_id'
else:
return 'supply_node_id'
@staticmethod
def _subindex_col(measure_table):
if measure_table in ("DemandSalesShareMeasures", "DemandStockMeasures"):
return 'demand_technology_id'
elif measure_table in ("SupplySalesMeasures", "SupplySalesShareMeasures", "SupplyStockMeasures"):
return 'supply_technology_id'
else:
return None
@classmethod
def parent_col(cls, data_table):
if data_table == 'DemandSalesData':
return 'demand_technology_id'
if data_table == 'SupplyTechsEfficiencyData':
return 'supply_tech_id'
if data_table in ('SupplySalesData', 'SupplySalesShareData'):
return 'supply_technology_id'
cur.execute("""
SELECT column_name FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = %s
""", (data_table,))
cols = [row[0] for row in cur]
if not cols:
raise ValueError("Could not find any columns for table {}. Did you misspell the table "
"name?".format(data_table))
parent_cols = [col for col in cls.PARENT_COLUMN_NAMES if col in cols]
if not parent_cols:
raise ValueError("Could not find any known parent-referencing columns in {}. "
"Are you sure it's a table that references a parent table?".format(data_table))
elif len(parent_cols) > 1:
logging.debug("More than one potential parent-referencing column was found in {}; "
"we are using the first in this list: {}".format(data_table, parent_cols))
return parent_cols[0]
def _load_bucket_lookup(self):
lookup = defaultdict(dict)
for table in self.MEASURE_CATEGORIES:
subindex_col = self._subindex_col(table)
if subindex_col:
cur.execute('SELECT id, {}, {} FROM "{}";'.format(self._index_col(table), subindex_col, table))
for row in cur.fetchall():
lookup[table][row[0]] = (row[1], row[2])
else:
cur.execute('SELECT id, {} FROM "{}";'.format(self._index_col(table), table))
for row in cur.fetchall():
lookup[table][row[0]] = (row[1], None)
return lookup
def _load_sensitivities(self, sensitivities):
if not isinstance(sensitivities, list):
raise ValueError("The 'Sensitivities' for a scenario should be a list of objects containing "
"the keys 'table', 'parent_id' and 'sensitivity'.")
for sensitivity_spec in sensitivities:
table = sensitivity_spec['table']
parent_id = sensitivity_spec['parent_id']
sensitivity = sensitivity_spec['sensitivity']
if parent_id in self._sensitivities[table]:
raise ValueError("Scenario specifies sensitivity for {} {} more than once".format(table, parent_id))
cur.execute("""
SELECT COUNT(*) AS count
FROM "{}"
WHERE {} = %s AND sensitivity = %s
""".format(table, self.parent_col(table)), (parent_id, sensitivity))
row_count = cur.fetchone()[0]
if row_count == 0:
print("Could not find sensitivity '{}' for {} {}.".format(sensitivity, table, parent_id))
continue
self._sensitivities[table][parent_id] = sensitivity
|
MIT License
|
docusign/docusign-python-client
|
docusign_esign/models/full_name.py
|
FullName.anchor_tab_processor_version_metadata
|
python
|
def anchor_tab_processor_version_metadata(self):
return self._anchor_tab_processor_version_metadata
|
Gets the anchor_tab_processor_version_metadata of this FullName. # noqa: E501
:return: The anchor_tab_processor_version_metadata of this FullName. # noqa: E501
:rtype: PropertyMetadata
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/full_name.py#L690-L697
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class FullName(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'anchor_allow_white_space_in_characters': 'str',
'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata',
'anchor_case_sensitive': 'str',
'anchor_case_sensitive_metadata': 'PropertyMetadata',
'anchor_horizontal_alignment': 'str',
'anchor_horizontal_alignment_metadata': 'PropertyMetadata',
'anchor_ignore_if_not_present': 'str',
'anchor_ignore_if_not_present_metadata': 'PropertyMetadata',
'anchor_match_whole_word': 'str',
'anchor_match_whole_word_metadata': 'PropertyMetadata',
'anchor_string': 'str',
'anchor_string_metadata': 'PropertyMetadata',
'anchor_tab_processor_version': 'str',
'anchor_tab_processor_version_metadata': 'PropertyMetadata',
'anchor_units': 'str',
'anchor_units_metadata': 'PropertyMetadata',
'anchor_x_offset': 'str',
'anchor_x_offset_metadata': 'PropertyMetadata',
'anchor_y_offset': 'str',
'anchor_y_offset_metadata': 'PropertyMetadata',
'bold': 'str',
'bold_metadata': 'PropertyMetadata',
'conditional_parent_label': 'str',
'conditional_parent_label_metadata': 'PropertyMetadata',
'conditional_parent_value': 'str',
'conditional_parent_value_metadata': 'PropertyMetadata',
'custom_tab_id': 'str',
'custom_tab_id_metadata': 'PropertyMetadata',
'document_id': 'str',
'document_id_metadata': 'PropertyMetadata',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_color_metadata': 'PropertyMetadata',
'font_metadata': 'PropertyMetadata',
'font_size': 'str',
'font_size_metadata': 'PropertyMetadata',
'form_order': 'str',
'form_order_metadata': 'PropertyMetadata',
'form_page_label': 'str',
'form_page_label_metadata': 'PropertyMetadata',
'form_page_number': 'str',
'form_page_number_metadata': 'PropertyMetadata',
'height': 'str',
'height_metadata': 'PropertyMetadata',
'italic': 'str',
'italic_metadata': 'PropertyMetadata',
'locale_policy': 'LocalePolicyTab',
'merge_field': 'MergeField',
'merge_field_xml': 'str',
'name': 'str',
'name_metadata': 'PropertyMetadata',
'page_number': 'str',
'page_number_metadata': 'PropertyMetadata',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_id_guid_metadata': 'PropertyMetadata',
'recipient_id_metadata': 'PropertyMetadata',
'smart_contract_information': 'SmartContractInformation',
'source': 'str',
'status': 'str',
'status_metadata': 'PropertyMetadata',
'tab_group_labels': 'list[str]',
'tab_group_labels_metadata': 'PropertyMetadata',
'tab_id': 'str',
'tab_id_metadata': 'PropertyMetadata',
'tab_label': 'str',
'tab_label_metadata': 'PropertyMetadata',
'tab_order': 'str',
'tab_order_metadata': 'PropertyMetadata',
'tab_type': 'str',
'tab_type_metadata': 'PropertyMetadata',
'template_locked': 'str',
'template_locked_metadata': 'PropertyMetadata',
'template_required': 'str',
'template_required_metadata': 'PropertyMetadata',
'tooltip': 'str',
'tool_tip_metadata': 'PropertyMetadata',
'underline': 'str',
'underline_metadata': 'PropertyMetadata',
'value': 'str',
'value_metadata': 'PropertyMetadata',
'width': 'str',
'width_metadata': 'PropertyMetadata',
'x_position': 'str',
'x_position_metadata': 'PropertyMetadata',
'y_position': 'str',
'y_position_metadata': 'PropertyMetadata'
}
attribute_map = {
'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters',
'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata',
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata',
'anchor_string': 'anchorString',
'anchor_string_metadata': 'anchorStringMetadata',
'anchor_tab_processor_version': 'anchorTabProcessorVersion',
'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata',
'anchor_units': 'anchorUnits',
'anchor_units_metadata': 'anchorUnitsMetadata',
'anchor_x_offset': 'anchorXOffset',
'anchor_x_offset_metadata': 'anchorXOffsetMetadata',
'anchor_y_offset': 'anchorYOffset',
'anchor_y_offset_metadata': 'anchorYOffsetMetadata',
'bold': 'bold',
'bold_metadata': 'boldMetadata',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_label_metadata': 'conditionalParentLabelMetadata',
'conditional_parent_value': 'conditionalParentValue',
'conditional_parent_value_metadata': 'conditionalParentValueMetadata',
'custom_tab_id': 'customTabId',
'custom_tab_id_metadata': 'customTabIdMetadata',
'document_id': 'documentId',
'document_id_metadata': 'documentIdMetadata',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_color_metadata': 'fontColorMetadata',
'font_metadata': 'fontMetadata',
'font_size': 'fontSize',
'font_size_metadata': 'fontSizeMetadata',
'form_order': 'formOrder',
'form_order_metadata': 'formOrderMetadata',
'form_page_label': 'formPageLabel',
'form_page_label_metadata': 'formPageLabelMetadata',
'form_page_number': 'formPageNumber',
'form_page_number_metadata': 'formPageNumberMetadata',
'height': 'height',
'height_metadata': 'heightMetadata',
'italic': 'italic',
'italic_metadata': 'italicMetadata',
'locale_policy': 'localePolicy',
'merge_field': 'mergeField',
'merge_field_xml': 'mergeFieldXml',
'name': 'name',
'name_metadata': 'nameMetadata',
'page_number': 'pageNumber',
'page_number_metadata': 'pageNumberMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_id_guid_metadata': 'recipientIdGuidMetadata',
'recipient_id_metadata': 'recipientIdMetadata',
'smart_contract_information': 'smartContractInformation',
'source': 'source',
'status': 'status',
'status_metadata': 'statusMetadata',
'tab_group_labels': 'tabGroupLabels',
'tab_group_labels_metadata': 'tabGroupLabelsMetadata',
'tab_id': 'tabId',
'tab_id_metadata': 'tabIdMetadata',
'tab_label': 'tabLabel',
'tab_label_metadata': 'tabLabelMetadata',
'tab_order': 'tabOrder',
'tab_order_metadata': 'tabOrderMetadata',
'tab_type': 'tabType',
'tab_type_metadata': 'tabTypeMetadata',
'template_locked': 'templateLocked',
'template_locked_metadata': 'templateLockedMetadata',
'template_required': 'templateRequired',
'template_required_metadata': 'templateRequiredMetadata',
'tooltip': 'tooltip',
'tool_tip_metadata': 'toolTipMetadata',
'underline': 'underline',
'underline_metadata': 'underlineMetadata',
'value': 'value',
'value_metadata': 'valueMetadata',
'width': 'width',
'width_metadata': 'widthMetadata',
'x_position': 'xPosition',
'x_position_metadata': 'xPositionMetadata',
'y_position': 'yPosition',
'y_position_metadata': 'yPositionMetadata'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._anchor_allow_white_space_in_characters = None
self._anchor_allow_white_space_in_characters_metadata = None
self._anchor_case_sensitive = None
self._anchor_case_sensitive_metadata = None
self._anchor_horizontal_alignment = None
self._anchor_horizontal_alignment_metadata = None
self._anchor_ignore_if_not_present = None
self._anchor_ignore_if_not_present_metadata = None
self._anchor_match_whole_word = None
self._anchor_match_whole_word_metadata = None
self._anchor_string = None
self._anchor_string_metadata = None
self._anchor_tab_processor_version = None
self._anchor_tab_processor_version_metadata = None
self._anchor_units = None
self._anchor_units_metadata = None
self._anchor_x_offset = None
self._anchor_x_offset_metadata = None
self._anchor_y_offset = None
self._anchor_y_offset_metadata = None
self._bold = None
self._bold_metadata = None
self._conditional_parent_label = None
self._conditional_parent_label_metadata = None
self._conditional_parent_value = None
self._conditional_parent_value_metadata = None
self._custom_tab_id = None
self._custom_tab_id_metadata = None
self._document_id = None
self._document_id_metadata = None
self._error_details = None
self._font = None
self._font_color = None
self._font_color_metadata = None
self._font_metadata = None
self._font_size = None
self._font_size_metadata = None
self._form_order = None
self._form_order_metadata = None
self._form_page_label = None
self._form_page_label_metadata = None
self._form_page_number = None
self._form_page_number_metadata = None
self._height = None
self._height_metadata = None
self._italic = None
self._italic_metadata = None
self._locale_policy = None
self._merge_field = None
self._merge_field_xml = None
self._name = None
self._name_metadata = None
self._page_number = None
self._page_number_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_id_guid_metadata = None
self._recipient_id_metadata = None
self._smart_contract_information = None
self._source = None
self._status = None
self._status_metadata = None
self._tab_group_labels = None
self._tab_group_labels_metadata = None
self._tab_id = None
self._tab_id_metadata = None
self._tab_label = None
self._tab_label_metadata = None
self._tab_order = None
self._tab_order_metadata = None
self._tab_type = None
self._tab_type_metadata = None
self._template_locked = None
self._template_locked_metadata = None
self._template_required = None
self._template_required_metadata = None
self._tooltip = None
self._tool_tip_metadata = None
self._underline = None
self._underline_metadata = None
self._value = None
self._value_metadata = None
self._width = None
self._width_metadata = None
self._x_position = None
self._x_position_metadata = None
self._y_position = None
self._y_position_metadata = None
self.discriminator = None
setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None))
setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None))
setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None))
setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None))
setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None))
setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None))
setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None))
setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None))
setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None))
setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None))
setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None))
setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None))
setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None))
setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None))
setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None))
setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None))
setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None))
setattr(self, "_{}".format('bold'), kwargs.get('bold', None))
setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None))
setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None))
setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None))
setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None))
setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None))
setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None))
setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None))
setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None))
setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None))
setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None))
setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None))
setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None))
setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None))
setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None))
setattr(self, "_{}".format('height'), kwargs.get('height', None))
setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None))
setattr(self, "_{}".format('italic'), kwargs.get('italic', None))
setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None))
setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None))
setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None))
setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None))
setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None))
setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None))
setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None))
setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None))
setattr(self, "_{}".format('source'), kwargs.get('source', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None))
setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None))
setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None))
setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None))
setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None))
setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None))
setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None))
setattr(self, "_{}".format('underline'), kwargs.get('underline', None))
setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None))
setattr(self, "_{}".format('width'), kwargs.get('width', None))
setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None))
setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None))
setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None))
setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None))
setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None))
@property
def anchor_allow_white_space_in_characters(self):
return self._anchor_allow_white_space_in_characters
@anchor_allow_white_space_in_characters.setter
def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters):
self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters
@property
def anchor_allow_white_space_in_characters_metadata(self):
return self._anchor_allow_white_space_in_characters_metadata
@anchor_allow_white_space_in_characters_metadata.setter
def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata):
self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata
@property
def anchor_case_sensitive(self):
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_case_sensitive_metadata(self):
return self._anchor_case_sensitive_metadata
@anchor_case_sensitive_metadata.setter
def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata):
self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata
@property
def anchor_horizontal_alignment(self):
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_horizontal_alignment_metadata(self):
return self._anchor_horizontal_alignment_metadata
@anchor_horizontal_alignment_metadata.setter
def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata):
self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata
@property
def anchor_ignore_if_not_present(self):
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_ignore_if_not_present_metadata(self):
return self._anchor_ignore_if_not_present_metadata
@anchor_ignore_if_not_present_metadata.setter
def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata):
self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata
@property
def anchor_match_whole_word(self):
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_match_whole_word_metadata(self):
return self._anchor_match_whole_word_metadata
@anchor_match_whole_word_metadata.setter
def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata):
self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata
@property
def anchor_string(self):
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
self._anchor_string = anchor_string
@property
def anchor_string_metadata(self):
return self._anchor_string_metadata
@anchor_string_metadata.setter
def anchor_string_metadata(self, anchor_string_metadata):
self._anchor_string_metadata = anchor_string_metadata
@property
def anchor_tab_processor_version(self):
return self._anchor_tab_processor_version
@anchor_tab_processor_version.setter
def anchor_tab_processor_version(self, anchor_tab_processor_version):
self._anchor_tab_processor_version = anchor_tab_processor_version
@property
|
MIT License
|
ucam-smt/sgnmt
|
cam/sgnmt/ui.py
|
parse_args
|
python
|
def parse_args(parser):
args = parser.parse_args()
if args.config_file:
if not YAML_AVAILABLE:
logging.fatal("Install PyYAML in order to use config files.")
return args
paths = args.config_file
delattr(args, 'config_file')
arg_dict = args.__dict__
for path in utils.split_comma(paths):
_load_config_file(arg_dict, path)
return args
|
http://codereview.stackexchange.com/questions/79008/parse-a-config-file-and-add-to-command-line-arguments-using-argparse-in-python
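The helper _load_config_file called by parse_args is not shown here; a minimal sketch of what it might do, assuming the YAML keys mirror the argparse destinations (the behaviour is an assumption, not the project's actual implementation):
# Hypothetical sketch only: merge keys from a YAML config file into the
# already-parsed argument dictionary, later files overriding earlier ones.
def _load_config_file(arg_dict, path):
    with open(path) as f:
        data = yaml.safe_load(f) or {}
    for key, value in data.items():
        if key not in arg_dict:
            logging.warning("Unknown option '%s' in config file %s", key, path)
        arg_dict[key] = value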
|
https://github.com/ucam-smt/sgnmt/blob/c663ec7b251552e36b6b4f992f0ac21aad87cb7b/cam/sgnmt/ui.py#L122-L135
|
import argparse
import logging
import os
import sys
import platform
from cam.sgnmt import utils
from cam import sgnmt
YAML_AVAILABLE = True
try:
import yaml
except:
YAML_AVAILABLE = False
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def run_diagnostics():
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
print("Checking SGNMT version.... %s%s%s"
% (OKGREEN, sgnmt.__version__, ENDC))
if sys.version_info > (3, 0):
print("Checking Python3.... %sOK (%s)%s"
% (OKGREEN, platform.python_version(), ENDC))
else:
print("Checking Python3.... %sNOT FOUND %s%s"
% (FAIL, sys.version_info, ENDC))
print("Please upgrade to Python 3!")
if YAML_AVAILABLE:
print("Checking PyYAML.... %sOK%s" % (OKGREEN, ENDC))
else:
print("Checking PyYAML.... %sNOT FOUND%s" % (FAIL, ENDC))
logging.info("NOTOK: PyYAML is not available. That means that "
"--config_file cannot be used. Check the documentation "
"for further instructions.")
try:
import tensorflow as tf
print("Checking TensorFlow.... %sOK (%s)%s"
% (OKGREEN, tf.__version__, ENDC))
except ImportError:
print("Checking TensorFlow.... %sNOT FOUND%s" % (FAIL, ENDC))
print("TensorFlow is not available. This affects the following "
"components: Predictors: *t2t, lexnizza, nizza; interpolation "
"schemes: MoE interpolation. Check the documentation for "
"further instructions.")
try:
import tensor2tensor
print("Checking Tensor2Tensor.... %sOK%s" % (OKGREEN, ENDC))
except ImportError:
print("Checking Tensor2Tensor.... %sNOT FOUND%s" % (FAIL, ENDC))
print("Tensor2Tensor is not available. This affects the following "
"components: Predictors: t2t, edit2t, fertt2t, segt2t. Check "
"the documentation for further instructions.")
try:
import openfst_python as fst
print("Checking OpenFST.... %sOK (openfst_python)%s" % (OKGREEN, ENDC))
except ImportError:
try:
import pywrapfst as fst
print("Checking OpenFST.... %sOK (pywrapfst)%s" % (OKGREEN, ENDC))
except ImportError:
print("Checking OpenFST.... %sNOT FOUND%s" % (FAIL, ENDC))
print("OpenFST is not available. This affects the following "
"components: Predictors: nfst, fst, rtn; decoders: fstbeam; "
"outputs: fst, sfst. Check the documentation for further "
"instructions.")
try:
import kenlm
print("Checking KenLM.... %sOK%s" % (OKGREEN, ENDC))
except ImportError:
print("Checking KenLM.... %sNOT FOUND%s" % (FAIL, ENDC))
print("KenLM is not available. This affects the following components: "
"Predictors: kenlm. Check the documentation for further "
"instructions.")
try:
import torch
print("Checking PyTorch.... %sOK (%s)%s"
% (OKGREEN, torch.__version__, ENDC))
except ImportError:
print("Checking PyTorch.... %sNOT FOUND%s" % (FAIL, ENDC))
print("PyTorch is not available. This affects the following "
"components: Predictors: fairseq, onmtpy. Check the "
"documentation for further instructions.")
try:
import fairseq
print("Checking fairseq.... %sOK (%s)%s"
% (OKGREEN, fairseq.__version__, ENDC))
except ImportError:
print("Checking fairseq.... %sNOT FOUND%s" % (FAIL, ENDC))
print("fairseq is not available. This affects the following "
"components: Predictors: fairseq. Check the "
"documentation for further instructions.")
|
Apache License 2.0
|
jason-green-io/papyri
|
papyri.py
|
genZoom17Tiles
|
python
|
def genZoom17Tiles(level4MapFolder, outputFolder):
globString = filenameSeparator.join(["*", "*", "*.png"])
level4MapFilenames = glob.glob(os.path.join(level4MapFolder, globString))
for level4MapFilename in tqdm(level4MapFilenames, "level 4 -> zoom 17 tiles", bar_format="{l_bar}{bar}"):
name = os.path.basename(level4MapFilename)
dim, x, z, _ = name.split(filenameSeparator)
level4x = int(x)
level4z = int(z)
tilex = level4x // 2048
tilez = level4z // 2048 * -1
with Image.open(level4MapFilename) as level4MapPng:
for zoom in range(17, 18):
numTiles = 2 ** (zoom - 13)
imageWidth = 2048 // numTiles
for numx in range(numTiles):
levelNumx = tilex * numTiles + numx
foldername = os.path.join(outputFolder, dim, str(zoom), str(levelNumx))
os.makedirs(foldername, exist_ok=True)
for numz in range(numTiles):
levelNumz = tilez * numTiles + numz
cropBox = (numx * imageWidth,
numz * imageWidth,
numx * imageWidth + imageWidth,
numz * imageWidth + imageWidth)
filename = os.path.join(foldername, str(levelNumz) + ".png")
tilePng = level4MapPng.crop(cropBox)
tilePng = tilePng.resize((256, 256), Image.NEAREST)
tilePng.save(filename)
|
Generates the zoom level 17 tiles from the combined zoom level 4 maps.
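A worked example of the tile arithmetic in genZoom17Tiles: at zoom 17 each 2048-pixel level-4 region is split into 2 ** (17 - 13) = 16 tiles per axis, each covering 2048 // 16 = 128 source pixels, and every tile is then upscaled to a 256x256 PNG.
# Illustrative check of the zoom-17 tile math used above (numbers only).
zoom = 17
numTiles = 2 ** (zoom - 13)    # 16 tiles per axis
imageWidth = 2048 // numTiles  # 128 source pixels per tile
assert (numTiles, imageWidth) == (16, 128)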
|
https://github.com/jason-green-io/papyri/blob/53ab16ceab3e6d847e58b85a4b05509e26362190/papyri.py#L541-L576
|
import os
import datetime
import glob
import logging
import nbtlib
import bedrock.leveldb as leveldb
from PIL import ImageFont, Image, ImageDraw
import math
import operator
from collections.abc import Callable
from collections import defaultdict, OrderedDict, namedtuple
from tqdm import tqdm
import argparse
import gzip
import json
import distutils.dir_util
import re
from io import BytesIO
import sys
import hashlib
import time
import struct
__author__ = "Jason Green"
__copyright__ = "Copyright 2020, Tesseract Designs"
__credits__ = ["Jason Green"]
__license__ = "MIT"
__version__ = "2.0.5"
__maintainer__ = "Jason Green"
__email__ = "jason@green.io"
__status__ = "release"
dir_path = os.path.dirname(os.path.realpath(__file__))
filenameSeparator = "."
mapPngFilenameFormat = filenameSeparator.join(["{mapId}", "{mapHash}", "{epoch}", "{dimension}", "{x}", "{z}", "{scale}.png"])
now = int(time.time())
multipliers = [180, 220, 255, 135]
basecolors = [(0, 0, 0, 0),
(127, 178, 56),
(247, 233, 163),
(199, 199, 199),
(255, 0, 0),
(160, 160, 255),
(167, 167, 167),
(0, 124, 0),
(255, 255, 255),
(164, 168, 184),
(151, 109, 77),
(112, 112, 112),
(64, 64, 255),
(143, 119, 72),
(255, 252, 245),
(216, 127, 51),
(178, 76, 216),
(102, 153, 216),
(229, 229, 51),
(127, 204, 25),
(242, 127, 165),
(76, 76, 76),
(153, 153, 153),
(76, 127, 153),
(127, 63, 178),
(51, 76, 178),
(102, 76, 51),
(102, 127, 51),
(153, 51, 51),
(25, 25, 25),
(250, 238, 77),
(92, 219, 213),
(74, 128, 255),
(0, 217, 58),
(129, 86, 49),
(112, 2, 0),
(209, 177, 161),
(159, 82, 36),
(149, 87, 108),
(112, 108, 138),
(186, 133, 36),
(103, 117, 53),
(160, 77, 78),
(57, 41, 35),
(135, 107, 98),
(87, 92, 92),
(122, 73, 88),
(76, 62, 92),
(76, 50, 35),
(76, 82, 42),
(142, 60, 46),
(37, 22, 16),
(189, 48, 49),
(148, 63, 97),
(92, 25, 29),
(22, 126, 134),
(58, 142, 140),
(86, 44, 62),
(20, 180, 133),
(100, 100, 100),
(216, 175, 147),
(127, 167, 150)]
def multiplyColor(colorTuple, multiplier):
return tuple([math.floor(a * multiplier / 255.0) for a in colorTuple])
allColors = [multiplyColor(color, multiplier)
for color in basecolors for multiplier in multipliers]
dimDict = {-1: "minecraft:the_nether",
0: "minecraft:overworld",
1: "minecraft:the_end",
'minecraft:overworld': 0,
'minecraft:the_end': 1,
'minecraft:the_nether': -1,
'minecraft@overworld': 0,
'minecraft@the_end': 1,
'minecraft@the_nether': -1}
def findMapFiles(inputFolder):
mapFiles = []
folderTree = list(os.walk(inputFolder))
dataFolders = [f for f in folderTree if f[0].endswith(os.sep + "data")]
for folder in dataFolders:
maybeMapFiles = [os.path.join(folder[0], f) for f in folder[2] if f.startswith("map_") and f.endswith(".dat")]
if "idcounts.dat" in folder[2]:
logging.info("Found %s maps in %s", len(maybeMapFiles), folder[0])
mapFiles = maybeMapFiles
if not mapFiles:
logging.info("Didn't find any maps, did you specify the correct world location?")
sys.exit(1)
return mapFiles
class LastUpdatedOrderedDict(OrderedDict):
def __setitem__(self, key, value):
if key in self:
del self[key]
OrderedDict.__setitem__(self, key, value)
class DefaultOrderedDict(OrderedDict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
BannerTuple = namedtuple("BannerTuple", ["X", "Y", "Z", "name", "color", "dimension"])
MapTuple = namedtuple("MapTuple", ["mapData", "bannerData", "frameData"])
MapPngTuple = namedtuple("MapPngTuple", ["mapId", "mapHash", "epoch", "x", "z", "dimension", "scale"])
def mapPngsSortedByEpoch(mapPngs):
centerEpochs = []
for mapPng in mapPngs:
centerEpochs.append((mapPng.epoch, mapPng))
centerEpochs.sort(key=operator.itemgetter(0))
return [m[1] for m in centerEpochs]
def filterLatestMapPngsById(mapPngs):
idEpochs = []
filterDict = {}
for mapPng in mapPngs:
idEpochs.append((mapPng.mapId, mapPng.epoch, mapPng))
idEpochs.sort(key=operator.itemgetter(1))
for idEpoch in idEpochs:
filterDict[idEpoch[0]] = idEpoch[2]
latestMapPngs = list(filterDict.values())
return latestMapPngs
def makeMaps(worldFolder, outputFolder, serverType, unlimitedTracking=False):
nbtMapData = []
if serverType == "bds":
db = leveldb.open(os.path.join(worldFolder, "db"))
for a in tqdm(leveldb.iterate(db), "leveldb map keys -> nbt".ljust(24), bar_format="{l_bar}{bar}"):
key = bytearray(a[0])
if b"map" in key:
mapNbtIo = BytesIO(a[1])
mapNbtFile = nbtlib.File.parse(mapNbtIo, byteorder="little")
mapNbt = mapNbtFile.root
mapId = int(mapNbt["mapId"])
epoch = 0
nbtMapData.append({"epoch": epoch, "id": mapId, "nbt": mapNbt})
elif serverType == "java":
mapDatFiles = findMapFiles(worldFolder)
for mapDatFile in tqdm(mapDatFiles, "map_*.dat -> nbt".ljust(24), bar_format="{l_bar}{bar}"):
mapNbtFile = nbtlib.load(mapDatFile)
mapNbt = mapNbtFile.root["data"]
mapId = int(os.path.basename(mapDatFile)[4:-4])
epoch = int(os.path.getmtime(mapDatFile))
nbtMapData.append({"epoch": epoch, "id": mapId, "nbt": mapNbt})
maps = []
os.makedirs(outputFolder, exist_ok=True)
mapPngs = getMapPngs(outputFolder)
currentIds = {x.mapId: x for x in mapPngs}
for nbtMap in tqdm(nbtMapData, "nbt -> png".ljust(24), bar_format="{l_bar}{bar}"):
mapId = nbtMap["id"]
mapNbt = nbtMap["nbt"]
mapEpoch = nbtMap["epoch"]
try:
mapUnlimitedTracking = mapNbt["unlimitedTracking"]
except KeyError:
mapUnlimitedTracking = False
if mapUnlimitedTracking and not unlimitedTracking:
continue
scale = int(mapNbt["scale"])
x = int(mapNbt["xCenter"])
z = int(mapNbt["zCenter"])
dimension = mapNbt["dimension"]
mapColors = mapNbt["colors"]
if type(dimension) == nbtlib.tag.Int:
dimension = dimDict[mapNbt["dimension"]]
elif type(dimension) == nbtlib.tag.Byte:
dimension = dimDict[mapNbt["dimension"]]
else:
dimension = dimension.strip('"')
dimension = dimension.replace(":", "@")
try:
mapBanners = mapNbt["banners"]
except KeyError:
mapBanners = []
try:
mapFrames = mapNbt["frames"]
except KeyError:
mapFrames = []
banners = set()
for banner in mapBanners:
X = int(banner["Pos"]["X"])
Y = int(banner["Pos"]["Y"])
Z = int(banner["Pos"]["Z"])
color = banner["Color"]
try:
name = json.loads(banner["Name"])["text"]
except KeyError:
name = ""
bannerDict = {"X": X,
"Y": Y,
"Z": Z,
"color": color,
"name": name,
"dimension": dimension}
bannerTuple = BannerTuple(**bannerDict)
banners.add(bannerTuple)
frames = []
for frame in mapFrames:
X = int(frame["Pos"]["X"])
Y = int(frame["Pos"]["Y"])
Z = int(frame["Pos"]["Z"])
rotation = int(frame["Rotation"])
frameDict = {"X": X,
"Y": Y,
"Z": Z,
"rotation": rotation}
frames.append(frameDict)
if serverType == "bds":
mapImage = Image.frombytes("RGBA", (128, 128),
bytes([x % 256 for x in mapColors]),
'raw')
elif serverType == "java":
colorTuples = [allColors[x % 256] for x in mapColors]
mapImage = Image.new("RGBA", (128, 128))
mapImage.putdata(colorTuples)
mapHash = hashlib.md5(mapImage.tobytes()).hexdigest()
if mapHash == "fcd6bcb56c1689fcef28b57c22475bad":
continue
if mapId not in currentIds:
logging.debug("%s is a new map", mapId)
epoch = mapEpoch
else:
logging.debug("%s is already known", mapId)
if mapHash != currentIds.get(mapId).mapHash:
logging.debug("%s changed and will get an updated epoch", mapId)
epoch = now if not mapEpoch else mapEpoch
elif mapEpoch > currentIds.get(mapId).epoch:
logging.debug("%s has a more recent epoch from it's dat file, updating", mapId)
epoch = mapEpoch
else:
logging.debug("%s has not changed and will keep it's epoch", mapId)
epoch = currentIds.get(mapId).epoch
mapPng = MapPngTuple(mapId=mapId,
mapHash=mapHash,
epoch=epoch,
dimension=dimension,
x=x,
z=z,
scale=scale)
mapImage = mapImage.resize((128 * 2 ** scale,) * 2, Image.NEAREST)
filename = mapPngFilenameFormat.format(**mapPng._asdict())
try:
oldFilename = mapPngFilenameFormat.format(**currentIds.get(mapId)._asdict())
os.remove(os.path.join(outputFolder, oldFilename))
except:
logging.debug("%s isn't there, didn't delete", mapId)
mapImage.save(os.path.join(outputFolder, filename))
mapData = MapTuple(mapData=mapPng,
bannerData=banners,
frameData=frames)
maps.append(mapData)
logging.debug(maps)
logging.info("Processed %s maps", len(maps))
return maps
def getMapPngs(mapPngFolder):
mapPngList = []
globString = filenameSeparator.join(6 * ["*"] + ["*.png"])
mapPngs = glob.glob(os.path.join(mapPngFolder, globString))
for mapPng in mapPngs:
filename = os.path.basename(mapPng)
(mapId,
mapHash,
epoch,
dimension,
x,
z,
scale,
_) = filename.split(filenameSeparator)
x = int(x)
z = int(z)
scale = int(scale)
mapId = int(mapId)
if not dimension in dimDict:
logging.info("Skipped map %s with invalid dimension.", mapId)
continue
epoch = int(epoch)
mapPngList.append(MapPngTuple(mapId=mapId,
mapHash=mapHash,
epoch=epoch,
dimension=dimension,
x=x,
z=z,
scale=scale))
return mapPngList
def mergeToLevel4(mapPngFolder, outputFolder, disablezoomsort):
filenameFormat = filenameSeparator.join(["{dimension}", "{x}", "{z}.png"])
os.makedirs(outputFolder, exist_ok=True)
level4Dict = defaultdict(lambda: defaultdict(list))
mapPngs = getMapPngs(mapPngFolder)
latestMapPngs = mapPngsSortedByEpoch(mapPngs)
for mapPng in latestMapPngs:
mapTopLeft = (mapPng.x - 128 * 2 ** mapPng.scale // 2 + 64,
mapPng.z - 128 * 2 ** mapPng.scale // 2 + 64)
level4Coords = (mapTopLeft[0] // 2048 * 2048,
mapTopLeft[1] // 2048 * 2048)
level4Dict[mapPng.dimension][level4Coords].append(mapPng)
logging.debug(level4Dict)
for dim in level4Dict.items():
d = dim[0]
for coords in tqdm(dim[1].items(), "level 4 of dim: {}".format(d).ljust(24), bar_format="{l_bar}{bar}"):
c = coords[0]
mapTuples = coords[1]
if not disablezoomsort:
mapTuples.sort(key=lambda x: x.scale, reverse=True)
level4MapPng = Image.new("RGBA", (2048, 2048))
for mapTuple in mapTuples:
mapPngCoords = (divmod(mapTuple.x - 128 * 2 ** mapTuple.scale // 2 + 64, 2048)[1],
divmod(mapTuple.z - 128 * 2 ** mapTuple.scale // 2 + 64, 2048)[1])
mapPngFilename = mapPngFilenameFormat.format(**mapTuple._asdict())
with Image.open(os.path.join(mapPngFolder, mapPngFilename)) as mapPng:
level4MapPng.paste(mapPng, mapPngCoords, mapPng)
fileName = filenameFormat.format(dimension=d, x=c[0], z=c[1]*-1)
filePath = os.path.join(outputFolder, fileName)
level4MapPng.save(filePath)
level4MapPng.close()
|
MIT License
|
neuromorphicprocessorproject/snn_toolbox
|
snntoolbox/utils/utils.py
|
confirm_overwrite
|
python
|
def confirm_overwrite(filepath):
if os.path.isfile(filepath):
overwrite = input("[WARNING] {} already exists - ".format(filepath) +
"overwrite? [y/n]")
while overwrite not in ['y', 'n']:
overwrite = input("Enter 'y' (overwrite) or 'n' (cancel).")
return overwrite == 'y'
return True
|
If config.get('output', 'overwrite')==False and the file exists, ask user
if it should be overwritten.
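A hedged usage sketch; the file path is illustrative:
# Hypothetical call site: only write the file when it is new or the user agrees.
path = "results/run_01.h5"
if confirm_overwrite(path):
    with open(path, "w") as f:
        f.write("...")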
|
https://github.com/neuromorphicprocessorproject/snn_toolbox/blob/a85ada7b5d060500703285ef8a68f06ea1ffda65/snntoolbox/utils/utils.py#L58-L70
|
import importlib
import pkgutil
import json
import numpy as np
import os
import sys
import tempfile
import tensorflow as tf
from tensorflow import keras
def get_range(start=0.0, stop=1.0, num=5, method='linear'):
methods = {'linear', 'log', 'random'}
assert method in methods, "Specified grid-search method {} not supported.\
Choose among {}".format(method, methods)
assert start < stop, "Start must be smaller than stop."
assert num > 0 and isinstance(num, int), "Number of samples must be unsigned int."
if method == 'linear':
return np.linspace(start, stop, num)
if method == 'log':
return np.logspace(start, stop, num, endpoint=False)
if method == 'random':
return np.random.random_sample(num) * (stop - start) + start
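A small usage sketch for get_range; values are illustrative. Note that with method='log' the start and stop are passed to np.logspace and are therefore treated as exponents.
# Five linearly spaced samples between 0 and 1.
print(get_range(0.0, 1.0, num=5, method='linear'))  # [0.   0.25 0.5  0.75 1.  ]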
|
MIT License
|
kieranjol/ifiscripts
|
legacy_scripts/batchfixity.py
|
create_manifest
|
python
|
def create_manifest(source):
master_log = os.path.expanduser('~/Desktop/batchfixity_errors.log')
os.chdir(source)
for dirname in os.walk('.').next()[1]:
full_path = os.path.join(source, dirname)
manifest_textfile = '%s/%s_manifest.md5' % (full_path, dirname)
if not os.path.isfile(manifest_textfile):
log_name = '%s/%s_fixity.log' % (
os.path.dirname(full_path), dirname
)
generate_log(log_name, 'batchfixity started')
generate_log(log_name, '%s created' % manifest_textfile)
try:
hashlib_manifest(full_path, manifest_textfile, full_path)
generate_log(log_name, 'manifest creation complete')
shutil.move(log_name, full_path)
except IOError:
with open(master_log, 'ab') as log:
log.write(
'%s has failed probably because of special characters like a fada\n' % full_path
)
generate_log(
log_name, 'manifest has failed probably because of special characters like a fada'
)
|
Generates a master log and creates checksum manifests for all subdirectories.
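A hedged usage sketch; the parent folder path is illustrative. Failures are appended to ~/Desktop/batchfixity_errors.log as shown in the function above.
# Hypothetical call: write an MD5 manifest and fixity log into each subfolder.
create_manifest('/mnt/archive/2020_accessions')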
|
https://github.com/kieranjol/ifiscripts/blob/4a94789d6884774d3a0cee5e6a5032e59b401727/legacy_scripts/batchfixity.py#L46-L72
|
import argparse
import os
import shutil
from ififuncs import hashlib_manifest
from ififuncs import generate_log
def count_files(source):
file_count = 1
for _, directories, filenames in os.walk(source):
filenames = [f for f in filenames if f[0] != '.']
directories[:] = [d for d in directories if d[0] != '.']
for files in filenames:
print "Calculating number of files in all subdirectories - %s files \r"% file_count,
file_count += 1
return file_count
def make_parser():
parser = argparse.ArgumentParser(
description='Batch MD5 checksum generator.'
'Accepts a parent folder as input and will generate manifest for each subfolder.'
' Designed for a specific IFI Irish Film Archive workflow. '
'Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='file path of parent directory'
)
parser.add_argument(
'-v', action='store_true',
help='verbose mode - some extra information such as overall file count.'
)
return parser
|
MIT License
|
danielfrg/jupyterhub-kubernetes_spawner
|
kubernetes_spawner/swagger_client/models/v1_resource_quota_spec.py
|
V1ResourceQuotaSpec.__init__
|
python
|
def __init__(self):
self.swagger_types = {
'hard': 'str'
}
self.attribute_map = {
'hard': 'hard'
}
self._hard = None
|
V1ResourceQuotaSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
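A tiny usage sketch; the quota string is illustrative, and the generated class would normally expose a `hard` property wrapping `_hard`, which is not shown in this excerpt:

spec = V1ResourceQuotaSpec()
spec._hard = 'pods=10,cpu=4'   # illustrative value; usually set through the generated property
print(spec.attribute_map)      # {'hard': 'hard'}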
|
https://github.com/danielfrg/jupyterhub-kubernetes_spawner/blob/15a2b63ef719f8c3ff83221333f7de69c1495512/kubernetes_spawner/swagger_client/models/v1_resource_quota_spec.py#L30-L47
|
from pprint import pformat
from six import iteritems
class V1ResourceQuotaSpec(object):
|
Apache License 2.0
|
qiskit/qiskit-aqua
|
qiskit/optimization/problems/quadratic_program.py
|
QuadraticProgram.get_num_vars
|
python
|
def get_num_vars(self, vartype: Optional[VarType] = None) -> int:
if vartype:
return sum(variable.vartype == vartype for variable in self._variables)
else:
return len(self._variables)
|
Returns the total number of variables or the number of variables of the specified type.
Args:
vartype: The type to be filtered on. All variables are counted if None.
Returns:
The total number of variables.
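A short usage sketch built only on methods shown in the surrounding class:

qp = QuadraticProgram('example')
qp.binary_var(name='x')
qp.continuous_var(lowerbound=0, upperbound=5, name='y')
print(qp.get_num_vars())                         # 2
print(qp.get_num_vars(Variable.Type.BINARY))     # 1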
|
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/optimization/problems/quadratic_program.py#L522-L534
|
import logging
import warnings
from collections import defaultdict
from collections.abc import Sequence
from enum import Enum
from math import fsum, isclose
from typing import cast, List, Union, Dict, Optional, Tuple
import numpy as np
from docplex.mp.constr import (LinearConstraint as DocplexLinearConstraint,
QuadraticConstraint as DocplexQuadraticConstraint,
NotEqualConstraint)
try:
from docplex.mp.dvar import Var
except ImportError:
from docplex.mp.linear import Var
from docplex.mp.model import Model
from docplex.mp.model_reader import ModelReader
from docplex.mp.quad import QuadExpr
from docplex.mp.vartype import ContinuousVarType, BinaryVarType, IntegerVarType
from numpy import ndarray, zeros
from scipy.sparse import spmatrix
from qiskit.aqua import MissingOptionalLibraryError
from qiskit.aqua.operators import I, OperatorBase, PauliOp, WeightedPauliOperator, SummedOp, ListOp
from qiskit.quantum_info import Pauli
from .constraint import Constraint, ConstraintSense
from .linear_constraint import LinearConstraint
from .linear_expression import LinearExpression
from .quadratic_constraint import QuadraticConstraint
from .quadratic_expression import QuadraticExpression
from .quadratic_objective import QuadraticObjective
from .variable import Variable, VarType
from ..exceptions import QiskitOptimizationError
from ..infinity import INFINITY
logger = logging.getLogger(__name__)
class QuadraticProgramStatus(Enum):
VALID = 0
INFEASIBLE = 1
class QuadraticProgram:
Status = QuadraticProgramStatus
def __init__(self, name: str = '') -> None:
self._name = name
self._status = QuadraticProgram.Status.VALID
self._variables = []
self._variables_index = {}
self._linear_constraints = []
self._linear_constraints_index = {}
self._quadratic_constraints = []
self._quadratic_constraints_index = {}
self._objective = QuadraticObjective(self)
def __repr__(self) -> str:
return self.to_docplex().export_as_lp_string()
def clear(self) -> None:
self._name = ''
self._status = QuadraticProgram.Status.VALID
self._variables.clear()
self._variables_index.clear()
self._linear_constraints.clear()
self._linear_constraints_index.clear()
self._quadratic_constraints.clear()
self._quadratic_constraints_index.clear()
self._objective = QuadraticObjective(self)
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
@property
def status(self) -> QuadraticProgramStatus:
return self._status
@property
def variables(self) -> List[Variable]:
return self._variables
@property
def variables_index(self) -> Dict[str, int]:
return self._variables_index
def _add_variable(self,
lowerbound: Union[float, int],
upperbound: Union[float, int],
vartype: VarType,
name: Optional[str]) -> Variable:
if name is None:
name = 'x'
key_format = '{}'
else:
key_format = ''
return self._add_variables(1, lowerbound, upperbound, vartype, name, key_format)[1][0]
def _add_variables(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int],
upperbound: Union[float, int],
vartype: VarType,
name: Optional[str],
key_format: str) -> Tuple[List[str], List[Variable]]:
if isinstance(keys, int) and keys < 1:
raise QiskitOptimizationError(
"Cannot create non-positive number of variables: {}".format(keys))
if name is None:
name = 'x'
if '{{}}' in key_format:
raise QiskitOptimizationError(
"Formatter cannot contain nested substitutions: {}".format(key_format))
if key_format.count('{}') > 1:
raise QiskitOptimizationError(
"Formatter cannot contain more than one substitution: {}".format(key_format))
def _find_name(name, key_format, k):
prev = None
while True:
new_name = name + key_format.format(k)
if new_name == prev:
raise QiskitOptimizationError(
"Variable name already exists: {}".format(new_name))
if new_name in self._variables_index:
k += 1
prev = new_name
else:
break
return new_name, k + 1
names = []
variables = []
k = self.get_num_vars()
lst = keys if isinstance(keys, Sequence) else range(keys)
for key in lst:
if isinstance(keys, Sequence):
indexed_name = name + key_format.format(key)
else:
indexed_name, k = _find_name(name, key_format, k)
if indexed_name in self._variables_index:
raise QiskitOptimizationError(
"Variable name already exists: {}".format(indexed_name))
names.append(indexed_name)
self._variables_index[indexed_name] = self.get_num_vars()
variable = Variable(self, indexed_name, lowerbound, upperbound, vartype)
self._variables.append(variable)
variables.append(variable)
return names, variables
def _var_dict(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int],
upperbound: Union[float, int],
vartype: VarType,
name: Optional[str],
key_format: str) -> Dict[str, Variable]:
return dict(
zip(*self._add_variables(keys, lowerbound, upperbound, vartype, name, key_format)))
def _var_list(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int],
upperbound: Union[float, int],
vartype: VarType,
name: Optional[str],
key_format: str) -> List[Variable]:
return self._add_variables(keys, lowerbound, upperbound, vartype, name, key_format)[1]
def continuous_var(self,
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None) -> Variable:
return self._add_variable(lowerbound, upperbound, Variable.Type.CONTINUOUS, name)
def continuous_var_dict(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None,
key_format: str = '{}') -> Dict[str, Variable]:
return self._var_dict(keys, lowerbound, upperbound, Variable.Type.CONTINUOUS, name,
key_format)
def continuous_var_list(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None,
key_format: str = '{}') -> List[Variable]:
return self._var_list(keys, lowerbound, upperbound, Variable.Type.CONTINUOUS,
name, key_format)
def binary_var(self, name: Optional[str] = None) -> Variable:
return self._add_variable(0, 1, Variable.Type.BINARY, name)
def binary_var_dict(self,
keys: Union[int, Sequence],
name: Optional[str] = None,
key_format: str = '{}') -> Dict[str, Variable]:
return self._var_dict(keys, 0, 1, Variable.Type.BINARY, name, key_format)
def binary_var_list(self,
keys: Union[int, Sequence],
name: Optional[str] = None,
key_format: str = '{}') -> List[Variable]:
return self._var_list(keys, 0, 1, Variable.Type.BINARY, name, key_format)
def integer_var(self,
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None) -> Variable:
return self._add_variable(lowerbound, upperbound, Variable.Type.INTEGER, name)
def integer_var_dict(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None,
key_format: str = '{}') -> Dict[str, Variable]:
return self._var_dict(keys, lowerbound, upperbound, Variable.Type.INTEGER, name, key_format)
def integer_var_list(self,
keys: Union[int, Sequence],
lowerbound: Union[float, int] = 0,
upperbound: Union[float, int] = INFINITY,
name: Optional[str] = None,
key_format: str = '{}') -> List[Variable]:
return self._var_list(keys, lowerbound, upperbound, Variable.Type.INTEGER, name, key_format)
def get_variable(self, i: Union[int, str]) -> Variable:
if isinstance(i, int):
return self.variables[i]
else:
return self.variables[self._variables_index[i]]
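The bulk helpers defer naming to _add_variables, which auto-numbers integer-keyed variables and concatenates key_format for sequence keys; a small sketch of the resulting names:

qp = QuadraticProgram()
qp.binary_var_list(3, name='x')                             # creates x0, x1, x2
qp.integer_var_dict(['_a', '_b'], upperbound=10, name='y')  # creates y_a, y_b
print(qp.variables_index)   # {'x0': 0, 'x1': 1, 'x2': 2, 'y_a': 3, 'y_b': 4}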
|
Apache License 2.0
|
flyteorg/flytekit
|
flytekit/models/named_entity.py
|
NamedEntityState.enum_to_string
|
python
|
def enum_to_string(cls, val):
if val == cls.ACTIVE:
return "ACTIVE"
elif val == cls.ARCHIVED:
return "ARCHIVED"
else:
return "<UNKNOWN>"
|
:param int val:
:rtype: Text
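A quick sanity-check sketch of the mapping:

assert NamedEntityState.enum_to_string(NamedEntityState.ACTIVE) == 'ACTIVE'
assert NamedEntityState.enum_to_string(NamedEntityState.ARCHIVED) == 'ARCHIVED'
assert NamedEntityState.enum_to_string(-1) == '<UNKNOWN>'   # any unrecognized value falls through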
|
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/models/named_entity.py#L11-L21
|
from flyteidl.admin import common_pb2 as _common
from flytekit.models import common as _common_models
class NamedEntityState(object):
ACTIVE = _common.NAMED_ENTITY_ACTIVE
ARCHIVED = _common.NAMED_ENTITY_ARCHIVED
@classmethod
|
Apache License 2.0
|
red-hat-storage/ocs-ci
|
ocs_ci/ocs/flowtest.py
|
FlowOperations.__init__
|
python
|
def __init__(self):
self.sanity_helpers = Sanity()
|
Initialize Sanity instance
|
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/ocs_ci/ocs/flowtest.py#L19-L24
|
import logging
from ocs_ci.ocs import node, defaults, exceptions, constants
from ocs_ci.ocs.node import wait_for_nodes_status
from ocs_ci.ocs.resources import pod as pod_helpers
from ocs_ci.ocs.resources.pod import wait_for_storage_pods
from ocs_ci.utility.utils import TimeoutSampler, ceph_health_check
from ocs_ci.helpers.sanity_helpers import Sanity
logger = logging.getLogger(__name__)
class FlowOperations:
|
MIT License
|
voipgrid/cacofonisk
|
cacofonisk/reporters.py
|
BaseReporter.on_blind_transfer
|
python
|
def on_blind_transfer(self, caller, transferer, targets):
pass
|
Gets invoked when a blind transfer is completed.
A blind transfer is a transfer in which the transferer tells Asterisk to
refer the call to another number and then immediately ends the call. No
b_dial is triggered before the transfer is completed.
Args:
caller (SimpleChannel): The party being transferred.
transferer (SimpleChannel): The party initiating the transfer.
targets (list): The channels being dialed.
|
https://github.com/voipgrid/cacofonisk/blob/468c3cecd9800f91c1a3bb904c84473570139b65/cacofonisk/reporters.py#L82-L95
|
import logging
class BaseReporter(object):
def close(self):
pass
def on_event(self, event):
pass
def on_b_dial(self, caller, targets):
pass
def on_up(self, caller, target):
pass
def on_attended_transfer(self, caller, transferer, target):
pass
def on_blonde_transfer(self, caller, transferer, targets):
pass
|
MIT License
|
tedyst/hikload
|
hikvisionapi/RTSPutils.py
|
downloadRTSPOnlyFrames
|
python
|
def downloadRTSPOnlyFrames(url: str, videoName: str, modulo: int, seconds: int = 9999999, debug: bool = False, force: bool = False, skipSeconds: int = 0):
    # Sanity check: videoName needs a printf-style placeholder (e.g. %06d) so that
    # different frame numbers produce different file names.
    if videoName % 1 == videoName % 2:
        raise Exception("videoName cannot be formatted correctly")
if modulo <= 0:
raise Exception("modulo is not valid")
logger.debug("Starting download from: %s" % url)
try:
stream = ffmpeg.input(
url,
rtsp_transport="tcp",
stimeout=1,
t=seconds,
)
stream = ffmpeg.output(
stream,
videoName,
vf="select=not(mod(n\,%s))" % (modulo),
vsync="vfr",
ss=skipSeconds
)
except AttributeError:
raise Exception(
"The version of ffmpeg used is wrong! Be sure to uninstall ffmpeg using pip and install ffmpeg-python or use a virtualenv! For more information see the README!")
if os.path.exists(videoName):
logger.debug(
"The file %s exists, should have been downloaded from %s" % (videoName, url))
if force is False:
logger.warning("%s already exists" % videoName)
return
os.remove(videoName)
if not debug:
return ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
return ffmpeg.run(stream, capture_stdout=False, capture_stderr=False)
|
Downloads an image for every `modulo`-th frame from a URL and saves it to videoName.
Parameters:
url (str): The RTSP link to a stream
videoName (str): The filename of the downloaded stream
should be in the format `{name}_%06d.jpg`
modulo (int): Keep one frame out of every `modulo` frames (used in ffmpeg's select=not(mod(n, modulo)) filter)
seconds (int): The maximum number of seconds that should be recorded (default is 9999999)
debug (bool): Enables debug logging (default is False)
force (bool): Overwrite videoName if it already exists (default is False)
skipSeconds (int): The number of seconds that should be skipped when downloading (default is 0)
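A minimal call sketch; the RTSP URL and credentials are placeholders, not taken from the source:

# Save one JPEG for every 25th frame of the first 60 seconds of the stream.
downloadRTSPOnlyFrames(
    url='rtsp://user:pass@192.0.2.10:554/Streaming/Channels/101',  # placeholder endpoint
    videoName='camera01_%06d.jpg',   # must contain exactly one printf-style field
    modulo=25,
    seconds=60,
)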
|
https://github.com/tedyst/hikload/blob/a188d24a30d6c3239a7366602988094d5d4cdc4f/hikvisionapi/RTSPutils.py#L116-L160
|
from operator import mod
import ffmpeg
import os
import logging
logger = logging.getLogger(__name__)
def downloadRTSP(url: str, videoName: str, seconds: int = 9999999, debug: bool = False, force: bool = False, skipSeconds: int = 0):
logger.debug("Starting download from: %s" % url)
try:
if seconds:
stream = ffmpeg.input(
url,
rtsp_transport="tcp",
stimeout=1,
t=seconds,
)
else:
stream = ffmpeg.input(
url,
rtsp_transport="tcp",
stimeout=1,
)
if skipSeconds:
stream = ffmpeg.output(
stream,
videoName,
ss=skipSeconds
)
else:
stream = ffmpeg.output(
stream,
videoName
)
except AttributeError:
raise Exception(
"The version of ffmpeg used is wrong! Be sure to uninstall ffmpeg using pip and install ffmpeg-python or use a virtualenv! For more information see the README!")
if os.path.exists(videoName):
logger.debug(
"The file %s exists, should have been downloaded from %s" % (videoName, url))
if force is False:
logger.warning("%s already exists" % videoName)
return
os.remove(videoName)
if not debug:
return ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
return ffmpeg.run(stream, capture_stdout=False, capture_stderr=False)
def processSavedVideo(videoName: str, seconds: int = 9999999, debug: bool = False, skipSeconds: int = 0, fileFormat: str = "mp4", forceTranscode: bool = False):
if forceTranscode == False or forceTranscode == None:
if fileFormat == "mp4":
if skipSeconds == None and seconds == None:
logger.debug(
"Skipping processing %s since it is not needed" % videoName)
return
if skipSeconds == 0 and seconds == 9999999:
logger.debug(
"Skipping processing %s since it is not needed" % videoName)
return
logger.debug("Starting processing %s" % videoName)
try:
if seconds:
stream = ffmpeg.input(
videoName,
t=seconds,
)
else:
stream = ffmpeg.input(videoName)
newname = "%s-edited.%s" % (videoName.replace('.mp4', ''), fileFormat)
if skipSeconds:
stream = ffmpeg.output(
stream,
newname,
ss=skipSeconds
)
else:
stream = ffmpeg.output(
stream,
newname,
)
except AttributeError:
raise Exception(
"The version of ffmpeg used is wrong! Be sure to uninstall ffmpeg using pip and install ffmpeg-python or use a virtualenv! For more information see the README!")
if not debug:
ffmpeg.run(stream, capture_stdout=True,
capture_stderr=True, overwrite_output=True)
else:
ffmpeg.run(stream, capture_stdout=False,
capture_stderr=False, overwrite_output=True)
os.remove(videoName)
os.rename(newname, videoName)
|
MIT License
|
ros-industrial/robodk_postprocessors
|
Fanuc_R30iA.py
|
RobotPost.setSpeedJoints
|
python
|
def setSpeedJoints(self, speed_degs):
self.JOINT_SPEED = '%.0f%%' % max(min(100.0*speed_degs/200.0, 100.0), 1)
|
Changes the robot joint speed (in deg/s)
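The percentage is a clamped linear map of deg/s onto 1-100% of an assumed 200 deg/s maximum; a quick worked example (assuming the post-processor module and its robodk dependency import cleanly):

post = RobotPost(robotpost='Fanuc_R30iA', robotname='demo', robot_axes=6)
post.setSpeedJoints(50)     # 100*50/200 = 25    -> JOINT_SPEED == '25%'
post.setSpeedJoints(500)    # clamped at the top -> '100%'
post.setSpeedJoints(0.5)    # clamped at the bottom -> '1%'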
|
https://github.com/ros-industrial/robodk_postprocessors/blob/d7e6c1c07758d67d2906cfd638049bdff88cca72/Fanuc_R30iA.py#L431-L434
|
def get_safe_name(progname, max_chars = 10):
for c in r'-[]/\;,><&*:%=+@!#^()|?^':
progname = progname.replace(c,'')
if len(progname) <= 0:
progname = 'Program'
if progname[0].isdigit():
progname = 'P' + progname
if len(progname) > max_chars:
progname = progname[:max_chars]
return progname
from robodk import *
import sys
class RobotPost(object):
PROG_EXT = 'LS'
MAX_LINES_X_PROG = 9999
INCLUDE_SUB_PROGRAMS = True
JOINT_SPEED = '20%'
SPEED = '500mm/sec'
CNT_VALUE = 'FINE'
ACTIVE_UF = 9
ACTIVE_UT = 9
SPARE_PR = 9
LINE_COUNT = 0
P_COUNT = 0
nProgs = 0
LBL_ID_COUNT = 0
ROBOT_POST = ''
ROBOT_NAME = ''
PROG_FILES = []
PROG_NAMES = []
PROG_LIST = []
PROG_NAME = 'unknown'
PROG_NAME_CURRENT = 'unknown'
nPages = 0
PROG_NAMES_MAIN = []
PROG = []
PROG_TARGETS = []
LOG = ''
nAxes = 6
AXES_TYPE = ['R','R','R','R','R','R']
AXES_TRACK = []
AXES_TURNTABLE = []
HAS_TRACK = False
HAS_TURNTABLE = False
SPEED_BACKUP = None
LAST_POSE = None
def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs):
self.ROBOT_POST = robotpost
self.ROBOT_NAME = robotname
self.nAxes = robot_axes
self.PROG = []
self.LOG = ''
for k,v in kwargs.items():
if k == 'lines_x_prog':
self.MAX_LINES_X_PROG = v
if k == 'axes_type':
self.AXES_TYPE = v
for i in range(len(self.AXES_TYPE)):
if self.AXES_TYPE[i] == 'T':
self.AXES_TRACK.append(i)
self.HAS_TRACK = True
elif self.AXES_TYPE[i] == 'J':
self.AXES_TURNTABLE.append(i)
self.HAS_TURNTABLE = True
def ProgStart(self, progname, new_page = False):
progname = get_safe_name(progname)
progname_i = progname
if new_page:
if self.nPages == 0:
if len(self.PROG_NAMES_MAIN) > 0:
print("Can't split %s: Two or more programs are split into smaller programs" % progname)
print(self.PROG_NAMES_MAIN)
raise Exception("Only one program at a time can be split into smaller programs")
self.PROG_NAMES_MAIN.append(self.PROG_NAME)
self.nPages = self.nPages + 1
self.nPages = self.nPages + 1
progname_i = "%s%i" % (self.PROG_NAME, self.nPages)
self.PROG_NAMES_MAIN.append(progname_i)
else:
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.PROG_NAME = progname
self.nProgs = self.nProgs + 1
self.PROG_NAME_CURRENT = progname_i
self.PROG_NAMES.append(progname_i)
def ProgFinish(self, progname, new_page = False):
progname = get_safe_name(progname)
if not new_page:
self.nPages = 0
header = ''
header = header + ('/PROG %s' % self.PROG_NAME_CURRENT) + '\n'
header = header + '/ATTR' + '\n'
header = header + 'OWNER\t\t= MNEDITOR;' + '\n'
header = header + 'COMMENT\t\t= "RoboDK sequence";' + '\n'
header = header + 'PROG_SIZE\t= 0;' + '\n'
header = header + 'CREATE\t\t= DATE 31-12-14 TIME 12:00:00;' + '\n'
header = header + 'MODIFIED\t= DATE 31-12-14 TIME 12:00:00;' + '\n'
header = header + 'FILE_NAME\t= ;' + '\n'
header = header + 'VERSION\t\t= 0;' + '\n'
header = header + ('LINE_COUNT\t= %i;' % (self.LINE_COUNT)) + '\n'
header = header + 'MEMORY_SIZE\t= 0;' + '\n'
header = header + 'PROTECT\t\t= READ_WRITE;' + '\n'
header = header + 'TCD: STACK_SIZE\t= 0,' + '\n'
header = header + ' TASK_PRIORITY\t= 50,' + '\n'
header = header + ' TIME_SLICE\t= 0,' + '\n'
header = header + ' BUSY_LAMP_OFF\t= 0,' + '\n'
header = header + ' ABORT_REQUEST\t= 0,' + '\n'
header = header + ' PAUSE_REQUEST\t= 0;' + '\n'
header = header + 'DEFAULT_GROUP\t= 1,*,*,*,*,*,*;' + '\n'
header = header + 'CONTROL_CODE\t= 00000000 00000000;' + '\n'
if self.HAS_TURNTABLE:
header = header + '/APPL' + '\n'
header = header + '' + '\n'
header = header + 'LINE_TRACK;' + '\n'
header = header + 'LINE_TRACK_SCHEDULE_NUMBER : 0;' + '\n'
header = header + 'LINE_TRACK_BOUNDARY_NUMBER : 0;' + '\n'
header = header + 'CONTINUE_TRACK_AT_PROG_END : FALSE;' + '\n'
header = header + '' + '\n'
header = header + '/MN'
self.PROG.insert(0, header)
self.PROG.append('/POS')
self.PROG += self.PROG_TARGETS
self.PROG.append('/END')
self.PROG_LIST.append(self.PROG)
self.PROG = []
self.PROG_TARGETS = []
self.LINE_COUNT = 0
self.P_COUNT = 0
self.LBL_ID_COUNT = 0
def progsave(self, folder, progname, ask_user = False, show_result = False):
print(folder)
if not folder.endswith('/'):
folder = folder + '/'
progname = progname + '.' + self.PROG_EXT
if ask_user or not DirExists(folder):
filesave = getSaveFile(folder, progname, 'Save program as...')
if filesave is not None:
filesave = filesave.name
else:
return
else:
filesave = folder + progname
fid = open(filesave, "w")
for line in self.PROG:
fid.write(line)
fid.write('\n')
fid.close()
print('SAVED: %s\n' % filesave)
self.PROG_FILES.append(filesave)
if show_result:
if type(show_result) is str:
import subprocess
p = subprocess.Popen([show_result, filesave])
elif type(show_result) is list:
import subprocess
p = subprocess.Popen(show_result + [filesave])
else:
import os
os.startfile(filesave)
PATH_MAKE_TP = 'C:/Program Files (x86)/FANUC/WinOLPC/bin/'
if FileExists(PATH_MAKE_TP + 'MakeTP.exe'):
filesave_TP = filesave[:-3] + '.TP'
print("POPUP: Compiling LS file with MakeTP.exe: %s..." % progname)
sys.stdout.flush()
import subprocess
command = [PATH_MAKE_TP + 'MakeTP.exe', filesave.replace('/','\\'), filesave_TP.replace('/','\\'), '/config', PATH_MAKE_TP + 'robot.ini']
self.LOG += 'Program generation for: ' + progname + '\n'
with subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
for line in p.stdout:
line_ok = line.strip()
self.LOG += line_ok + '\n'
print("POPUP: " + line_ok)
sys.stdout.flush()
self.LOG += '\n'
def ProgSave(self, folder, progname, ask_user = False, show_result = False):
progname = get_safe_name(progname)
nfiles = len(self.PROG_LIST)
if nfiles >= 1:
if self.LINE_COUNT > 0:
print("Warning: ProgFinish was not called properly")
self.PROG_LIST.append(self.PROG)
self.PROG_NAMES.append("Unknown")
self.PROG = []
self.LINE_COUNT = 0
if len(self.PROG_NAMES_MAIN) > 1:
progname_main = "M_" + self.PROG_NAMES_MAIN[0]
self.INCLUDE_SUB_PROGRAMS = True
self.ProgStart(progname_main)
for prog_call in self.PROG_NAMES_MAIN:
self.RunCode(prog_call, True)
self.ProgFinish(progname_main)
self.PROG = self.PROG_LIST.pop()
progname_last = self.PROG_NAMES.pop()
self.progsave(folder, progname_last, ask_user, show_result)
if len(self.PROG_FILES) == 0:
return
first_file = self.PROG_FILES[0]
folder_user = getFileDir(first_file)
for i in range(len(self.PROG_LIST)):
self.PROG = self.PROG_LIST[i]
self.progsave(folder_user, self.PROG_NAMES[i], False, show_result)
elif nfiles == 1:
self.PROG = self.PROG_NAMES[0]
self.progsave(folder, progname, ask_user, show_result)
else:
print("Warning! Program has not been properly finished")
self.progsave(folder, progname, ask_user, show_result)
if show_result and len(self.LOG) > 0:
mbox('Program generation LOG:\n\n' + self.LOG)
def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass):
UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass)
def MoveJ(self, pose, joints, conf_RLF=None):
self.page_size_control()
target_id = self.add_target_joints(pose, joints)
move_ins = 'P[%i] %s %s ;' % (target_id, self.JOINT_SPEED, self.CNT_VALUE)
self.addline(move_ins, 'J')
self.LAST_POSE = pose
def MoveL(self, pose, joints, conf_RLF=None):
self.page_size_control()
if pose is None:
target_id = self.add_target_joints(pose, joints)
move_ins = 'P[%i] %s %s ;' % (target_id, self.SPEED, self.CNT_VALUE)
else:
target_id = self.add_target_cartesian(pose, joints, conf_RLF)
move_ins = 'P[%i] %s %s ;' % (target_id, self.SPEED, self.CNT_VALUE)
self.addline(move_ins, 'L')
self.LAST_POSE = pose
def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None):
self.page_size_control()
target_id1 = self.add_target_cartesian(pose1, joints1, conf_RLF_1)
target_id2 = self.add_target_cartesian(pose2, joints2, conf_RLF_2)
move_ins = 'P[%i] \n P[%i] %s %s ;' % (target_id1, target_id2, self.SPEED, self.CNT_VALUE)
self.addline(move_ins, 'C')
self.LAST_POSE = pose2
def setFrame(self, pose, frame_id=None, frame_name=None):
xyzwpr = Pose_2_Fanuc(pose)
if frame_id is None or frame_id < 0:
for i in range(6):
self.addline('PR[%i,%i]=%.3f ;' % (self.SPARE_PR, i+1, xyzwpr[i]))
for i in range(6,self.nAxes):
self.addline('PR[%i,%i]=%.3f ;' % (self.SPARE_PR, i+1, 0))
self.addline('UFRAME[%i]=PR[%i] ;' % (self.ACTIVE_UF, self.SPARE_PR))
self.addline('UFRAME_NUM=%i ;' % (self.ACTIVE_UF))
else:
self.ACTIVE_UF = frame_id
self.addline('UFRAME_NUM=%i ;' % (self.ACTIVE_UF))
self.RunMessage('UF%i:%.1f,%.1f,%.1f,%.1f,%.1f,%.1f' % (frame_id, xyzwpr[0], xyzwpr[1], xyzwpr[2], xyzwpr[3], xyzwpr[4], xyzwpr[5]), True)
def setTool(self, pose, tool_id=None, tool_name=None):
xyzwpr = Pose_2_Fanuc(pose)
if tool_id is None or tool_id < 0:
for i in range(6):
self.addline('PR[%i,%i]=%.3f ;' % (self.SPARE_PR, i+1, xyzwpr[i]))
for i in range(6,self.nAxes):
self.addline('PR[%i,%i]=%.3f ;' % (self.SPARE_PR, i+1, 0))
self.addline('UTOOL[%i]=PR[%i] ;' % (self.ACTIVE_UT, self.SPARE_PR))
self.addline('UTOOL_NUM=%i ;' % (self.ACTIVE_UT))
else:
self.ACTIVE_UT = tool_id
self.addline('UTOOL_NUM=%i ;' % (self.ACTIVE_UT))
self.RunMessage('UT%i:%.1f,%.1f,%.1f,%.1f,%.1f,%.1f' % (tool_id, xyzwpr[0], xyzwpr[1], xyzwpr[2], xyzwpr[3], xyzwpr[4], xyzwpr[5]), True)
def Pause(self, time_ms):
if time_ms <= 0:
self.addline('PAUSE ;')
else:
self.addline('WAIT %.2f(sec) ;' % (time_ms*0.001))
def setSpeed(self, speed_mms):
if self.SPEED_BACKUP is None:
self.SPEED = '%.0fmm/sec' % max(speed_mms, 0.01)
self.JOINT_SPEED = '%.0f%%' % max(min(100.0*speed_mms/5000.0, 100.0), 1)
else:
self.SPEED_BACKUP = '%.0fmm/sec' % max(speed_mms, 0.01)
def setAcceleration(self, accel_mmss):
self.addlog('setAcceleration not defined')
|
Apache License 2.0
|
cgatoxford/cgatpipelines
|
obsolete/pipeline_mapping_benchmark.py
|
loadBAMStats
|
python
|
def loadBAMStats(infiles, outfile):
scriptsdir = PARAMS["general_scriptsdir"]
header = ",".join([P.snip(os.path.basename(x), ".readstats")
for x in infiles])
filenames = " ".join(["<( cut -f 1,2 < %s)" % x for x in infiles])
tablename = P.toTable(outfile)
E.info("loading bam stats - summary")
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--ignore-empty
%(filenames)s
| perl -p -e "s/bin/track/"
| perl -p -e "s/unique/unique_alignments/"
| cgat table2table --transpose
| cgat csv2db
--allow-empty-file
--add-index=track
--table=%(tablename)s
> %(outfile)s"""
P.run()
for suffix in ("nm", "nh"):
E.info("loading bam stats - %s" % suffix)
filenames = " ".join(["%s.%s" % (x, suffix) for x in infiles])
tname = "%s_%s" % (tablename, suffix)
statement = """cgat combine_tables
--header-names=%(header)s
--skip-titles
--missing-value=0
--ignore-empty
%(filenames)s
| perl -p -e "s/bin/%(suffix)s/"
| cgat csv2db
--table=%(tname)s
--allow-empty-file
>> %(outfile)s """
P.run()
|
Import bam statistics into SQLite
|
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/obsolete/pipeline_mapping_benchmark.py#L207-L247
|
import sys
import os
import CGAT.Experiment as E
from ruffus import *
import pysam
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.Pipeline as P
USECLUSTER = True
P.getParameters(["%s/pipeline.ini" %
os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"])
PARAMS = P.PARAMS
bowtie_options = {'n0m1': "-n 0 -a --best --strata -m 1 -3 1", 'n1m1': "-n 1 -a --best --strata -m 1 -3 1", 'n2m1': "-n 2 -a --best --strata -m 1 -3 1", 'n3m1': "-n 3 -a --best --strata -m 1 -3 1",
'n0m2': "-n 0 -a --best --strata -m 2 -3 1", 'n1m2': "-n 1 -a --best --strata -m 2 -3 1", 'n2m2': "-n 2 -a --best --strata -m 2 -3 1", 'n3m2': "-n 3 -a --best --strata -m 2 -3 1",
'n0m3': "-n 0 -a --best --strata -m 3 -3 1", 'n1m3': "-n 1 -a --best --strata -m 3 -3 1", 'n2m3': "-n 2 -a --best --strata -m 3 -3 1", 'n3m3': "-n 3 -a --best --strata -m 3 -3 1",
'n0m4': "-n 0 -a --best --strata -m 4 -3 1", 'n1m4': "-n 1 -a --best --strata -m 4 -3 1", 'n2m4': "-n 2 -a --best --strata -m 4 -3 1", 'n3m4': "-n 3 -a --best --strata -m 4 -3 1",
'n0m5': "-n 0 -a --best --strata -m 5 -3 1", 'n1m5': "-n 1 -a --best --strata -m 5 -3 1", 'n2m5': "-n 2 -a --best --strata -m 5 -3 1", 'n3m5': "-n 3 -a --best --strata -m 5 -3 1",
'v0m1': "-v 0 -a --best --strata -m 1 -3 1", 'v1m1': "-v 1 -a --best --strata -m 1 -3 1", 'v2m1': "-v 2 -a --best --strata -m 1 -3 1", 'v3m1': "-v 3 -a --best --strata -m 1 -3 1",
'v0m2': "-v 0 -a --best --strata -m 2 -3 1", 'v1m2': "-v 1 -a --best --strata -m 2 -3 1", 'v2m2': "-v 2 -a --best --strata -m 2 -3 1", 'v3m2': "-v 3 -a --best --strata -m 2 -3 1",
'v0m3': "-v 0 -a --best --strata -m 3 -3 1", 'v1m3': "-v 1 -a --best --strata -m 3 -3 1", 'v2m3': "-v 2 -a --best --strata -m 3 -3 1", 'v3m3': "-v 3 -a --best --strata -m 3 -3 1",
'v0m4': "-v 0 -a --best --strata -m 4 -3 1", 'v1m4': "-v 1 -a --best --strata -m 4 -3 1", 'v2m4': "-v 2 -a --best --strata -m 4 -3 1", 'v3m4': "-v 3 -a --best --strata -m 4 -3 1",
'v0m5': "-v 0 -a --best --strata -m 5 -3 1", 'v1m5': "-v 1 -a --best --strata -m 5 -3 1", 'v2m5': "-v 2 -a --best --strata -m 5 -3 1", 'v3m5': "-v 3 -a --best --strata -m 5 -3 1"}
@files([(PARAMS["test_file"], "%s.bam" % x, bowtie_options.get(x)) for x in list(bowtie_options.keys())])
def buildBAM(infile, outfile, options):
job_threads = PARAMS["bowtie_threads"]
m = PipelineMapping.Bowtie()
reffile = PARAMS["samtools_genome"]
bowtie_options = options
statement = m.build((infile,), outfile)
P.run()
@transform(buildBAM,
regex(r"(\S+).bam"),
r"\1.nsrt.bam")
def sortByName(infile, outfile):
to_cluster = USECLUSTER
track = P.snip(outfile, ".bam")
statement = '''samtools sort -n %(infile)s %(track)s;'''
P.run()
@transform(sortByName,
regex(r"(\S+).nsrt.bam"),
r"\1.nh.bam")
def addNHTag(infile, outfile):
to_cluster = USECLUSTER
inf = pysam.Samfile(infile, "rb")
outf = pysam.Samfile(outfile, "wb", template=inf)
for readset in read_sets(inf, keep_unmapped=True):
nh = len(readset)
for read in readset:
if (read.is_unmapped):
nh = 0
read.tags = read.tags + [("NH", nh)]
outf.write(read)
inf.close()
outf.close()
@transform(addNHTag,
regex(r"(\S+).bam"),
r"\1.srt.bam")
def sortByPosition(infile, outfile):
to_cluster = USECLUSTER
track = P.snip(outfile, ".bam")
statement = '''samtools sort %(infile)s %(track)s;'''
P.run()
@transform(sortByPosition,
regex(r"(\S+).nh.srt.bam"),
r"\1.dedup.bam")
def dedup(infiles, outfile):
to_cluster = USECLUSTER
track = P.snip(outfile, ".bam")
statement = '''MarkDuplicates INPUT=%(infiles)s ASSUME_SORTED=true OUTPUT=%(outfile)s METRICS_FILE=%(track)s.dupstats VALIDATION_STRINGENCY=SILENT; ''' % locals(
)
statement += '''samtools index %(outfile)s; ''' % locals()
P.run()
@merge(dedup, "picard_duplicate_stats.load")
def loadPicardDuplicateStats(infiles, outfile):
tablename = P.toTable(outfile)
outf = open('dupstats.txt', 'w')
first = True
for f in infiles:
track = P.snip(os.path.basename(f), ".dedup.bam")
statfile = P.snip(f, ".bam") + ".dupstats"
if not os.path.exists(statfile):
E.warn("File %s missing" % statfile)
continue
lines = [x for x in open(
statfile, "r").readlines() if not x.startswith("#") and x.strip()]
if first:
outf.write("%s\t%s" % ("track", lines[0]))
first = False
outf.write("%s\t%s" % (track, lines[1]))
outf.close()
tmpfilename = outf.name
statement = '''cat %(tmpfilename)s
| cgat csv2db
--add-index=track
--table=%(tablename)s
> %(outfile)s
'''
P.run()
@transform(dedup,
regex(r"(\S+).dedup.bam"),
r"\1.readstats")
def buildBAMStats(infile, outfile):
to_cluster = USECLUSTER
scriptsdir = PARAMS["general_scriptsdir"]
statement = '''cgat bam2stats --force-output
--output-filename-pattern=%(outfile)s.%%s < %(infile)s > %(outfile)s'''
P.run()
@merge(buildBAMStats, "bam_stats.load")
|
MIT License
|
uwbmrb/pynmrstar
|
pynmrstar/loop.py
|
Loop.set_category
|
python
|
def set_category(self, category: str) -> None:
self.category = utils.format_category(category)
|
Set the category of the loop. Useful if you didn't know the
category at loop creation time.
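A small sketch using the from_scratch constructor from the surrounding class:

loop = Loop.from_scratch()
loop.add_tag(['Family_name', 'Given_name'])
loop.set_category('_Citation_author')
print(loop.get_tag_names())
# ['_Citation_author.Family_name', '_Citation_author.Given_name']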
|
https://github.com/uwbmrb/pynmrstar/blob/c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171/pynmrstar/loop.py#L935-L939
|
import json
import warnings
from copy import deepcopy
from csv import reader as csv_reader, writer as csv_writer
from io import StringIO
from itertools import chain
from typing import TextIO, BinaryIO, Union, List, Optional, Any, Dict, Callable, Tuple
from pynmrstar import definitions, utils, entry as entry_mod
from pynmrstar._internal import _json_serialize, _interpret_file
from pynmrstar.exceptions import InvalidStateError
from pynmrstar.parser import Parser
from pynmrstar.schema import Schema
class Loop(object):
def __contains__(self, item: Any) -> bool:
if isinstance(item, (list, tuple)):
to_process: List[str] = list(item)
elif isinstance(item, str):
to_process = [item]
else:
return False
lc_tags = self._lc_tags
for tag in to_process:
if utils.format_tag(tag).lower() not in lc_tags:
return False
return True
def __eq__(self, other) -> bool:
if not isinstance(other, Loop):
return False
return (self.category, self._tags, self.data) == (other.category, other._tags, other.data)
def __getitem__(self, item: Union[int, str, List[str], Tuple[str]]) -> list:
try:
return self.data[item]
except TypeError:
if isinstance(item, tuple):
item = list(item)
return self.get_tag(tags=item)
def __init__(self, **kwargs) -> None:
self._tags: List[str] = []
self.data: List[List[Any]] = []
self.category: Optional[str] = None
self.source: str = "unknown"
star_buffer: StringIO = StringIO("")
if 'source' in kwargs:
self.source = kwargs['source']
if 'category' in kwargs:
self.category = utils.format_category(kwargs['category'])
return
if len(kwargs) == 0:
raise ValueError("You should not directly instantiate a Loop using this method. Instead use the "
"class methods: Loop.from_scratch(), Loop.from_string(), Loop.from_template(), "
"Loop.from_file(), and Loop.from_json().")
if 'the_string' in kwargs:
star_buffer = StringIO(kwargs['the_string'])
self.source = "from_string()"
elif 'file_name' in kwargs:
star_buffer = _interpret_file(kwargs['file_name'])
self.source = f"from_file('{kwargs['file_name']}')"
elif 'tag_prefix' in kwargs:
tags = Loop._get_tags_from_schema(kwargs['tag_prefix'], all_tags=kwargs['all_tags'],
schema=kwargs['schema'])
for tag in tags:
self.add_tag(tag)
return
if 'csv' in kwargs and kwargs['csv']:
csv_file = csv_reader(star_buffer)
self.add_tag(next(csv_file))
for row in csv_file:
self.add_data(row, convert_data_types=kwargs.get('convert_data_types', False))
self.source = f"from_csv('{kwargs['csv']}')"
return
tmp_entry = entry_mod.Entry.from_scratch(0)
star_buffer = StringIO(f"data_0 save_internaluseyoushouldntseethis_frame _internal.use internal "
f"{star_buffer.read()} save_")
parser = Parser(entry_to_parse_into=tmp_entry)
parser.parse(star_buffer.read(), source=self.source, convert_data_types=kwargs.get('convert_data_types', False))
if len(tmp_entry[0].loops) > 1:
raise ValueError("You attempted to parse one loop but the source you provided had more than one loop. "
"Please either parse all loops as a saveframe or only parse one loop. Loops detected: " +
str(tmp_entry[0].loops))
self._tags = tmp_entry[0][0].tags
self.data = tmp_entry[0][0].data
self.category = tmp_entry[0][0].category
def __iter__(self) -> list:
for row in self.data:
yield row
def __len__(self) -> int:
return len(self.data)
def __lt__(self, other) -> bool:
if not isinstance(other, Loop):
return NotImplemented
return self.category < other.category
def __repr__(self) -> str:
return f"<pynmrstar.Loop '{self.category}'>"
def __setitem__(self, key: str, item: Any) -> None:
tag = utils.format_tag(key)
if tag not in self._tags:
raise ValueError(f"Cannot assign to tag '{key}' as it does not exist in this loop.")
tag_id = self._tags.index(tag)
if len(self[key]) != len(item):
raise ValueError("To assign to a tag you must provide a list (or iterable) of a length equal to the "
f"number of values that currently exist for that tag. The tag '{key}' currently has"
f" {len(self[key])} values and you supplied {len(item)} values.")
for pos, row in enumerate(self.data):
row[tag_id] = item[pos]
def __str__(self, skip_empty_loops: bool = False, skip_empty_tags: bool = False) -> str:
if len(self.data) == 0:
if skip_empty_loops:
return ""
else:
if len(self._tags) == 0:
return "\n loop_\n\n stop_\n"
if len(self._tags) == 0:
raise InvalidStateError("Impossible to print data if there are no associated tags. Error in loop "
f"'{self.category}' which contains data but hasn't had any tags added.")
self._check_tags_match_data()
if skip_empty_tags:
has_data = [not all([_ in definitions.NULL_VALUES for _ in column]) for column in zip(*self.data)]
return self.filter([tag for x, tag in enumerate(self._tags) if has_data[x]]).format()
return_chunks = ["\n loop_\n"]
format_string = " %-s\n"
if self.category is None:
raise InvalidStateError("The category was never set for this loop. Either add a tag with the category "
"intact, specify it when generating the loop, or set it using Loop.set_category().")
if self.category is None:
for tag in self._tags:
return_chunks.append(format_string % tag)
else:
for tag in self._tags:
return_chunks.append(format_string % (self.category + "." + tag))
return_chunks.append("\n")
if len(self.data) != 0:
working_data = []
title_widths = [4]*len(self.data[0])
for row_pos, row in enumerate(self.data):
clean_row = []
for col_pos, x in enumerate(row):
try:
clean_val = utils.quote_value(x)
clean_row.append(clean_val)
length = len(clean_val) + 3
if length > title_widths[col_pos] and "\n" not in clean_val:
title_widths[col_pos] = length
except ValueError:
raise InvalidStateError('Cannot generate NMR-STAR for entry, as empty strings are not valid '
'tag values in NMR-STAR. Please either replace the empty strings with'
' None objects, or set pynmrstar.definitions.STR_CONVERSION_DICT['
'\'\'] = None.\n'
f'Loop: {self.category} Row: {row_pos} Column: {col_pos}')
working_data.append(clean_row)
format_string = " " + "%-*s" * len(self._tags) + " \n"
for datum in working_data:
for pos, item in enumerate(datum):
if "\n" in item:
datum[pos] = "\n;\n%s;\n" % item
tag_width_list = [d for d in zip(title_widths, datum)]
return_chunks.append(format_string % tuple(chain.from_iterable(tag_width_list)))
return "".join(return_chunks) + "\n stop_\n"
@property
def _lc_tags(self) -> Dict[str, int]:
return {_[1].lower(): _[0] for _ in enumerate(self._tags)}
@property
def empty(self) -> bool:
for row in self.data:
for col in row:
if col not in definitions.NULL_VALUES:
return False
return True
@property
def tags(self) -> List[str]:
return self._tags
@classmethod
def from_file(cls, the_file: Union[str, TextIO, BinaryIO], csv: bool = False, convert_data_types: bool = False):
return cls(file_name=the_file, csv=csv, convert_data_types=convert_data_types)
@classmethod
def from_json(cls, json_dict: Union[dict, str]):
if not isinstance(json_dict, dict):
try:
json_dict = json.loads(json_dict)
except (TypeError, ValueError):
raise ValueError("The JSON you provided was neither a Python dictionary nor a JSON string.")
for check in ['tags', 'category', 'data']:
if check not in json_dict:
raise ValueError(f"The JSON you provide must be a dictionary and must contain the key '{check}' - even"
f" if the key points to None.")
ret = Loop.from_scratch()
ret._tags = json_dict['tags']
ret.category = json_dict['category']
ret.data = json_dict['data']
ret.source = "from_json()"
return ret
@classmethod
def from_scratch(cls, category: str = None, source: str = "from_scratch()"):
return cls(category=category, source=source)
@classmethod
def from_string(cls, the_string: str, csv: bool = False, convert_data_types: bool = False):
return cls(the_string=the_string, csv=csv, convert_data_types=convert_data_types)
@classmethod
def from_template(cls, tag_prefix: str, all_tags: bool = False, schema: Schema = None):
schema = utils.get_schema(schema)
return cls(tag_prefix=tag_prefix, all_tags=all_tags,
schema=schema, source=f"from_template({schema.version})")
@staticmethod
def _get_tags_from_schema(category: str, schema: Schema = None, all_tags: bool = False) -> List[str]:
schema = utils.get_schema(schema)
if not category.startswith("_"):
category = "_" + category
if not category.endswith("."):
category = category + "."
tags = []
for item in schema.schema_order:
if item.lower().startswith(category.lower()):
if all_tags:
tags.append(item)
else:
if schema.schema[item.lower()]["public"] != "I":
tags.append(item)
if len(tags) == 0:
raise InvalidStateError(f"The tag prefix '{category}' has no corresponding tags in the dictionary.")
return tags
def _check_tags_match_data(self) -> bool:
if len(self.data) > 0:
for x, row in enumerate(self.data):
if len(self._tags) != len(row):
raise InvalidStateError(f"The number of tags must match the width of the data. Error in loop "
f"'{self.category}'. In this case, there are {len(self._tags)} tags, and "
f"row number {x} has {len(row)} tags.")
return True
def add_data(self, the_list: List[Any], rearrange: bool = False, convert_data_types: bool = False):
if not rearrange:
if len(the_list) != len(self._tags):
raise ValueError("The list must have the same number of elements as the number of tags when adding a "
"single row of values! Insert tag names first by calling Loop.add_tag().")
self.data.append(the_list)
return
processed_data = [the_list[x:x + len(self._tags)] for x in range(0, len(the_list), len(self._tags))]
if len(processed_data[-1]) != len(self._tags):
raise ValueError(f"The number of data elements in the list you provided is not an even multiple of the "
f"number of tags which are set in the loop. Please either add missing tags using "
f"Loop.add_tag() or modify the list of tag values you are adding to be an even multiple "
f"of the number of tags. Error in loop '{self.category}'.")
if convert_data_types:
schema = utils.get_schema()
for row in processed_data:
for tag_id, datum in enumerate(row):
row[tag_id] = schema.convert_tag(self.category + "." + self._tags[tag_id], datum)
self.data.extend(processed_data)
def add_data_by_tag(self, tag_name: str, value) -> None:
warnings.warn("Deprecated: It is recommended to use Loop.add_data() instead for most use cases.",
DeprecationWarning)
if "." in tag_name:
supplied_category = utils.format_category(str(tag_name))
if supplied_category.lower() != self.category.lower():
raise ValueError(f"Category provided in your tag '{supplied_category}' does not match this loop's "
f"category '{self.category}'.")
pos = self.tag_index(tag_name)
if pos is None:
raise ValueError(f"The tag '{tag_name}' to which you are attempting to add data does not yet exist. Create "
f"the tags using Loop.add_tag() before adding data.")
if len(self.data) == 0:
self.data.append([])
if len(self.data[-1]) == len(self._tags):
self.data.append([])
if len(self.data[-1]) != pos:
raise ValueError("You cannot add data out of tag order.")
self.data[-1].append(value)
def add_missing_tags(self, schema: 'Schema' = None, all_tags: bool = False) -> None:
self.add_tag(Loop._get_tags_from_schema(self.category, schema=schema, all_tags=all_tags),
ignore_duplicates=True, update_data=True)
self.sort_tags()
try:
self.sort_rows("Ordinal")
except ValueError:
pass
except TypeError:
ordinal_idx = self.tag_index("Ordinal")
for pos, row in enumerate(self.data):
row[ordinal_idx] = pos + 1
def add_tag(self, name: Union[str, List[str]], ignore_duplicates: bool = False, update_data: bool = False) -> None:
if isinstance(name, (list, tuple)):
for item in name:
self.add_tag(item, ignore_duplicates=ignore_duplicates, update_data=update_data)
return
name = name.strip()
if "." in name:
if name[0] != ".":
category = name[0:name.index(".")]
if category[:1] != "_":
category = "_" + category
if self.category is None:
self.category = category
elif self.category.lower() != category.lower():
raise ValueError("One loop cannot have tags with different categories (or tags that don't "
f"match the loop category)! The loop category is '{self.category}' while "
f"the category in the tag was '{category}'.")
name = name[name.index(".") + 1:]
else:
name = name[1:]
if self.tag_index(name) is not None:
if ignore_duplicates:
return
else:
raise ValueError(f"There is already a tag with the name '{name}' in the loop '{self.category}'.")
if name in definitions.NULL_VALUES:
raise ValueError(f"Cannot use a null-equivalent value as a tag name. Invalid tag name: '{name}'")
if "." in name:
raise ValueError(f"There cannot be more than one '.' in a tag name. Invalid tag name: '{name}'")
for char in str(name):
if char in utils.definitions.WHITESPACE:
raise ValueError(f"Tag names can not contain whitespace characters. Invalid tag name: '{name}")
self._tags.append(name)
if update_data:
for row in self.data:
row.append(None)
def clear_data(self) -> None:
self.data = []
def compare(self, other) -> List[str]:
diffs = []
if self is other:
return []
if isinstance(other, str):
if str(self) == other:
return []
else:
return ['String was not exactly equal to loop.']
elif not isinstance(other, Loop):
return ['Other object is not of class Loop.']
if str(other) == str(self):
return []
try:
if str(self.category).lower() != str(other.category).lower():
diffs.append(f"\t\tCategory of loops does not match: '{self.category}' vs '{other.category}'.")
if ([x.lower() for x in self._tags] !=
[x.lower() for x in other.tags]):
diffs.append(f"\t\tLoop tag names do not match for loop with category '{self.category}'.")
else:
if self.data != other.data:
self_data = sorted(deepcopy(self.data))
other_data = sorted(deepcopy(other.data))
if self_data != other_data:
diffs.append(f"\t\tLoop data does not match for loop with category '{self.category}'.")
except AttributeError as err:
diffs.append(f"\t\tAn exception occurred while comparing: '{err}'.")
return diffs
def delete_tag(self, tag: Union[str, List[str]]) -> None:
warnings.warn('Please use remove_tag() instead.', DeprecationWarning)
return self.remove_tag(tag)
def delete_data_by_tag_value(self, tag: str, value: Any, index_tag: str = None) -> List[List[Any]]:
warnings.warn('Please use remove_data_by_tag_value() instead.', DeprecationWarning)
return self.remove_data_by_tag_value(tag, value, index_tag)
def filter(self, tag_list: Union[str, List[str], Tuple[str]], ignore_missing_tags: bool = False):
result = Loop.from_scratch()
valid_tags = []
if not isinstance(tag_list, (list, tuple)):
tag_list = [tag_list]
for tag in tag_list:
tag_match_index = self.tag_index(tag)
if tag_match_index is None:
if not ignore_missing_tags:
raise KeyError(f"Cannot filter tag '{tag}' as it isn't present in this loop.")
continue
valid_tags.append(tag)
result.add_tag(self._tags[tag_match_index])
results = self.get_tag(valid_tags)
if len(valid_tags) == 1:
for item in results:
result.add_data([item])
else:
for row in results:
assert isinstance(row, list)
result.add_data(row)
if result.category is None:
result.category = self.category
return result
def format(self, skip_empty_loops: bool = True, skip_empty_tags: bool = False) -> str:
return self.__str__(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags)
def get_data_as_csv(self, header: bool = True, show_category: bool = True) -> str:
csv_buffer = StringIO()
csv_writer_object = csv_writer(csv_buffer)
if header:
if show_category:
csv_writer_object.writerow(
[str(self.category) + "." + str(x) for x in self._tags])
else:
csv_writer_object.writerow([str(x) for x in self._tags])
for row in self.data:
data = []
for piece in row:
data.append(piece)
csv_writer_object.writerow(data)
csv_buffer.seek(0)
return csv_buffer.read().replace('\r\n', '\n')
def get_json(self, serialize: bool = True) -> Union[dict, str]:
loop_dict = {
"category": self.category,
"tags": self._tags,
"data": self.data
}
if serialize:
return json.dumps(loop_dict, default=_json_serialize)
else:
return loop_dict
def get_tag_names(self) -> List[str]:
if not self.category:
raise InvalidStateError("You never set the category of this loop. You must set the category before calling "
"this method, either by setting the loop category directly when creating the loop "
"using the Loop.from_scratch() class method, by calling loop.set_category(), or by "
"adding a fully qualified tag which includes the loop category (for example, "
"adding '_Citation_author.Family_name' rather than just 'Family_name').")
return [self.category + "." + x for x in self._tags]
def get_tag(self, tags: Optional[Union[str, List[str]]] = None, whole_tag: bool = False,
dict_result: bool = False) -> Union[List[Any], List[Dict[str, Any]]]:
if tags is None:
if not dict_result:
return self.data
else:
tags = [self._tags]
if not isinstance(tags, list):
tags = [tags]
lower_tags = deepcopy(tags)
for pos, item in enumerate([str(x) for x in lower_tags]):
if "." in item and utils.format_category(item).lower() != self.category.lower():
raise ValueError(f"Cannot fetch data with tag '{item}' because the category does not match the "
f"category of this loop '{self.category}'.")
lower_tags[pos] = utils.format_tag(item).lower()
tags_lower = [x.lower() for x in self._tags]
tag_mapping = dict(zip(reversed(tags_lower), reversed(range(len(tags_lower)))))
tag_ids = []
for pos, query in enumerate(lower_tags):
if str(query) in tag_mapping:
tag_ids.append(tag_mapping[query])
elif isinstance(query, int):
tag_ids.append(query)
else:
raise KeyError(f"Could not locate the tag with name or ID: '{tags[pos]}' in loop '{self.category}'.")
if not dict_result:
if whole_tag:
result = [[[self.category + "." + self._tags[col_id], row[col_id]]
for col_id in tag_ids] for row in self.data]
else:
result = [[row[col_id] for col_id in tag_ids] for row in self.data]
if len(lower_tags) == 1:
return [x[0] for x in result]
else:
return result
else:
if whole_tag:
result = [dict((self.category + "." + self._tags[col_id], row[col_id]) for col_id in tag_ids) for
row in self.data]
else:
result = [dict((self._tags[col_id], row[col_id]) for col_id in tag_ids) for row in self.data]
return result
def print_tree(self) -> None:
print(repr(self))
def remove_data_by_tag_value(self, tag: str, value: Any, index_tag: str = None) -> List[List[Any]]:
if "." in tag:
supplied_category = utils.format_category(str(tag))
if supplied_category.lower() != self.category.lower():
raise ValueError(f"The category provided in your tag '{supplied_category}' does not match this loop's "
f"category '{self.category}'.")
search_tag = self.tag_index(tag)
if search_tag is None:
raise ValueError(f"The tag you provided '{tag}' isn't in this loop!")
deleted = []
cur_row = 0
while cur_row < len(self.data):
if self.data[cur_row][search_tag] == value:
deleted.append(self.data.pop(cur_row))
continue
cur_row += 1
if index_tag is not None:
self.renumber_rows(index_tag)
return deleted
def remove_tag(self, tag: Union[str, List[str]]) -> None:
if not isinstance(tag, list):
tag = [tag]
for each_tag in tag:
if self.tag_index(each_tag) is None:
raise KeyError(f"There is no tag with name '{each_tag}' to remove in loop '{self.category}'.")
for each_tag in tag:
tag_position: int = self.tag_index(each_tag)
del self._tags[tag_position]
for row in self.data:
del row[tag_position]
def renumber_rows(self, index_tag: str, start_value: int = 1, maintain_ordering: bool = False):
if "." in str(index_tag):
supplied_category = utils.format_category(str(index_tag))
if supplied_category.lower() != self.category.lower():
raise ValueError(f"Category provided in your tag '{supplied_category}' does not match this loop's "
f"category '{self.category}'.")
renumber_tag = self.tag_index(index_tag)
if renumber_tag is None:
try:
renumber_tag = int(index_tag)
except ValueError:
raise ValueError(f"The renumbering tag you provided '{index_tag}' isn't in this loop!")
if len(self.data) == 0:
return
self._check_tags_match_data()
if maintain_ordering:
data_copy = deepcopy(self.data)
offset = 0
for pos in range(0, len(self.data)):
try:
if pos == 0:
offset = start_value - int(self.data[0][renumber_tag])
new_data = int(self.data[pos][renumber_tag]) + offset
if isinstance(self.data[pos][renumber_tag], str):
self.data[pos][renumber_tag] = str(new_data)
else:
self.data[pos][renumber_tag] = new_data
except ValueError:
self.data = data_copy
raise ValueError("You can't renumber a row containing anything that can't be coerced into an "
"integer using maintain_ordering. I.e. what am I suppose to renumber "
f"'{self.data[pos][renumber_tag]}' to?")
else:
for pos in range(0, len(self.data)):
if isinstance(self.data[pos][renumber_tag], str):
self.data[pos][renumber_tag] = str(pos + start_value)
else:
self.data[pos][renumber_tag] = pos + start_value
|
MIT License
|
varianapis/pyesapi
|
pyesapi/stubs/System/Collections/ObjectModel.py
|
Collection.__new__
|
python
|
def __new__(self, list=None):
pass
|
__new__(cls: type)
__new__(cls: type, list: IList[T])
|
https://github.com/varianapis/pyesapi/blob/c7b1d2986cab9387e85dbb4331a44e5b743b86ea/pyesapi/stubs/System/Collections/ObjectModel.py#L251-L257
|
class Collection(object, IList[T], ICollection[T], IEnumerable[T], IEnumerable, IList, ICollection, IReadOnlyList[T], IReadOnlyCollection[T]):
def Add(self, item):
pass
def Clear(self):
pass
def ClearItems(self, *args):
pass
def Contains(self, item):
pass
def CopyTo(self, array, index):
pass
def GetEnumerator(self):
pass
def IndexOf(self, item):
pass
def Insert(self, index, item):
pass
def InsertItem(self, *args):
pass
def Remove(self, item):
pass
def RemoveAt(self, index):
pass
def RemoveItem(self, *args):
pass
def SetItem(self, *args):
pass
def __add__(self, *args):
pass
def __contains__(self, *args):
pass
def __getitem__(self, *args):
pass
def __init__(self, *args):
pass
def __iter__(self, *args):
pass
def __len__(self, *args):
pass
@staticmethod
|
MIT License
|
gwww/elkm1
|
elkm1_lib/message.py
|
pc_encode
|
python
|
def pc_encode(index, function_code, extended_code, seconds):
return MessageEncode(
f"11pc{index_to_housecode(index)}{function_code:02}{extended_code:02}{seconds:04}00",
None,
)
|
pc: Control any PLC device.
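A sketch of the encoded frame; index_to_housecode lives elsewhere in the library, so the 'A01' housecode below is an assumption for index 0:

encode = pc_encode(index=0, function_code=2, extended_code=0, seconds=5)
# With index 0 assumed to map to housecode 'A01', encode.message would be
# '11pcA010200000500' and encode.response_command is None.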
|
https://github.com/gwww/elkm1/blob/e84865b6b3a6d4ba1d062eefcada44c123180b9a/elkm1_lib/message.py#L450-L455
|
import datetime as dt
import re
import time
from collections import namedtuple
from .const import Max
MessageEncode = namedtuple("MessageEncode", ["message", "response_command"])
class MessageDecode:
def __init__(self):
self._handlers = {}
def add_handler(self, message_type, handler):
if message_type not in self._handlers:
self._handlers[message_type] = []
if handler not in self._handlers[message_type]:
self._handlers[message_type].append(handler)
def remove_handler(self, message_type, handler):
if message_type not in self._handlers:
return
if handler in self._handlers[message_type]:
self._handlers[message_type].remove(handler)
def call_handlers(self, cmd, decoded_msg):
for handler in self._handlers.get(cmd, []):
handler(**decoded_msg)
def decode(self, msg):
valid, error_msg = _is_valid_length_and_checksum(msg)
if valid:
cmd = msg[2:4]
decoder = getattr(self, f"_{cmd.lower()}_decode", None)
if not decoder:
cmd = "unknown"
decoder = self._unknown_decode
try:
self.call_handlers(cmd, decoder(msg))
except (IndexError, ValueError) as exc:
raise ValueError("Cannot decode message") from exc
return
if not msg or msg.startswith("Username: ") or msg.startswith("Password: "):
return
if "Login successful" in msg:
self.call_handlers("login", {"succeeded": True})
elif msg.startswith("Username/Password not found") or msg == "Disabled":
self.call_handlers("login", {"succeeded": False})
else:
raise ValueError(error_msg)
def _am_decode(self, msg):
return {"alarm_memory": msg[4 : 4 + Max.AREAS.value]}
def _as_decode(self, msg):
return {
"armed_statuses": msg[4:12],
"arm_up_states": msg[12:20],
"alarm_states": msg[20:28],
}
def _az_decode(self, msg):
return {"alarm_status": msg[4 : 4 + Max.ZONES.value]}
def _cr_one_custom_value_decode(self, index, part):
value = int(part[0:5])
value_format = int(part[5])
if value_format == 2:
value = ((value >> 8) & 0xFF, value & 0xFF)
return {"index": index, "value": value, "value_format": value_format}
def _cr_decode(self, msg):
if int(msg[4:6]) > 0:
index = int(msg[4:6]) - 1
return {"values": [self._cr_one_custom_value_decode(index, msg[6:12])]}
part = 6
ret = []
for i in range(Max.SETTINGS.value):
ret.append(self._cr_one_custom_value_decode(i, msg[part : part + 6]))
part += 6
return {"values": ret}
def _cc_decode(self, msg):
return {"output": int(msg[4:7]) - 1, "output_status": msg[7] == "1"}
def _cs_decode(self, msg):
output_status = [x == "1" for x in msg[4 : 4 + Max.OUTPUTS.value]]
return {"output_status": output_status}
def _cv_decode(self, msg):
return {"counter": int(msg[4:6]) - 1, "value": int(msg[6:11])}
def _ee_decode(self, msg):
return {
"area": int(msg[4:5]) - 1,
"is_exit": msg[5:6] == "0",
"timer1": int(msg[6:9]),
"timer2": int(msg[9:12]),
"armed_status": msg[12:13],
}
def _ic_decode(self, msg):
code = msg[4:16]
if re.match(r"(0\d){6}", code):
code = re.sub(r"0(\d)", r"\1", code)
return {
"code": code,
"user": int(msg[16:19]) - 1,
"keypad": int(msg[19:21]) - 1,
}
def _ie_decode(self, _msg):
return {}
def _ka_decode(self, msg):
return {"keypad_areas": [ord(x) - 0x31 for x in msg[4 : 4 + Max.KEYPADS.value]]}
def _kc_decode(self, msg):
return {"keypad": int(msg[4:6]) - 1, "key": int(msg[6:8])}
def _ld_decode(self, msg):
area = int(msg[11]) - 1
hour = int(msg[12:14])
minute = int(msg[14:16])
month = int(msg[16:18])
day = int(msg[18:20])
year = int(msg[24:26]) + 2000
log_local_datetime = dt.datetime(year, month, day, hour, minute)
log_local_time = time.mktime(log_local_datetime.timetuple())
log_gm_timestruct = time.gmtime(log_local_time)
log = {}
log["event"] = int(msg[4:8])
log["number"] = int(msg[8:11])
log["index"] = int(msg[20:23])
log["timestamp"] = dt.datetime(
*log_gm_timestruct[:6], tzinfo=dt.timezone.utc
).isoformat()
return {"area": area, "log": log}
def _lw_decode(self, msg):
keypad_temps = []
zone_temps = []
for i in range(16):
keypad_temps.append(int(msg[4 + 3 * i : 7 + 3 * i]) - 40)
zone_temps.append(int(msg[52 + 3 * i : 55 + 3 * i]) - 60)
return {"keypad_temps": keypad_temps, "zone_temps": zone_temps}
def _pc_decode(self, msg):
housecode = msg[4:7]
return {
"housecode": housecode,
"index": housecode_to_index(housecode),
"light_level": int(msg[7:9]),
}
def _ps_decode(self, msg):
return {
"bank": ord(msg[4]) - 0x30,
"statuses": [ord(x) - 0x30 for x in msg[5:69]],
}
def _rp_decode(self, msg):
return {"remote_programming_status": int(msg[4:6])}
def _rr_decode(self, msg):
return {"real_time_clock": msg[4:20]}
def _sd_decode(self, msg):
desc_ch1 = msg[9]
show_on_keypad = ord(desc_ch1) >= 0x80
if show_on_keypad:
desc_ch1 = chr(ord(desc_ch1) & 0x7F)
return {
"desc_type": int(msg[4:6]),
"unit": int(msg[6:9]) - 1,
"desc": (desc_ch1 + msg[10:25]).rstrip(),
"show_on_keypad": show_on_keypad,
}
def _ss_decode(self, msg):
return {"system_trouble_status": msg[4:-2]}
def _st_decode(self, msg):
group = int(msg[4:5])
temperature = int(msg[7:10])
if group == 0:
temperature -= 60
elif group == 1:
temperature -= 40
return {"group": group, "device": int(msg[5:7]) - 1, "temperature": temperature}
def _tc_decode(self, msg):
return {"task": int(msg[4:7]) - 1}
def _tr_decode(self, msg):
return {
"thermostat_index": int(msg[4:6]) - 1,
"mode": int(msg[6]),
"hold": msg[7] == "1",
"fan": int(msg[8]),
"current_temp": int(msg[9:11]),
"heat_setpoint": int(msg[11:13]),
"cool_setpoint": int(msg[13:15]),
"humidity": int(msg[15:17]),
}
def _ua_decode(self, msg):
return {}
def _vn_decode(self, msg):
elkm1_version = f"{int(msg[4:6], 16)}.{int(msg[6:8], 16)}.{int(msg[8:10], 16)}"
xep_version = (
f"{int(msg[10:12], 16)}.{int(msg[12:14], 16)}.{int(msg[14:16], 16)}"
)
return {"elkm1_version": elkm1_version, "xep_version": xep_version}
def _xk_decode(self, msg):
return {"real_time_clock": msg[4:20]}
def _zb_decode(self, msg):
return {"zone_number": int(msg[4:7]) - 1, "zone_bypassed": msg[7] == "1"}
def _zc_decode(self, msg):
status = _status_decode(int(msg[7:8], 16))
return {"zone_number": int(msg[4:7]) - 1, "zone_status": status}
def _zd_decode(self, msg):
zone_definitions = [ord(x) - 0x30 for x in msg[4 : 4 + Max.ZONES.value]]
return {"zone_definitions": zone_definitions}
def _zp_decode(self, msg):
zone_partitions = [ord(x) - 0x31 for x in msg[4 : 4 + Max.ZONES.value]]
return {"zone_partitions": zone_partitions}
def _zs_decode(self, msg):
status = [_status_decode(int(x, 16)) for x in msg[4 : 4 + Max.ZONES.value]]
return {"zone_statuses": status}
def _zv_decode(self, msg):
return {"zone_number": int(msg[4:7]) - 1, "zone_voltage": int(msg[7:10]) / 10}
def _unknown_decode(self, msg):
return {"msg_code": msg[2:4], "data": msg[4:-2]}
def housecode_to_index(housecode):
match = re.search(r"^([A-P])(\d{1,2})$", housecode.upper())
if match:
house_index = int(match.group(2))
if 1 <= house_index <= 16:
return (ord(match.group(1)) - ord("A")) * 16 + house_index - 1
raise ValueError("Invalid X10 housecode: %s" % housecode)
def index_to_housecode(index):
if index < 0 or index > 255:
raise ValueError
quotient, remainder = divmod(index, 16)
return f"{chr(ord('A') + quotient)}{remainder + 1:02}"
def get_elk_command(line):
if len(line) < 4:
return ""
return line[2:4]
def _status_decode(status):
logical_status = (status & 0b00001100) >> 2
physical_status = status & 0b00000011
return (logical_status, physical_status)
def _is_valid_length_and_checksum(msg):
try:
if int(msg[:2], 16) != (len(msg) - 2):
return False, "Incorrect message length"
checksum = int(msg[-2:], 16)
for char in msg[:-2]:
checksum += ord(char)
if (checksum % 256) != 0:
return False, "Bad checksum"
except ValueError:
return False, "Message invalid"
return True, ""
def al_encode(arm_mode, area, user_code):
return MessageEncode(f"0Da{arm_mode}{area + 1:1}{user_code:06}00", "AS")
def as_encode():
return MessageEncode("06as00", "AS")
def az_encode():
return MessageEncode("06az00", "AZ")
def cf_encode(output):
return MessageEncode(f"09cf{output + 1:03}00", None)
def ct_encode(output):
return MessageEncode(f"09ct{output + 1:03}00", None)
def cn_encode(output, seconds):
return MessageEncode(f"0Ecn{output + 1:03}{seconds:05}00", None)
def cs_encode():
return MessageEncode("06cs00", "CS")
def cp_encode():
return MessageEncode("06cp00", "CR")
def cr_encode(index):
return MessageEncode(f"08cr{index + 1:02}00", "CR")
def cw_encode(index, value, value_format):
if value_format == 2:
value = value[0] * 256 + value[1]
return MessageEncode(f"0Dcw{index + 1:02}{value:05}00", None)
def cv_encode(counter):
return MessageEncode(f"08cv{counter + 1:02}00", "CV")
def cx_encode(counter, value):
return MessageEncode(f"0Dcx{counter + 1:02}{value:05}00", "CV")
def dm_encode(
keypad_area, clear, beep, timeout, line1, line2
):
return MessageEncode(
f"2Edm{keypad_area + 1:1}{clear:1}{beep:1}{timeout:05}{line1:^<16.16}{line2:^<16.16}00",
None,
)
def ka_encode():
return MessageEncode("06ka00", "KA")
def lw_encode():
return MessageEncode("06lw00", "LW")
|
MIT License
|
dkumor/rtcbot
|
rtcbot/arduino.py
|
_serialProtocol.data_received
|
python
|
def data_received(self, data):
self._log.debug("recv %s", data)
self.incomingMessageBuffer += data
if not self.started:
if not self.startByte in self.incomingMessageBuffer:
if len(self.startByte) < len(self.incomingMessageBuffer):
self.incomingMessageBuffer = self.incomingMessageBuffer[
-len(self.startByte) :
]
self._log.debug("Ignoring: start byte %s not found", self.startByte)
return
else:
self._log.debug("startBytes %s found - starting read", self.startByte)
_, self.incomingMessageBuffer = self.incomingMessageBuffer.split(
self.startByte, 1
)
self.started = True
self.onReady(True)
if self.readStruct is not None:
while len(self.incomingMessageBuffer) >= self.readStruct.size:
msg = self.readStruct.unpack(
self.incomingMessageBuffer[: self.readStruct.size]
)
self.incomingMessageBuffer = self.incomingMessageBuffer[
self.readStruct.size :
]
if self.readKeys is not None:
msg = dict(zip(self.readKeys, msg))
self._log.debug("recvmsg: %s", msg)
self.putter(msg)
elif self.readFormat is None:
self.putter(self.incomingMessageBuffer)
self.incomingMessageBuffer = bytes()
else:
outputArray = self.incomingMessageBuffer.split(b"\n")
self.incomingMessageBuffer = outputArray[-1]
for i in range(len(outputArray) - 1):
self._log.debug("recvmsg: %s", outputArray[i])
self.putter(outputArray[i])
|
Internal function, called whenever new data arrives on the serial connection.
It processes the raw bytes and, once a full message has been received,
decodes it and passes it to the readQueue.
Do not call this function directly.
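For readers unfamiliar with the fixed-size framing used in the readStruct branch above, here is a standalone sketch (independent of rtcbot) of the same idea: accumulate bytes, unpack one struct-sized record at a time, and keep the remainder for the next call; the struct format and key names are invented for illustration.
import struct

read_struct = struct.Struct("<hh")        # two little-endian int16 values per message
read_keys = ["temperature", "humidity"]   # hypothetical key names
buffer = b""

def feed(data):
    """Mimic the readStruct branch of data_received."""
    global buffer
    buffer += data
    messages = []
    while len(buffer) >= read_struct.size:
        values = read_struct.unpack(buffer[:read_struct.size])
        buffer = buffer[read_struct.size:]
        messages.append(dict(zip(read_keys, values)))
    return messages

print(feed(b"\x15\x00"))                  # [] - only half a message so far
print(feed(b"\x2a\x00\x01\x00\x02\x00"))  # two complete messages once 8 bytes have arrived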
|
https://github.com/dkumor/rtcbot/blob/ba45b7a5f60d7ed65aab378c84b4c0e6c2770073/rtcbot/arduino.py#L117-L171
|
import asyncio
import logging
import serial
import serial_asyncio
import struct
from .base import SubscriptionProducerConsumer, SubscriptionClosed
class _serialProtocol(asyncio.Protocol):
_log = logging.getLogger("rtcbot.SerialConnection")
def __init__(
self,
putter,
url="/dev/ttyS0",
readFormat="\n",
writeFormat=None,
baudrate=115200,
writeKeys=None,
readKeys=None,
startByte=None,
onReady=lambda x: 0,
loop=None,
):
self.putter = putter
self.onReady = onReady
self.startByte = startByte
self.started = startByte is None
if self.startByte is not None:
try:
len(self.startByte)
except:
self.startByte = bytes([self.startByte])
self.readFormat = readFormat
self.readStruct = None
if readFormat is not None and readFormat != "\n":
self.readStruct = struct.Struct(readFormat)
self.readKeys = readKeys
self.writeStruct = None
if writeFormat is not None:
self.writeStruct = struct.Struct(writeFormat)
self.writeKeys = writeKeys
self.incomingMessageBuffer = bytes()
ser = serial.serial_for_url(url, baudrate=baudrate)
ser.rts = False
if loop is None:
loop = asyncio.get_event_loop()
self.transport = serial_asyncio.SerialTransport(loop, self, ser)
def write(self, msg):
self._log.debug("sendmsg: %s", msg)
if self.isConnected():
if self.writeStruct is not None:
if self.writeKeys is not None:
msg = [msg[key] for key in self.writeKeys]
packedMessage = self.writeStruct.pack(*msg)
self._log.debug("send %s", packedMessage)
self.transport.write(packedMessage)
else:
try:
msg = msg.encode()
except:
pass
self.transport.write(msg)
else:
raise ConnectionError("Serial Connection is closed")
def isConnected(self):
return self.transport is not None and self.started
def connection_made(self, transport):
self._log.debug("Serial Connection Made")
if self.startByte is None:
self.onReady(True)
def connection_lost(self, exc):
self._log.warn("Serial Connection Lost")
self.transport = None
self.onReady(False)
|
MIT License
|
basespace/basespace-python-sdk
|
src/BaseSpacePy/model/QueryParameters.py
|
QueryParameters.validate
|
python
|
def validate(self):
for p in self.required:
if not self.passed.has_key(p):
raise UndefinedParameterException(p)
for p in self.passed.keys():
if not legal.has_key(p):
raise UnknownParameterException(p)
if len(legal[p])>0 and (not self.passed[p] in legal[p]):
raise IllegalParameterException(p,legal[p])
|
Validates that query parameter keys and values are properly formed:
required keys are present, and keys and values are within the set of
known acceptable keys/values.
:raises UndefinedParameterException: when a required parameter is not present
:raises UnknownParameterException: when a parameter name is not present in the list of acceptable parameter names
:raises IllegalParameterException: when a parameter value (with a valid name) is not present in the list of acceptable parameter values
:returns: None
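A hedged usage sketch of validate(); the import paths are inferred from the file path above, and since the class uses dict.has_key() it assumes the Python 2 interpreter the SDK targets.
from BaseSpacePy.model.QueryParameters import QueryParameters
from BaseSpacePy.api.BaseSpaceException import IllegalParameterException

qp = QueryParameters({'Limit': 10, 'SortDir': 'Asc'})
qp.validate()                 # passes: both keys are known and 'Asc' is an allowed value

bad = QueryParameters({'SortDir': 'Sideways'})
try:
    bad.validate()
except IllegalParameterException:
    print("'Sideways' is not one of ['Asc', 'Desc']")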
|
https://github.com/basespace/basespace-python-sdk/blob/0e9d30790ec58fcf673534aeb62d3645b063fd14/src/BaseSpacePy/model/QueryParameters.py#L51-L69
|
from BaseSpacePy.api.BaseSpaceException import UndefinedParameterException, UnknownParameterException, IllegalParameterException, QueryParameterException
legal = {'Statuses': [],
'SortBy': ['Id', 'Name', 'DateCreated', 'Path', 'Position'],
'Extensions': [],
'Offset': [],
'Limit': [],
'SortDir': ['Asc', 'Desc'],
'Name': [],
'StartPos':[],
'EndPos':[],
'Format':[]
}
class QueryParameters(object):
def __init__(self, pars=None, required=None):
if pars is None:
pars = {}
if required is None:
required = []
self.passed = {}
try:
for k in pars.keys():
self.passed[k] = pars[k]
except AttributeError:
raise QueryParameterException("The 'pars' argument to QueryParameters must be a dictionary")
self.required = required
def __str__(self):
return str(self.passed)
def __repr__(self):
return str(self)
def getParameterDict(self):
return self.passed
|
Apache License 2.0
|
ratoaq2/knowit
|
knowit/core.py
|
Rule.execute
|
python
|
def execute(self, props, pv_props, context: typing.Mapping):
raise NotImplementedError
|
Execute the rule; concrete Rule subclasses must implement this method.
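A hypothetical subclass (not part of knowit) illustrating the contract: execute() receives the properties gathered so far, the private properties and the context, and returns the value the rule contributes; the property name and thresholds are invented.
import typing
from knowit.core import Rule

class ResolutionRule(Rule):
    """Derive a coarse resolution label from a detected frame height."""

    def execute(self, props, pv_props, context: typing.Mapping):
        height = props.get('height')
        if height is None:
            return None
        if height >= 2160:
            return '4K'
        return 'HD' if height >= 720 else 'SD'

rule = ResolutionRule('resolution')
print(rule.execute({'height': 1080}, {}, {}))   # 'HD'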
|
https://github.com/ratoaq2/knowit/blob/e7cc0d786fafdb9dba99b95a1cc95c02f84f0b5b/knowit/core.py#L224-L226
|
import typing
from logging import NullHandler, getLogger
logger = getLogger(__name__)
logger.addHandler(NullHandler())
T = typing.TypeVar('T')
_visible_chars_table = dict.fromkeys(range(32))
def _is_unknown(value: typing.Any) -> bool:
return isinstance(value, str) and (not value or value.lower() == 'unknown')
class Reportable(typing.Generic[T]):
def __init__(
self,
*args: str,
description: typing.Optional[str] = None,
reportable: bool = True,
):
self.names = args
self._description = description
self.reportable = reportable
@property
def description(self) -> str:
return self._description or '|'.join(self.names)
def report(self, value: typing.Union[str, T], context: typing.MutableMapping) -> None:
if not value or not self.reportable:
return
if 'report' in context:
report_map = context['report'].setdefault(self.description, {})
if value not in report_map:
report_map[value] = context['path']
logger.info('Invalid %s: %r', self.description, value)
class Property(Reportable[T]):
def __init__(
self,
*args: str,
default: typing.Optional[T] = None,
private: bool = False,
description: typing.Optional[str] = None,
delimiter: str = ' / ',
**kwargs,
):
super().__init__(*args, description=description, **kwargs)
self.default = default
self.private = private
self.delimiter = delimiter
def extract_value(
self,
track: typing.Mapping,
context: typing.MutableMapping,
) -> typing.Optional[T]:
for name in self.names:
names = name.split('.')
value = track.get(names[0], {}).get(names[1]) if len(names) == 2 else track.get(name)
if value is None:
if self.default is None:
continue
value = self.default
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, str):
value = value.translate(_visible_chars_table).strip()
if _is_unknown(value):
continue
value = self._deduplicate(value)
result = self.handle(value, context)
if result is not None and not _is_unknown(result):
return result
return None
@classmethod
def _deduplicate(cls, value: str) -> str:
values = value.split(' / ')
if len(values) == 2 and values[0] == values[1]:
return values[0]
return value
def handle(self, value: T, context: typing.MutableMapping) -> typing.Optional[T]:
return value
class Configurable(Property[T]):
def __init__(self, config: typing.Mapping[str, typing.Mapping], *args: str,
config_key: typing.Optional[str] = None, **kwargs):
super().__init__(*args, **kwargs)
self.mapping = getattr(config, config_key or self.__class__.__name__) if config else {}
@classmethod
def _extract_key(cls, value: str) -> typing.Union[str, bool]:
return value.upper()
@classmethod
def _extract_fallback_key(cls, value: str, key: str) -> typing.Optional[T]:
return None
def _lookup(
self,
key: str,
context: typing.MutableMapping,
) -> typing.Union[T, None, bool]:
result = self.mapping.get(key)
if result is not None:
result = getattr(result, context.get('profile') or 'default')
return result if result != '__ignored__' else False
return None
def handle(self, value, context):
key = self._extract_key(value)
if key is False:
return
result = self._lookup(key, context)
if result is False:
return
while not result and key:
key = self._extract_fallback_key(value, key)
result = self._lookup(key, context)
if result is False:
return
if not result:
self.report(value, context)
return result
class MultiValue(Property):
def __init__(self, prop: typing.Optional[Property] = None, delimiter='/', single=False,
handler=None, name=None, **kwargs):
super().__init__(*(prop.names if prop else (name,)), **kwargs)
self.prop = prop
self.delimiter = delimiter
self.single = single
self.handler = handler
def handle(
self,
value: str,
context: typing.MutableMapping,
) -> typing.Union[T, typing.List[T]]:
if self.handler:
call = self.handler
elif self.prop:
call = self.prop.handle
else:
raise NotImplementedError('No handler available')
result = call(value, context)
if result is not None:
return result
if isinstance(value, list):
if len(value) == 1:
values = self._split(value[0], self.delimiter)
else:
values = value
else:
values = self._split(value, self.delimiter)
if values is None:
return call(values, context)
if len(values) > 1 and not self.single:
results = [call(item, context) if not _is_unknown(item) else None for item in values]
results = [r for r in results if r is not None]
if results:
return results
return call(values[0], context)
@classmethod
def _split(
cls,
value: typing.Optional[T],
delimiter: str = '/',
) -> typing.Optional[typing.List[str]]:
if value is None:
return None
return [x.strip() for x in str(value).split(delimiter)]
class Rule(Reportable[T]):
def __init__(self, name: str, override=False, **kwargs):
super().__init__(name, **kwargs)
self.override = override
|
MIT License
|
kartverket/midgard
|
midgard/dev/timer.py
|
Timer.end
|
python
|
def end(self) -> float:
time_elapsed = self.pause()
self._log(time_elapsed)
return time_elapsed
|
End the timer and log the time elapsed
Returns:
The time elapsed in seconds.
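A short usage sketch, assuming the midgard package is importable; the logger is swapped for print so the example needs no logging configuration.
import time
from midgard.dev.timer import Timer

t = Timer(text="Sleeping took", logger=print)
t.start()
time.sleep(0.2)
elapsed = t.end()           # prints the elapsed time using the "Sleeping took ..." template
print(round(elapsed, 1))    # 0.2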
|
https://github.com/kartverket/midgard/blob/faf8963c9e0e49255c90a60ba5671277912777fd/midgard/dev/timer.py#L103-L112
|
from contextlib import ContextDecorator
import time
from typing import Any, Callable, Optional
from midgard.dev import exceptions
from midgard.dev import log
class Timer(ContextDecorator):
def __init__(
self, text: str = "Elapsed time:", fmt: str = ".4f", logger: Optional[Callable[[str], None]] = log.info
) -> None:
super().__init__()
self._start: Optional[float] = None
self._end: Optional[float] = None
self.text = text if (text is None or "{}" in text) else (text + " {}").strip()
self.fmt = fmt
self.logger = (lambda _: None) if logger is None else logger
@staticmethod
def timer() -> float:
return time.perf_counter()
def start(self) -> None:
self._start = self.timer()
self._end = None
def pause(self) -> float:
self._end = self.timer()
time_elapsed = self.elapsed()
self._start = None
return time_elapsed
|
MIT License
|
putschli/labchain
|
labchain/network/networking.py
|
NetworkInterface.__get_shuffled_dict_items
|
python
|
def __get_shuffled_dict_items(self, dictionary):
dict_list = list(dictionary.items())
random.shuffle(dict_list)
return dict_list
|
Return the given dictionary's items as a shuffled list, so that peers are contacted in random order.
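A standalone sketch of the same idea, independent of NetworkInterface: turn the peer map into a randomly ordered list so that requests are not always sent to the same node first (the addresses are made up).
import random

peers = {'10.0.0.1': {8080: {}}, '10.0.0.2': {8080: {}}, '10.0.0.3': {8080: {}}}
peer_list = list(peers.items())
random.shuffle(peer_list)
print(peer_list[0][0])   # one of the three addresses, chosen at random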
|
https://github.com/putschli/labchain/blob/18aa10c3679ad7fcb2286aad1172f322cf238a8d/labchain/network/networking.py#L268-L272
|
import collections
import json
import logging
import random
import socket
import time
from copy import deepcopy
from threading import Thread
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from netifaces import interfaces, ifaddresses, AF_INET, AF_INET6
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from labchain.datastructure.block import Block
from labchain.datastructure.transaction import Transaction
from labchain.network.discover import PeerDiscoverySystem
from labchain.util.TransactionFactory import TransactionFactory
from labchain.util.utility import Utility
logger = logging.getLogger(__name__)
HTTP_BAD_REQUEST = 400
class NodeNotAvailableException(Exception):
pass
class NoPeersException(Exception):
pass
class TransactionDoesNotExistException(Exception):
pass
class BlockDoesNotExistException(Exception):
pass
class NoBlockExistsInRange(Exception):
pass
class UnexpectedResponseException(Exception):
pass
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
class JsonRpcClient:
def __init__(self):
self.id_counter = 0
def send(self, ip_address, port, method, params=tuple()):
url = 'http://{}:{}/'.format(ip_address, port)
headers = {'content-type': 'application/json'}
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": self.id_counter,
}
logger.debug('Sending request {} to {}'.format(str(payload), url))
try:
response = requests.post(url, data=json.dumps(payload),
headers=headers).json()
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as e:
raise NodeNotAvailableException(str(e))
logger.debug('Received response {} from {}'.format(response, url))
self.id_counter += 1
try:
return response['result']
except KeyError:
raise UnexpectedResponseException('Unexpected response from {}: {}'
.format(url, response))
class NetworkInterface:
def __init__(self, json_rpc_client, initial_peers):
self.json_rpc_client = json_rpc_client
self.peers = {}
for ip_address, port_map in initial_peers.items():
for port, info in port_map.items():
self.add_peer(ip_address, port, info)
def sendTransaction(self, transaction):
transaction_dict = transaction.to_dict()
responses = self._bulk_send('sendTransaction', [transaction_dict])
if not responses:
logger.warning('No nodes available to send the transaction to!')
def sendBlock(self, block):
responses = self._bulk_send('sendBlock', [block.to_dict()])
if not responses:
raise NoPeersException('No nodes available to send the block to')
def requestTransaction(self, transaction_hash):
responses = self._bulk_send('requestTransaction', [transaction_hash], return_on_first_success=True)
if responses:
if responses[0]:
transaction, block_hash = responses[0]
return Transaction.from_dict(transaction), block_hash
else:
raise TransactionDoesNotExistException()
else:
raise NoPeersException('No nodes available to request the transaction from')
def requestBlock(self, block_id):
responses = self._bulk_send('requestBlock', [block_id], return_on_first_success=True)
if responses:
if responses[0]:
return [Block.from_dict(block_data) for block_data in responses[0]]
else:
raise BlockDoesNotExistException()
else:
raise NoPeersException('No nodes available to request the block from')
def requestBlockByHash(self, block_hash):
responses = self._bulk_send('requestBlockByHash', [block_hash], return_on_first_success=True)
if responses:
if responses[0]:
return Block.from_dict(responses[0])
else:
raise BlockDoesNotExistException()
else:
raise NoPeersException('No nodes available to request the block from')
def requestBlocksByHashRange(self, block_start_hash=None, block_end_hash=None):
responses = self._bulk_send('requestBlocksByHashRange', [block_start_hash, block_end_hash],
return_on_first_success=True)
res = []
if responses:
if len(responses) > 0:
for block in responses[0]:
res.append(Block.from_dict(block))
else:
raise NoBlockExistsInRange()
else:
raise NoPeersException('No nodes available to request the block from')
return res
def requestAllTransactions(self):
responses = self._bulk_send('requestAllTransactions', return_on_first_success=False)
res = []
if responses:
if len(responses) > 0:
for tx in responses[0]:
res.append(Transaction.from_dict(tx))
else:
raise TransactionDoesNotExistException()
else:
raise NoPeersException('No nodes available to request the transaction from')
return res
def requestTransactionsInPool(self):
responses = self._bulk_send('requestTransactionsInPool', return_on_first_success=True)
res = []
if responses:
if len(responses) > 0:
for tx in responses[0]:
res.append(Transaction.from_dict(tx))
else:
raise Exception('There was a response but it was empty')
else:
raise NoPeersException('No nodes available to request the transactions from')
return res
def get_n_last_transactions(self, n):
responses = self._bulk_send('requestNLastTransaction', [n], return_on_first_success=True)
res = []
if responses:
if len(responses) > 0:
for tx in responses[0]:
res.append(Transaction.from_dict(tx))
else:
raise Exception('There was a response but it was empty')
else:
raise NoPeersException('No nodes available to request the transactions from')
return res
def get_highest_workflow_ID(self):
res = self._bulk_send('getHighestWorkflowID', return_on_first_success=True)
return res
def search_transaction_from_receiver(self, receiver_public_key):
responses = self._bulk_send('searchTransactionFromReceiver', [receiver_public_key],
return_on_first_success=True)
res = []
if responses:
if len(responses) > 0:
for tx in responses[0]:
res.append(Transaction.from_dict(tx))
else:
raise Exception('There was a response but it was empty')
else:
raise NoPeersException('No nodes available to request the transactions from')
return res
def search_transaction_from_sender(self, sender_public_key):
responses = self._bulk_send('searchTransactionFromSender', [sender_public_key], return_on_first_success=True)
res = []
if responses:
if len(responses) > 0:
for tx in responses[0]:
res.append(Transaction.from_dict(tx))
else:
raise Exception('There was a response but it was empty')
else:
raise NoPeersException('No nodes available to request the transactions from')
return res
def add_peer(self, ip_address, port, info=None):
if not Utility.is_valid_ipv4(ip_address) and not Utility.is_valid_ipv6(ip_address):
ip_address = self.__resolve_hostname(ip_address)
if info is None:
info = {}
if ip_address in self.peers and port in self.peers[ip_address] and info == self.peers[ip_address][port]:
logger.debug('Peer {}:{} unchanged. Skipping...'.format(ip_address, str(port)))
return
logger.info('Peer {}:{} added/updated'.format(str(ip_address), str(port)))
update(self.peers, {str(ip_address): {int(port): info}})
logger.debug('My peers are now: {}'.format(str(self.peers)))
def _add_peer_bulk(self, peer_dict):
for ip, port_dict in peer_dict.items():
for port, info in port_dict.items():
self.add_peer(ip, port, info)
|
Apache License 2.0
|
demisto/demisto-sdk
|
demisto_sdk/commands/common/hook_validations/readme.py
|
ReadMeValidator.is_valid_file
|
python
|
def is_valid_file(self) -> bool:
return all([
self.is_image_path_valid(),
self.verify_readme_image_paths(),
self.is_mdx_file(),
self.verify_no_empty_sections(),
self.verify_no_default_sections_left(),
self.verify_readme_is_not_too_short(),
self.is_context_different_in_yml(),
self.verify_demisto_in_readme_content(),
self.verify_template_not_in_readme()
])
|
Check whether the readme file is valid or not
Returns:
bool: True if the readme file passes all checks, False otherwise.
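A hedged usage sketch; ReadMeValidator calls get_content_path() during construction, so it has to run inside a checked-out content repository, and the README path below is purely illustrative.
from demisto_sdk.commands.common.hook_validations.readme import ReadMeValidator

validator = ReadMeValidator('Packs/MyPack/Integrations/MyIntegration/README.md')
if not validator.is_valid_file():
    print('README validation failed - see the errors reported above')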
|
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/common/hook_validations/readme.py#L80-L95
|
import atexit
import json
import os
import re
import subprocess
import tempfile
from functools import lru_cache
from pathlib import Path
from threading import Lock
from typing import Callable, List, Optional
from urllib.parse import urlparse
import click
import requests
from git import InvalidGitRepositoryError
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from demisto_sdk.commands.common.errors import (FOUND_FILES_AND_ERRORS,
FOUND_FILES_AND_IGNORED_ERRORS,
Errors)
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.hook_validations.base_validator import BaseValidator
from demisto_sdk.commands.common.tools import (
compare_context_path_in_yml_and_readme, get_content_path, get_yaml,
get_yml_paths_in_dir, print_warning, run_command_os)
NO_HTML = '<!-- NOT_HTML_DOC -->'
YES_HTML = '<!-- HTML_DOC -->'
SECTIONS = [
'Troubleshooting',
'Use Cases',
'Known Limitations',
'Additional Information'
]
USER_FILL_SECTIONS = [
'FILL IN REQUIRED PERMISSIONS HERE',
'version xx'
]
REQUIRED_MDX_PACKS = ['@mdx-js/mdx', 'fs-extra', 'commander']
PACKS_TO_IGNORE = ['HelloWorld', 'HelloWorldPremium']
DEFAULT_SENTENCES = ['getting started and learn how to build an integration']
class ReadMeValidator(BaseValidator):
_MDX_SERVER_PROCESS: Optional[subprocess.Popen] = None
_MDX_SERVER_LOCK = Lock()
MINIMUM_README_LENGTH = 30
def __init__(self, file_path: str, ignored_errors=None, print_as_warnings=False, suppress_print=False,
json_file_path=None):
super().__init__(ignored_errors=ignored_errors, print_as_warnings=print_as_warnings,
suppress_print=suppress_print, json_file_path=json_file_path)
self.content_path = get_content_path()
self.file_path = Path(file_path)
self.pack_path = self.file_path.parent
self.node_modules_path = self.content_path / Path('node_modules')
with open(self.file_path) as f:
readme_content = f.read()
self.readme_content = readme_content
|
MIT License
|
cohesity/management-sdk-python
|
cohesity_management_sdk/models/user_delete_parameters.py
|
UserDeleteParameters.__init__
|
python
|
def __init__(self,
domain=None,
tenant_id=None,
users=None):
self.domain = domain
self.tenant_id = tenant_id
self.users = users
|
Constructor for the UserDeleteParameters class
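A minimal usage sketch: the class is a plain data holder, so construction is just assigning the three optional fields (the values are illustrative).
from cohesity_management_sdk.models.user_delete_parameters import UserDeleteParameters

params = UserDeleteParameters(domain='LOCAL', users=['alice', 'bob'])
print(params.domain, params.users)   # LOCAL ['alice', 'bob']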
|
https://github.com/cohesity/management-sdk-python/blob/1c085d5a10f5f1a87b700e7ad1fc1dcabda41ae5/cohesity_management_sdk/models/user_delete_parameters.py#L34-L43
|
class UserDeleteParameters(object):
_names = {
"domain":'domain',
"tenant_id":'tenantId',
"users":'users'
}
|
Apache License 2.0
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/__init__.py
|
format_frontmatter
|
python
|
def format_frontmatter(values):
return frontmatter.dump_frontmatter(values)
|
Format values as frontmatter.
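A short sketch, assuming it runs inside Sublime Text where the mdpopups package is importable: prepend frontmatter to markdown content so that md2html picks up per-popup settings (the keys shown are ones read by md2html in the context below).
import mdpopups

settings = {
    'allow_code_wrap': True,
    'markdown_extensions': ['markdown.extensions.nl2br'],
}
content = mdpopups.format_frontmatter(settings) + '# Hello\nfirst line\nsecond line'
# mdpopups.show_popup(view, content) would then render the popup with those settings.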
|
https://github.com/facelessuser/sublime-markdown-popups/blob/aeb7586da26fe46b7764cf1e2832336bc306195d/st3/mdpopups/__init__.py#L847-L850
|
import sublime
from . import markdown
from . import jinja2
import traceback
import time
import codecs
import html
import html.parser
import urllib
import functools
import base64
from . import version as ver
from . import colorbox
from collections import OrderedDict
from .st_scheme_template import SchemeTemplate, POPUP, PHANTOM, SHEET
from .st_clean_css import clean_css
from .st_pygments_highlight import syntax_hl as pyg_syntax_hl
from .st_code_highlight import SublimeHighlight
from .st_mapping import lang_map
from . import imagetint
import re
import os
from . import frontmatter
try:
import bs4
except Exception:
bs4 = None
HTML_SHEET_SUPPORT = int(sublime.version()) >= 4074
LOCATION = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CSS_PATH = os.path.join(LOCATION, 'css', 'default.css')
DEFAULT_CSS = 'Packages/mdpopups/mdpopups_css/default.css'
OLD_DEFAULT_CSS = 'Packages/mdpopups/css/default.css'
DEFAULT_USER_CSS = 'Packages/User/mdpopups.css'
IDK = '''
<style>html {background-color: #333; color: red}</style>
<div><p>¯\\_(ツ)_/¯</p></div>
<div><p>
MdPopups failed to create<br>
the popup/phantom!<br><br>
Check the console to see if<br>
there are helpful errors.</p></div>
'''
HL_SETTING = 'mdpopups.use_sublime_highlighter'
STYLE_SETTING = 'mdpopups.default_style'
RE_BAD_ENTITIES = re.compile(r'(&(?!amp;|lt;|gt;|nbsp;)(?:\w+;|#\d+;))')
NODEBUG = 0
ERROR = 1
WARNING = 2
INFO = 3
def _log(msg):
print('mdpopups: {}'.format(str(msg)))
def _debug(msg, level):
if int(_get_setting('mdpopups.debug', NODEBUG)) >= level:
_log(msg)
def _get_setting(name, default=None):
return sublime.load_settings('Preferences.sublime-settings').get(name, default)
def _can_show(view, location=-1):
can_show = True
sel = view.sel()
if location >= 0:
region = view.visible_region()
if region.begin() > location or region.end() < location:
can_show = False
elif len(sel) >= 1:
region = view.visible_region()
if region.begin() > sel[0].b or region.end() < sel[0].b:
can_show = False
else:
can_show = False
return can_show
_scheme_cache = OrderedDict()
_highlighter_cache = OrderedDict()
def _clear_cache():
global _scheme_cache
global _highlighter_cache
_scheme_cache = OrderedDict()
_highlighter_cache = OrderedDict()
def _is_cache_expired(cache_time):
delta_time = _get_setting('mdpopups.cache_refresh_time', 30)
if not isinstance(delta_time, int) or delta_time < 0:
delta_time = 30
return delta_time == 0 or (time.time() - cache_time) >= (delta_time * 60)
def _prune_cache():
limit = _get_setting('mdpopups.cache_limit', 10)
if limit is None or not isinstance(limit, int) or limit <= 0:
limit = 10
while len(_scheme_cache) >= limit:
_scheme_cache.popitem(last=True)
while len(_highlighter_cache) >= limit:
_highlighter_cache.popitem(last=True)
def _get_sublime_highlighter(view):
scheme = view.settings().get('color_scheme')
obj = None
if scheme is not None:
if scheme in _highlighter_cache:
obj, t = _highlighter_cache[scheme]
if _is_cache_expired(t):
obj = None
if obj is None:
try:
obj = SublimeHighlight(scheme)
_prune_cache()
_highlighter_cache[scheme] = (obj, time.time())
except Exception:
_log('Failed to get Sublime highlighter object!')
_debug(traceback.format_exc(), ERROR)
pass
return obj
def _get_scheme(scheme):
settings = sublime.load_settings("Preferences.sublime-settings")
obj = None
user_css = ''
default_css = ''
if scheme is not None:
if scheme in _scheme_cache:
obj, user_css, default_css, t = _scheme_cache[scheme]
if (
_is_cache_expired(t) or
obj.use_pygments != (not settings.get(HL_SETTING, True)) or
obj.default_style != settings.get(STYLE_SETTING, True)
):
obj = None
user_css = ''
default_css = ''
if obj is None:
try:
obj = SchemeTemplate(scheme)
_prune_cache()
user_css = _get_user_css()
default_css = _get_default_css()
_scheme_cache[scheme] = (obj, user_css, default_css, time.time())
except Exception:
_log('Failed to convert/retrieve scheme to CSS!')
_debug(traceback.format_exc(), ERROR)
return obj, user_css, default_css
def _get_default_css():
css = ''
try:
with codecs.open(DEFAULT_CSS_PATH, encoding='utf-8') as f:
css = clean_css(f.read())
except Exception:
pass
return css
def _get_user_css():
css = None
user_css = _get_setting('mdpopups.user_css', DEFAULT_USER_CSS)
if user_css == OLD_DEFAULT_CSS:
user_css = DEFAULT_CSS
if user_css == DEFAULT_CSS:
css = _get_default_css()
else:
try:
css = clean_css(sublime.load_resource(user_css))
except Exception:
pass
return css if css else ''
class _MdWrapper(markdown.Markdown):
Meta = {}
def __init__(self, *args, **kwargs):
if 'allow_code_wrap' in kwargs:
self.sublime_wrap = kwargs['allow_code_wrap']
del kwargs['allow_code_wrap']
if 'language_map' in kwargs:
self.plugin_map = kwargs['language_map']
del kwargs['language_map']
if 'sublime_hl' in kwargs:
self.sublime_hl = kwargs['sublime_hl']
del kwargs['sublime_hl']
super(_MdWrapper, self).__init__(*args, **kwargs)
def registerExtensions(self, extensions, configs):
from .markdown import util
from .markdown.extensions import Extension
for ext in extensions:
try:
if isinstance(ext, util.string_type):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext._extendMarkdown(self)
elif ext is not None:
raise TypeError(
'Extension "{}.{}" must be of type: "markdown.Extension"'.format(
ext.__class__.__module__, ext.__class__.__name__
)
)
except Exception:
_log('Failed to load markdown module!')
_debug(traceback.format_exc(), ERROR)
return self
def _get_theme(view, css=None, css_type=POPUP, template_vars=None):
obj, user_css, default_css = _get_scheme(view.settings().get('color_scheme'))
try:
return obj.apply_template(
view,
default_css + '\n' +
((clean_css(css) + '\n') if css else '') +
user_css,
css_type,
template_vars
) if obj is not None else ''
except Exception:
_log('Failed to retrieve scheme CSS!')
_debug(traceback.format_exc(), ERROR)
return ''
def _remove_entities(text):
p = html.parser.HTMLParser()
def repl(m):
return p.unescape(m.group(1))
return RE_BAD_ENTITIES.sub(repl, text)
def _create_html(
view, content, md=True, css=None, debug=False, css_type=POPUP,
wrapper_class=None, template_vars=None, template_env_options=None
):
debug = _get_setting('mdpopups.debug', NODEBUG)
if css is None or not isinstance(css, str):
css = ''
style = _get_theme(view, css, css_type, template_vars)
if debug:
_debug('=====CSS=====', INFO)
_debug(style, INFO)
if md:
content = md2html(
view, content, template_vars=template_vars,
template_env_options=template_env_options
)
else:
content = _markup_template(frontmatter.get_frontmatter(content)[1], template_vars, template_env_options)
if debug:
_debug('=====HTML OUTPUT=====', INFO)
if bs4:
soup = bs4.BeautifulSoup(content, "html.parser")
_debug('\n' + soup.prettify(), INFO)
else:
_debug('\n' + content, INFO)
if wrapper_class:
wrapper = ('<div class="mdpopups"><div class="{}">'.format(wrapper_class)) + '{}</div></div>'
else:
wrapper = '<div class="mdpopups">{}</div>'
html = "<style>{}</style>".format(style)
html += _remove_entities(wrapper.format(content))
return html
def _markup_template(markup, variables, options):
if variables:
if options is None:
options = {}
env = jinja2.Environment(**options)
return env.from_string(markup).render(plugin=variables)
return markup
def version():
return ver.version()
def md2html(
view, markup, template_vars=None, template_env_options=None, **kwargs
):
if _get_setting('mdpopups.use_sublime_highlighter', True):
sublime_hl = (True, _get_sublime_highlighter(view))
else:
sublime_hl = (False, None)
fm, markup = frontmatter.get_frontmatter(markup)
extensions = [
"mdpopups.mdx.highlight",
"pymdownx.inlinehilite",
"pymdownx.superfences"
]
configs = {
"mdpopups.mdx.highlight": {
"guess_lang": False
},
"pymdownx.inlinehilite": {
"style_plain_text": True
},
"pymdownx.superfences": {
"custom_fences": fm.get('custom_fences', [])
}
}
md_exts = fm.get('markdown_extensions', None)
if md_exts is None:
extensions.extend(
[
"markdown.extensions.admonition",
"markdown.extensions.attr_list",
"markdown.extensions.def_list",
"pymdownx.betterem",
"pymdownx.magiclink",
"markdown.extensions.md_in_html",
"markdown.extensions.nl2br"
]
)
else:
for ext in md_exts:
if isinstance(ext, (dict, OrderedDict)):
k, v = next(iter(ext.items()))
if not k.startswith('mdpopups.'):
if k == "pymdownx.extrarawhtml":
k = 'markdown.extensions.md_in_html'
_debug(
"Warning: 'pymdownx.extrarawhtml' no longer exists. 'markdown.extensions.md_in_html'"
" will be used instead. Plugins should migrate as mdpopups will not redirect in the "
"future.",
WARNING
)
extensions.append(k)
if v is not None:
configs[k] = v
elif isinstance(ext, str):
if not ext.startswith('mdpopups.'):
if ext == "pymdownx.extrarawhtml":
ext = 'markdown.extensions.md_in_html'
_debug(
"Warning: 'pymdownx.extrarawhtml' no longer exists. 'markdown.extensions.md_in_html'"
" will be used instead. Plugins should migrate as mdpopups will not redirect in the"
" future.",
WARNING
)
extensions.append(ext)
return _MdWrapper(
extensions=extensions,
extension_configs=configs,
sublime_hl=sublime_hl,
allow_code_wrap=fm.get('allow_code_wrap', False),
language_map=fm.get('language_map', {})
).convert(_markup_template(markup, template_vars, template_env_options)).replace('&quot;', '"')
def color_box(
colors, border="#000000ff", border2=None, height=32, width=32,
border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF
):
return colorbox.color_box(
colors, border, border2, height, width,
border_size, check_size, max_colors, alpha, border_map
)
def color_box_raw(
colors, border="#000000ff", border2=None, height=32, width=32,
border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF
):
return colorbox.color_box_raw(
colors, border, border2, height, width,
border_size, check_size, max_colors, alpha, border_map
)
def tint(img, color, opacity=255, height=None, width=None):
if isinstance(img, str):
try:
img = sublime.load_binary_resource(img)
except Exception:
_log('Could not open binary file!')
_debug(traceback.format_exc(), ERROR)
return ''
return imagetint.tint(img, color, opacity, height, width)
def tint_raw(img, color, opacity=255):
if isinstance(img, str):
try:
img = sublime.load_binary_resource(img)
except Exception:
_log('Could not open binary file!')
_debug(traceback.format_exc(), ERROR)
return ''
return imagetint.tint_raw(img, color, opacity)
def get_language_from_view(view):
lang = None
user_map = sublime.load_settings('Preferences.sublime-settings').get('mdpopups.sublime_user_lang_map', {})
syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]
keys = set(list(lang_map.keys()) + list(user_map.keys()))
for key in keys:
v1 = lang_map.get(key, (tuple(), tuple()))[1]
v2 = user_map.get(key, (tuple(), tuple()))[1]
if syntax in (tuple(v2) + v1):
lang = key
break
return lang
def syntax_highlight(view, src, language=None, inline=False, allow_code_wrap=False, language_map=None):
try:
if _get_setting('mdpopups.use_sublime_highlighter', True):
highlighter = _get_sublime_highlighter(view)
code = highlighter.syntax_highlight(
src, language, inline=inline, code_wrap=(not inline and allow_code_wrap), plugin_map=language_map
)
else:
code = pyg_syntax_hl(
src, language, inline=inline, code_wrap=(not inline and allow_code_wrap)
)
except Exception:
code = src
_log('Failed to highlight code!')
_debug(traceback.format_exc(), ERROR)
return code
def tabs2spaces(text, tab_size=4):
return text.expandtabs(tab_size)
def scope2style(view, scope, selected=False, explicit_background=False):
style = {
'color': None,
'background': None,
'style': ''
}
obj = _get_scheme(view.settings().get('color_scheme'))[0]
style_obj = obj.guess_style(view, scope, selected, explicit_background)
style['color'] = style_obj['foreground']
style['background'] = style_obj['background']
font = []
if style_obj['bold']:
font.append('bold')
if style_obj['italic']:
font.append('italic')
if style_obj['underline']:
font.append('underline')
if style_obj['glow']:
font.append('glow')
style['style'] = ' '.join(font)
return style
def clear_cache():
_clear_cache()
def hide_popup(view):
view.hide_popup()
def update_popup(
view, content, md=True, css=None, wrapper_class=None,
template_vars=None, template_env_options=None, **kwargs
):
disabled = _get_setting('mdpopups.disable', False)
if disabled:
_debug('Popups disabled', WARNING)
return
try:
html = _create_html(
view, content, md, css, css_type=POPUP, wrapper_class=wrapper_class,
template_vars=template_vars, template_env_options=template_env_options
)
except Exception:
_log(traceback.format_exc())
html = IDK
view.update_popup(html)
def show_popup(
view, content, md=True, css=None,
flags=0, location=-1, max_width=320, max_height=240,
on_navigate=None, on_hide=None, wrapper_class=None,
template_vars=None, template_env_options=None, **kwargs
):
disabled = _get_setting('mdpopups.disable', False)
if disabled:
_debug('Popups disabled', WARNING)
return
if not _can_show(view, location):
return
try:
html = _create_html(
view, content, md, css, css_type=POPUP, wrapper_class=wrapper_class,
template_vars=template_vars, template_env_options=template_env_options
)
except Exception:
_log(traceback.format_exc())
html = IDK
view.show_popup(
html, flags=flags, location=location, max_width=max_width,
max_height=max_height, on_navigate=on_navigate, on_hide=on_hide
)
def is_popup_visible(view):
return view.is_popup_visible()
def add_phantom(
view, key, region, content, layout, md=True,
css=None, on_navigate=None, wrapper_class=None,
template_vars=None, template_env_options=None, **kwargs
):
disabled = _get_setting('mdpopups.disable', False)
if disabled:
_debug('Phantoms disabled', WARNING)
return
try:
html = _create_html(
view, content, md, css, css_type=PHANTOM, wrapper_class=wrapper_class,
template_vars=template_vars, template_env_options=template_env_options
)
except Exception:
_log(traceback.format_exc())
html = IDK
return view.add_phantom(key, region, html, layout, on_navigate)
def erase_phantoms(view, key):
view.erase_phantoms(key)
def erase_phantom_by_id(view, pid):
view.erase_phantom_by_id(pid)
def query_phantom(view, pid):
return view.query_phantom(pid)
def query_phantoms(view, pids):
return view.query_phantoms(pids)
if HTML_SHEET_SUPPORT:
def new_html_sheet(
window, name, contents, md=True, css=None, flags=0, group=-1,
wrapper_class=None, template_vars=None, template_env_options=None, **kwargs
):
view = window.create_output_panel('mdpopups-dummy', unlisted=True)
try:
html = _create_html(
view, contents, md, css, css_type=SHEET, wrapper_class=wrapper_class,
template_vars=template_vars, template_env_options=template_env_options
)
except Exception:
_log(traceback.format_exc())
html = IDK
return window.new_html_sheet(name, html, flags, group)
def update_html_sheet(
sheet, contents, md=True, css=None, wrapper_class=None,
template_vars=None, template_env_options=None, **kwargs
):
window = sheet.window()
view = window.create_output_panel('mdpopups-dummy', unlisted=True)
try:
html = _create_html(
view, contents, md, css, css_type=SHEET, wrapper_class=wrapper_class,
template_vars=template_vars, template_env_options=template_env_options
)
except Exception:
_log(traceback.format_exc())
html = IDK
sheet.set_contents(html)
class Phantom(sublime.Phantom):
def __init__(
self, region, content, layout, md=True,
css=None, on_navigate=None, wrapper_class=None,
template_vars=None, template_env_options=None, **kwargs
):
super().__init__(region, content, layout, on_navigate)
self.md = md
self.css = css
self.wrapper_class = wrapper_class
self.template_vars = template_vars
self.template_env_options = template_env_options
def __eq__(self, rhs):
return (
self.region == rhs.region and self.content == rhs.content and
self.layout == rhs.layout and self.on_navigate == rhs.on_navigate and
self.md == rhs.md and self.css == rhs.css and
self.wrapper_class == rhs.wrapper_class and self.template_vars == rhs.template_vars and
self.template_env_options == rhs.template_env_options
)
class PhantomSet(sublime.PhantomSet):
def __init__(self, view, key=""):
super().__init__(view, key)
def __del__(self):
for p in self.phantoms:
erase_phantom_by_id(self.view, p.id)
def update(self, new_phantoms):
regions = query_phantoms(self.view, [p.id for p in self.phantoms])
for i in range(len(regions)):
self.phantoms[i].region = regions[i]
count = 0
for p in new_phantoms:
if not isinstance(p, Phantom):
p = Phantom(
p.region, p.content, p.layout,
md=False, css=None, on_navigate=p.on_navigate, wrapper_class=None,
template_vars=None, template_env_options=None
)
new_phantoms[count] = p
try:
idx = self.phantoms.index(p)
p.id = self.phantoms[idx].id
except ValueError:
p.id = add_phantom(
self.view,
self.key,
p.region,
p.content,
p.layout,
p.md,
p.css,
p.on_navigate,
p.wrapper_class,
p.template_vars,
p.template_env_options
)
count += 1
for p in self.phantoms:
if p not in new_phantoms and p.region != sublime.Region(-1):
erase_phantom_by_id(self.view, p.id)
self.phantoms = new_phantoms
|
MIT License
|
formlio/forml
|
forml/runtime/asset/_persistent.py
|
Registry.generations
|
python
|
def generations(
self, project: 'level.Project.Key', lineage: 'level.Lineage.Key'
) -> typing.Iterable[typing.Union[str, int, 'level.Generation.Key']]:
raise NotImplementedError()
|
List the generations of the given lineage.
Args:
project: Project of which the lineage is to be listed.
lineage: Lineage of the project to be listed.
Returns:
Generations listing.
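Since generations() is abstract here, the following is a standalone sketch (not a real registry backend) of what a file-system implementation might look like, assuming each generation is a numbered directory under <root>/<project>/<lineage>/.
import pathlib
import typing

def list_generations(root: pathlib.Path, project: str, lineage: str) -> typing.Iterable[int]:
    """Yield the generation numbers found on disk, sorted ascending."""
    base = root / project / lineage
    if not base.is_dir():
        return []
    return sorted(int(p.name) for p in base.iterdir() if p.name.isdigit())

# list_generations(pathlib.Path('/var/forml/registry'), 'forecast', '1.0.0') -> e.g. [1, 2, 3]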
|
https://github.com/formlio/forml/blob/fd070da74a0107e37c0c643dd8df8680618fef74/forml/runtime/asset/_persistent.py#L109-L121
|
import abc
import logging
import pathlib
import tempfile
import typing
import uuid
from forml import _provider, conf
from forml.conf.parsed import provider as provcfg
if typing.TYPE_CHECKING:
from forml import project as prj
from ._directory import level
LOGGER = logging.getLogger(__name__)
TMPDIR = tempfile.TemporaryDirectory(
prefix=f'{conf.APPNAME}-persistent-', dir=conf.tmpdir
)
def mkdtemp(prefix: typing.Optional[str] = None, suffix: typing.Optional[str] = None) -> pathlib.Path:
return pathlib.Path(tempfile.mkdtemp(prefix, suffix, TMPDIR.name))
class Registry(_provider.Interface, default=provcfg.Registry.default, path=provcfg.Registry.path):
def __init__(self, staging: typing.Optional[typing.Union[str, pathlib.Path]] = None):
if not staging:
LOGGER.warning('Using temporal non-distributed staging for %s', self)
staging = mkdtemp(prefix=f'{self}-staging-')
self._staging: pathlib.Path = pathlib.Path(staging)
def __repr__(self):
name = self.__class__.__module__.rsplit('.', 1)[-1].capitalize()
return f'{name}-registry'
def __hash__(self):
return hash(self.__class__) ^ hash(self._staging)
def __eq__(self, other):
return isinstance(other, self.__class__) and other._staging == self._staging
def mount(self, project: 'level.Project.Key', lineage: 'level.Lineage.Key') -> 'prj.Artifact':
package = self.pull(project, lineage)
return package.install(self._staging / package.manifest.name / str(package.manifest.version))
@abc.abstractmethod
def projects(self) -> typing.Iterable[typing.Union[str, 'level.Project.Key']]:
raise NotImplementedError()
@abc.abstractmethod
def lineages(self, project: 'level.Project.Key') -> typing.Iterable[typing.Union[str, 'level.Lineage.Key']]:
raise NotImplementedError()
@abc.abstractmethod
|
Apache License 2.0
|
x-datainitiative/tick
|
tick/plot/plot_hawkes.py
|
_normalize_functions
|
python
|
def _normalize_functions(y_values_list, t_values):
y_values_list = np.array(y_values_list)
normalizations = [
1. / np.trapz(y_values, t_values) for y_values in y_values_list
]
normalized_y_values_list = (y_values_list.T * normalizations).T
return normalized_y_values_list, normalizations
|
Normalize list of functions by their integral value
Parameters
----------
y_values_list : `list` of np.ndarray
y values of the list of functions we want to normalize
t_values : `np.ndarray`
t values shared by all functions given with y_values_list
Returns
-------
normalized_y_values_list : `list` of np.ndarray
Normalized y values of the given list of functions
normalizations : `np.ndarray`
Normalization factors that have been used
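A worked example of the normalization (reproducing the function body inline, since _normalize_functions is module-private): after dividing each curve by its trapezoidal integral, every curve integrates to approximately one.
import numpy as np

t_values = np.linspace(0.0, 1.0, 101)
y_values_list = np.array([
    np.full_like(t_values, 3.0),   # constant curve, integral 3.0
    4.0 * t_values,                # linear curve, integral 2.0
])
normalizations = [1.0 / np.trapz(y, t_values) for y in y_values_list]
normalized = (y_values_list.T * normalizations).T
print(np.trapz(normalized[0], t_values))   # ~1.0
print(np.trapz(normalized[1], t_values))   # ~1.0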
|
https://github.com/x-datainitiative/tick/blob/bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48/tick/plot/plot_hawkes.py#L302-L326
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tick.plot.plot_utilities import share_x, share_y
def plot_hawkes_kernel_norms(kernel_object, show=True, pcolor_kwargs=None,
node_names=None, rotate_x_labels=0.):
n_nodes = kernel_object.n_nodes
if node_names is None:
node_names = range(n_nodes)
elif len(node_names) != n_nodes:
raise ValueError('node_names must be a list of length {} but has length {}'
.format(n_nodes, len(node_names)))
row_labels = ['${} \\rightarrow$'.format(i) for i in node_names]
column_labels = ['$\\rightarrow {}$'.format(i) for i in node_names]
norms = kernel_object.get_kernel_norms()
fig, ax = plt.subplots()
if rotate_x_labels != 0.:
rotate_x_labels = -rotate_x_labels
x_label_alignment = 'right'
else:
x_label_alignment = 'center'
if pcolor_kwargs is None:
pcolor_kwargs = {}
if norms.min() >= 0:
pcolor_kwargs.setdefault("cmap", plt.cm.Blues)
else:
pcolor_kwargs.setdefault("cmap", plt.cm.RdBu)
max_abs_norm = np.max(np.abs(norms))
pcolor_kwargs.setdefault("vmin", -max_abs_norm)
pcolor_kwargs.setdefault("vmax", max_abs_norm)
heatmap = ax.pcolor(norms, **pcolor_kwargs)
ax.set_xticks(np.arange(norms.shape[0]) + 0.5, minor=False)
ax.set_yticks(np.arange(norms.shape[1]) + 0.5, minor=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False, fontsize=17,
rotation=rotate_x_labels, ha=x_label_alignment)
ax.set_yticklabels(column_labels, minor=False, fontsize=17)
fig.subplots_adjust(right=0.8)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.5)
fig.colorbar(heatmap, cax=cax)
if show:
plt.show()
return fig
def plot_hawkes_kernels(kernel_object, support=None, hawkes=None, n_points=300,
show=True, log_scale=False, min_support=1e-4, ax=None):
if support is None or support <= 0:
plot_supports = kernel_object.get_kernel_supports()
support = plot_supports.max() * 1.2
n_nodes = kernel_object.n_nodes
if log_scale:
x_values = np.logspace(
np.log10(min_support), np.log10(support), n_points)
else:
x_values = np.linspace(0, support, n_points)
if ax is None:
fig, ax_list_list = plt.subplots(n_nodes, n_nodes, sharex=True,
sharey=True)
else:
if ax.shape != (n_nodes, n_nodes):
raise ValueError('Given ax has shape {} but should have shape {}'
.format(ax.shape, (n_nodes, n_nodes)))
ax_list_list = ax
show = False
if n_nodes == 1:
ax_list_list = np.array([[ax_list_list]])
for i, ax_list in enumerate(ax_list_list):
for j, ax in enumerate(ax_list):
y_values = kernel_object.get_kernel_values(i, j, x_values)
ax.plot(x_values, y_values, label="Kernel (%d, %d)" % (i, j))
if hawkes:
y_true_values = hawkes.kernels[i, j].get_values(x_values)
ax.plot(x_values, y_true_values,
label="True Kernel (%d, %d)" % (i, j))
if i == n_nodes - 1:
ax.set_xlabel(r"$t$", fontsize=18)
ax.set_ylabel(r"$\phi^{%g,%g}(t)$" % (i, j), fontsize=18)
if log_scale:
ax.set_xscale('log')
ax.set_yscale('log')
legend = ax.legend()
for label in legend.get_texts():
label.set_fontsize(12)
if show:
plt.show()
return ax_list_list.ravel()[0].figure
def plot_hawkes_baseline_and_kernels(
hawkes_object, kernel_support=None, hawkes=None, n_points=300,
show=True, log_scale=False, min_support=1e-4, ax=None):
n_nodes = hawkes_object.n_nodes
if ax is None:
fig, ax_list_list = plt.subplots(n_nodes, n_nodes + 1, figsize=(10, 6))
else:
ax_list_list = ax
show = False
ax_kernels = ax_list_list[:, 1:]
plot_hawkes_kernels(hawkes_object, support=kernel_support, hawkes=hawkes,
n_points=n_points, show=False, log_scale=log_scale,
min_support=min_support, ax=ax_kernels)
share_x(ax_kernels)
share_y(ax_kernels)
ax_baselines = ax_list_list[:, 0]
t_values = np.linspace(0, hawkes_object.period_length, n_points)
for i in range(n_nodes):
ax = ax_baselines[i]
ax.plot(t_values, hawkes_object.get_baseline_values(i, t_values),
label='baseline ({})'.format(i))
ax.plot(t_values, hawkes.get_baseline_values(i, t_values),
label='true baseline ({})'.format(i))
ax.set_ylabel("$\mu_{}(t)$".format(i), fontsize=18)
if i == n_nodes - 1:
ax.set_xlabel(r"$t$", fontsize=18)
legend = ax.legend()
for label in legend.get_texts():
label.set_fontsize(12)
share_x(ax_baselines.reshape(2, 1))
share_y(ax_baselines.reshape(2, 1))
if show:
plt.show()
return ax_list_list.ravel()[0].figure
|
BSD 3-Clause New or Revised License
|
wolverton-research-group/periodic-table-plotter
|
ptplotter/plotter.py
|
Square.quadra_label
|
python
|
def quadra_label(self, labels, **kwargs):
assert len(labels) == 4
x1 = x4 = self.x - self.dx*0.25
x2 = x3 = self.x + self.dx*1.25
y1 = y2 = self.y + self.dy*0.75
y3 = y4 = self.y + self.dy*0.25
ha1 = ha4 = 'right'
ha2 = ha3 = 'left'
va = 'center'
fontdict = kwargs.get('fontdict', {})
plt.text(x1, y1, labels[0], ha=ha1, va=va, fontdict=fontdict)
plt.text(x2, y2, labels[1], ha=ha2, va=va, fontdict=fontdict)
plt.text(x3, y3, labels[2], ha=ha3, va=va, fontdict=fontdict)
plt.text(x4, y4, labels[3], ha=ha4, va=va, fontdict=fontdict)
|
Plots 4 values in a square:
+------+-----+
| | |
label1 | 1st | 2nd | label2
| | |
+------+-----+
| | |
label4 | 4th | 3rd | label3
| | |
+------+-----+
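A standalone geometry sketch (independent of the Square class) of where the four labels land: labels 1 and 4 sit to the left of the square, labels 2 and 3 to the right, at three quarters and one quarter of its height.
import matplotlib.pylab as plt

x, y, dx, dy = 0.0, 0.0, 1.0, 1.0
fig, ax = plt.subplots()
ax.add_patch(plt.Rectangle((x, y), dx, dy, fill=False))
ax.text(x - 0.25 * dx, y + 0.75 * dy, 'label1', ha='right', va='center')
ax.text(x + 1.25 * dx, y + 0.75 * dy, 'label2', ha='left', va='center')
ax.text(x + 1.25 * dx, y + 0.25 * dy, 'label3', ha='left', va='center')
ax.text(x - 0.25 * dx, y + 0.25 * dy, 'label4', ha='right', va='center')
ax.set_xlim(-1, 2)
ax.set_ylim(-1, 2)
plt.show()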
|
https://github.com/wolverton-research-group/periodic-table-plotter/blob/e0192dfebcf2a356892e82bed6dc3e113639e8ba/ptplotter/plotter.py#L655-L682
|
from __future__ import division
import os, os.path
import yaml
import numpy as np
import matplotlib.pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from six import string_types
INSTALL_PATH = os.path.dirname(os.path.abspath(__file__))
__all__ = ['ElementDataPlotter', 'plt', 'Square']
elt_data = yaml.load(open(INSTALL_PATH+'/elements.yml').read())
def atomic_number(elt):
return elt['z']
def eneg_diff(data):
d = sorted([ elt_data[elt]['electronegativity'] for elt in data['pair']])
return abs(d[0] - d[1])
def symbol(data):
return data['symbol']
def get_coord_from_symbol(data):
if isinstance(data, string_types):
data = elt_data[data]
x = data['group']
y = data['period']
if x == 0:
if data['z'] > 56 and data['z'] < 74:
y = 9.0
x = data['z'] - 54
elif data['z'] > 88 and data['z'] < 106:
y = 10.0
x = data['z'] - 86
return x, -y
class ElementDataPlotter(object):
def __init__(self, data={}, elements=None, pair_data={}, **kwargs):
init_data = dict(elt_data)
elts = {}
for elt, value in init_data.items():
assert isinstance(value, dict)
if elements:
if not elt in elements:
continue
elts[elt] = init_data[elt]
if elt in data:
elts[elt].update(data[elt])
self._pairs = pair_data
self._elts = elts
self.squares = []
self.collections = []
self.functions = []
self.groups = []
self.values = []
self.cmaps = []
if not 'axes' in kwargs:
fig, axes = plt.subplots()
fig.patch.set_visible(False)
else:
axes = kwargs.get('axes')
self._ax = axes
def add_square(self, square):
self.squares.append(square)
for ind, patch in zip(self.groups, square.patches):
self.collections[ind].append(patch)
def set_functions(self, functions, cmaps='jet'):
self.groups = []
if not isinstance(cmaps, (tuple, list)):
cmaps = [cmaps]*len(functions)
self.cmaps = cmaps
self.collections = []
self.functions = []
for i, fl in enumerate(functions):
if not isinstance(fl, (tuple, list)):
fl = [fl]
for f in fl:
self.groups.append(i)
self.functions.append(f)
self.collections.append([])
self.inv = {}
for i, j in enumerate(self.groups):
self.inv[j] = self.inv.get(j, []) + [i]
@property
def labels(self):
return [ f.__doc__ for f in self.functions ]
@property
def cbar_labels(self):
return [ '\n'.join([self.functions[j].__doc__ for j in group ])
for group in self.inv.values() ]
guide_square = None
def create_guide_square(self, x=7, y=-1.5, labels=[], **kwargs):
if not labels:
labels = self.labels
guide_square = Square(x, y, data=labels, **kwargs)
for p in guide_square.patches:
p.set_facecolor(kwargs.get('color', 'white'))
self.guide_square = guide_square
def make_grid(self, xelts=[], yelts=[], functions=[eneg_diff],
cmaps='jet', draw=True, **kwargs):
self.set_functions(functions, cmaps=cmaps)
if not xelts and not yelts:
elts = set()
for pair in self._pairs:
elts |= set(pair)
xelts = list(elts)
yelts = list(elts)
for i, elt1 in enumerate(xelts):
self._ax.text(i+0.5, 0.25, elt1,
va='bottom', ha='center', rotation='vertical')
self._ax.text(i+0.5, -len(yelts) - 0.25, elt1,
va='top', ha='center', rotation='vertical')
for j, elt2 in enumerate(yelts):
pair = (elt1, elt2)
data = self._pairs.get(pair, {})
if isinstance(data, dict):
data['pair'] = pair
if data is None:
continue
vals = [ f(data) for f in self.functions ]
square = Square(i, -j, dy=-1., data=vals, **kwargs)
self.add_square(square)
for j, elt2 in enumerate(yelts):
self._ax.text(-0.25, -j-0.5, elt2,
va='center', ha='right')
self._ax.text(len(xelts) + 0.25, -j-0.5, elt2,
va='center', ha='left')
self._ax.set_xticks([])
self._ax.set_yticks([])
if draw:
self.draw(**kwargs)
self._ax.autoscale_view()
def ptable(self, functions=[atomic_number], cmaps=None, guide=True, **kwargs):
self.set_functions(functions, cmaps=cmaps)
self._ax.axis('off')
self._ax.set_xticks([])
self._ax.set_yticks([])
for elt, data in self._elts.items():
x, y = get_coord_from_symbol(elt)
values = [ f(data) for f in self.functions ]
elt_label = elt if kwargs.get('elem_labels', True) else None
square = Square(x, y, label=elt_label, data=values, **kwargs)
self.add_square(square)
if guide:
self.create_guide_square(**kwargs)
self.draw(**kwargs)
def draw(self, colorbars=True, **kwargs):
self.cbars = []
for coll, cmap, label in zip(self.collections, self.cmaps, self.cbar_labels):
pc = PatchCollection(coll, cmap=cmap)
pc.set_array(np.array([ p.value for p in coll ]))
self._ax.add_collection(pc)
if colorbars:
options = {
'orientation':'horizontal',
'pad':0.05, 'aspect':60
}
options.update(kwargs.get('colorbar_options', {}))
cbar = plt.colorbar(pc, **options)
cbar.set_label(label)
self.cbars.append(cbar)
fontdict = kwargs.get('font', {'color':'white'})
for s in self.squares:
if not s.label:
continue
x = s.x + s.dx/2
y = s.y + s.dy/2
self._ax.text(x, y, s.label, ha='center',
va='center',
fontdict=fontdict)
if self.guide_square:
self.guide_square.set_labels(self.labels)
pc = PatchCollection(self.guide_square.patches, match_original=True)
self._ax.add_collection(pc)
self._ax.autoscale_view()
def redraw_ptable(self, **kwargs):
self._ax.clear()
self.draw(**kwargs)
def pettifor(self, xaxis=atomic_number, yaxis=atomic_number,
label=symbol):
x, y = [],[]
for elt, data in self._elts.items():
xx = xaxis(data)
yy = yaxis(data)
x.append(xx)
y.append(yy)
self._ax.text(xx, yy, label(data))
self._ax.scatter(x, y)
self._ax.autoscale_view()
self._ax.set_xlabel(xaxis.__doc__)
self._ax.set_ylabel(yaxis.__doc__)
class Square(object):
def __init__(self, x, y, label=None, data=[], dx=1., dy=1., **kwargs):
self.x, self.y = x,y
self.dx, self.dy = dx, dy
self.label = label
self.data = data
self.set_patches(len(data), **kwargs)
for p, d in zip(self.patches, self.data):
p.value = d
def __getitem__(self, index):
return self.patches[index]
def __len__(self):
return len(self.patches)
def set_labels(self, labels, **kwargs):
if isinstance(labels, string_types):
self.single_label(labels, **kwargs)
elif len(labels) == 1:
self.single_label(labels[0], **kwargs)
elif len(labels) == 2:
self.double_label(labels, **kwargs)
elif len(labels) == 3:
self.triple_label(labels, **kwargs)
elif len(labels) == 4:
self.quadra_label(labels, **kwargs)
else:
raise ValueError("Cannot put more than 4 values onto a tile")
def set_patches(self, n, **kwargs):
if n == 1:
self.single_color(**kwargs)
elif n == 2:
self.double_color(**kwargs)
elif n == 3:
self.triple_color(**kwargs)
elif n == 4:
self.quadra_color(**kwargs)
else:
raise ValueError("Cannot put more than 4 values onto a tile")
def single_color(self, **kwargs):
x1, x2 = self.x, self.x+self.dx
y1, y2 = self.y, self.y+self.dy
patch = Polygon([
[x1, y1],
[x2, y1],
[x2, y2],
[x1, y2],
[x1, y1]])
self.patches = [patch]
def single_label(self, label, position="top", **kwargs):
assert isinstance(label, string_types)
if position == 'top':
x = self.x + self.dx/2
y = self.y + self.dy*1.25
ha, va = 'center', 'bottom'
elif position == 'bottom':
x = self.x + self.dx/2
y = self.y - self.dy*0.25
ha, va = 'center', 'top'
elif position == 'left':
x = self.x - self.dx*0.25
            y = self.y + self.dy/2  # vertical midpoint of the tile
ha, va = 'right', 'center'
elif position == 'right':
x = self.x + self.dx*1.25
            y = self.y + self.dy/2  # vertical midpoint of the tile
ha, va = 'left', 'center'
else:
raise ValueError("`position` must be one of:"
"'top', 'bottom', 'left', 'right'")
fontdict = kwargs.get('fontdict', {})
plt.text(x, y, label, ha=ha, va=va, fontdict=fontdict)
def double_color(self, **kwargs):
x1, x2 = self.x, self.x+self.dx
y1, y2 = self.y, self.y+self.dy
top = Polygon([
[x1, y1],
[x1, y2],
[x2, y1],
[x1, y1]])
bot = Polygon([
[x2, y2],
[x1, y2],
[x2, y1],
[x2, y2]])
self.patches = [top, bot]
def double_label(self, labels, position="horizontal", **kwargs):
assert len(labels) == 2
if position == 'horizontal':
x1 = self.x - self.dx*0.25
x2 = self.x + self.dx*1.25
y1 = y2 = self.y + self.dy/2
ha1, ha2 = 'right', 'left'
va1 = va2 = 'center'
elif position == 'vertical':
x1 = x2 = self.x + self.dx/2
y1 = self.y + self.dy*1.25
y2 = self.y - self.dy*0.25
ha1 = ha2 = 'center'
va1, va2 = 'bottom', 'top'
else:
raise ValueError("`position` must be one of:"
"'horizontal', 'vertical'")
fontdict = kwargs.get('fontdict', {})
plt.text(x1, y1, labels[0], ha=ha1, va=va1, fontdict=fontdict)
plt.text(x2, y2, labels[1], ha=ha2, va=va2, fontdict=fontdict)
def triple_color(self, **kwargs):
x1, x2 = self.x, self.x+self.dx
x3 = self.x + self.dx*0.5
y1, y2 = self.y, self.y+self.dy
y3 = self.y + self.dy*0.666
left = Polygon([
[x3, y3],
[x1, y1],
[x1, y2],
[x3, y2],
[x3, y3]])
right = Polygon([
[x3, y3],
[x2, y1],
[x2, y2],
[x3, y2],
[x3, y3]])
bot = Polygon([
[x3, y3],
[x1, y1],
[x2, y1],
[x3, y3]])
self.patches = [left, right, bot]
def triple_label(self, labels, **kwargs):
assert len(labels) == 3
x1 = self.x - self.dx*0.25
x2 = self.x + self.dx*1.25
x3 = self.x + self.dx/2
y1 = y2 = self.y + self.dy*0.75
y3 = self.y - self.dy*0.25
fontdict = kwargs.get('fontdict', {})
plt.text(x1, y1, labels[0], ha='right', fontdict=fontdict)
plt.text(x2, y2, labels[1], ha='left', fontdict=fontdict)
plt.text(x3, y3, labels[2], ha='center', va='top', fontdict=fontdict)
def quadra_color(self, **kwargs):
x1, x2 = self.x, self.x+self.dx
y1, y2 = self.y, self.y+self.dy
x3 = (x1+x2)/2
y3 = (y1+y2)/2
bl = Polygon([
[x3, y3],
[x3, y1],
[x1, y1],
[x1, y3],
[x3, y3]])
tl = Polygon([
[x3, y3],
[x3, y2],
[x1, y2],
[x1, y3],
[x3, y3]])
tr = Polygon([
[x3, y3],
[x3, y2],
[x2, y2],
[x2, y3],
[x3, y3]])
br = Polygon([
[x3, y3],
[x3, y1],
[x2, y1],
[x2, y3],
[x3, y3]])
self.patches = [tl, tr, br, bl]
|
MIT License
|
lukasliebel/multidepth
|
ptsemseg/utils.py
|
convert_state_dict
|
python
|
def convert_state_dict(state_dict):
if not next(iter(state_dict)).startswith("module."):
return state_dict
else:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:]
new_state_dict[name] = v
return new_state_dict
|
Converts a state dict saved from a DataParallel module to a normal
module state_dict
:param state_dict: the loaded DataParallel model_state
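A hedged call-site sketch; the checkpoint path, key name and model are placeholders rather than values taken from this repository.
import torch
# model is assumed to be the same network architecture the checkpoint was trained with
checkpoint = torch.load("runs/model_best.pkl", map_location="cpu")      # placeholder path
state = convert_state_dict(checkpoint["model_state"])                   # strips the leading "module." added by DataParallel
model.load_state_dict(state)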
|
https://github.com/lukasliebel/multidepth/blob/7478d355d8b7c5da7866fc335597a43073a712c9/ptsemseg/utils.py#L36-L49
|
import os
import logging
import datetime
import numpy as np
from collections import OrderedDict
def recursive_glob(rootdir=".", suffix=""):
return [
os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames
if filename.endswith(suffix)
]
def alpha_blend(input_image, segmentation_mask, alpha=0.5):
blended = np.zeros(input_image.size, dtype=np.float32)
blended = input_image * alpha + segmentation_mask * (1 - alpha)
return blended
|
MIT License
|
optibus/playback
|
playback/tape_cassettes/s3/s3_tape_cassette.py
|
S3TapeCassette._create_content_filter_func
|
python
|
def _create_content_filter_func(metadata):
def content_filter_func(recording_str):
recording_metadata = decode(recording_str)
for k, v in metadata.items():
recorded_value = recording_metadata.get(k)
if recorded_value is None and v is not None:
return False
if isinstance(v, str):
if not fnmatch(recorded_value, v):
return False
elif recorded_value != v:
return False
return True
return content_filter_func
|
Create a filter function which filters on the metadata values
:param metadata: metadata values to filter by
:type metadata: dict
:return: the filter function
:rtype: function
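A small illustrative sketch (not from the repository): string metadata values act as fnmatch patterns while other values must match exactly; the recordings here are hand-built jsonpickle strings.
from jsonpickle import encode
matches = S3TapeCassette._create_content_filter_func({'category': 'billing*', 'success': True})
print(matches(encode({'category': 'billing_report', 'success': True})))  # True: pattern and exact value both match
print(matches(encode({'category': 'auth', 'success': True})))            # False: 'auth' fails the 'billing*' pattern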
|
https://github.com/optibus/playback/blob/7e8ea29f764a52753aef4e3334fd9bcf7826e082/playback/tape_cassettes/s3/s3_tape_cassette.py#L244-L266
|
from __future__ import absolute_import
import random
from copy import copy
from random import Random
from zlib import compress, decompress
import logging
import uuid
from fnmatch import fnmatch
from datetime import datetime, timedelta
import six
from jsonpickle import encode, decode
from parse import compile
from playback.exceptions import NoSuchRecording
from playback.tape_cassette import TapeCassette
from playback.recordings.memory.memory_recording import MemoryRecording
from playback.tape_cassettes.s3.s3_basic_facade import S3BasicFacade
from playback.utils.timing_utils import Timed
_logger = logging.getLogger(__name__)
class S3TapeCassette(TapeCassette):
FULL_KEY = 'tape_recorder_recordings/{key_prefix}full/{id}'
METADATA_KEY = 'tape_recorder_recordings/{key_prefix}metadata/{id}'
RECORDING_ID = '{category}/{day}/{id}'
DAY_FORMAT = '%Y%m%d'
def __init__(self, bucket, key_prefix='', region=None, transient=False, read_only=True,
infrequent_access_kb_threshold=None, sampling_calculator=None):
_logger.info(u'Creating S3TapeCassette using bucket {}'.format(bucket))
self.bucket = bucket
self.key_prefix = (key_prefix + '/') if key_prefix else ''
self.transient = transient
self.read_only = read_only
self.infrequent_access_threshold = infrequent_access_kb_threshold * 1024 if infrequent_access_kb_threshold else None
self.sampling_calculator = sampling_calculator
self._random = Random(110613)
self._metadata_key_parser = compile(self.METADATA_KEY)
self._recording_id_parser = compile(self.RECORDING_ID)
self._s3_facade = S3BasicFacade(self.bucket, region=region)
def get_recording(self, recording_id):
full_key = self.FULL_KEY.format(key_prefix=self.key_prefix, id=recording_id)
try:
_logger.info(u'Fetching compressed recording using key {}'.format(full_key))
compressed_recording = self._s3_facade.get_string(full_key)
_logger.info(u'Decompressing recording of key {}'.format(full_key))
serialized_data = decompress(compressed_recording)
except Exception as ex:
if 'NoSuchKey' in type(ex).__name__:
raise NoSuchRecording(recording_id)
raise
_logger.info(u'Decoding recording of key {}'.format(full_key))
full_data = decode(serialized_data)
metadata = full_data.pop('_metadata', {})
_logger.info(u'Returning recording of key {}'.format(full_key))
return MemoryRecording(recording_id, recording_data=full_data, recording_metadata=metadata)
def get_recording_metadata(self, recording_id):
metadata_key = self.METADATA_KEY.format(key_prefix=self.key_prefix, id=recording_id)
try:
_logger.debug(u'Fetching metadata of recording using key {}'.format(metadata_key))
serialized_data = self._s3_facade.get_string(metadata_key)
except Exception as ex:
if 'NoSuchKey' in type(ex).__name__:
raise NoSuchRecording(recording_id)
raise
_logger.debug(u'Decoding metadata of recording of key {}'.format(metadata_key))
return decode(serialized_data)
def create_new_recording(self, category):
self._assert_not_read_only()
_id = self.RECORDING_ID.format(
category=category,
day=datetime.today().strftime(self.DAY_FORMAT),
id=uuid.uuid1().hex
)
logging.info(u'Creating a new recording with id {}'.format(_id))
return MemoryRecording(_id)
def _assert_not_read_only(self):
assert not self.read_only, 'Cassette is in readonly mode'
def _save_recording(self, recording):
self._assert_not_read_only()
full_data = copy(recording.recording_data)
full_data['_metadata'] = recording.recording_metadata
with Timed() as timed:
encoded_full = encode(full_data, unpicklable=True)
encoding_duration = timed.duration
with Timed() as timed:
if six.PY3 and isinstance(encoded_full, str):
compressed_full = compress(bytes(encoded_full.encode('utf-8')))
else:
compressed_full = compress(encoded_full)
compression_duration = timed.duration
recording_size = len(compressed_full)
if not self._should_sample(recording, recording_size):
logging.info(u'Recording with id {} is not chosen to be sampled and is being discarded'.format(
recording.id))
return
storage_class = self._calculate_storage_class(recording_size)
full_key = self.FULL_KEY.format(key_prefix=self.key_prefix, id=recording.id)
metadata_key = self.METADATA_KEY.format(key_prefix=self.key_prefix, id=recording.id)
_logger.debug(u"Saving recording full data at bucket {} under key {}".format(self.bucket, full_key))
self._s3_facade.put_string(full_key, compressed_full, StorageClass=storage_class)
_logger.debug(u"Saving recording metadata at bucket {} under key {}".format(self.bucket, metadata_key))
self._s3_facade.put_string(metadata_key, encode(recording.recording_metadata, unpicklable=True))
_logger.info(
u"Recording saved at bucket {} under key {} "
u"(recording size: {:.1f}KB -compressed-> {:.1f}KB, storage class: {}, "
u"encoding/compression durations: {:.2f}/{:.2f})".format(
self.bucket, full_key,
len(encoded_full) / 1024.0, len(compressed_full) / 1024.0, storage_class,
encoding_duration, compression_duration))
def _calculate_storage_class(self, recording_size):
storage_class = 'STANDARD'
if self.infrequent_access_threshold and recording_size >= self.infrequent_access_threshold:
storage_class = 'STANDARD_IA'
return storage_class
def _should_sample(self, recording, recording_size):
if self.sampling_calculator is None:
return True
category = self.extract_recording_category(recording.id)
ratio = self.sampling_calculator(category, recording_size, recording)
if ratio >= 1:
return True
return self._random.random() <= ratio
def create_id_prefix_iterators(self, id_prefixes, start_date=None, end_date=None, content_filter=None, limit=None):
return [self._s3_facade.iter_keys(
prefix=self.METADATA_KEY.format(
key_prefix=self.key_prefix, id=id_prefix
),
start_date=start_date,
end_date=end_date,
content_filter=content_filter,
limit=copy(limit)) for id_prefix in id_prefixes]
@staticmethod
|
BSD 3-Clause New or Revised License
|
llsourcell/iota_demo
|
iota/bin/__init__.py
|
IotaCommandLineApp.run_from_argv
|
python
|
def run_from_argv(self, argv=None):
exit_code = self.execute(**self.parse_argv(argv))
if exit_code is None:
exit_code = 0
return exit_code
|
Executes the command from a collection of arguments (e.g.,
:py:data:`sys.argv`) and returns the exit code.
:param argv:
Arguments to pass to the argument parser.
If ``None``, defaults to ``sys.argv[1:]``.
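A hedged subclass sketch; parse_argv() is not shown in this snippet, so it is assumed here to map CLI flags to the keyword arguments of execute().
class HelloCommand(IotaCommandLineApp):
    requires_seed = False
    def execute(self, api, **arguments):
        # api is assumed to be the Iota client supplied via parse_argv();
        # returning None makes run_from_argv() report exit code 0
        self.stdout.write('Hello from {}\n'.format(type(self).__name__))
if __name__ == '__main__':
    HelloCommand().main()  # main() calls exit(self.run_from_argv())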
|
https://github.com/llsourcell/iota_demo/blob/8d8b0434f42ebdb810f9c2fe177117c4af459ccc/iota/bin/__init__.py#L63-L78
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from abc import ABCMeta, abstractmethod as abstract_method
from argparse import ArgumentParser
from getpass import getpass as secure_input
from io import StringIO
from sys import exit
from typing import Optional, Text
from six import text_type, with_metaclass
from iota import Iota, __version__
from iota.crypto.types import Seed
__all__ = [
'IotaCommandLineApp',
]
class IotaCommandLineApp(with_metaclass(ABCMeta)):
requires_seed = True
def __init__(self, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin):
super(IotaCommandLineApp, self).__init__()
self.stdout = stdout
self.stderr = stderr
self.stdin = stdin
@abstract_method
def execute(self, api, **arguments):
raise NotImplementedError(
'Not implemented in {cls}.'.format(cls=type(self).__name__),
)
def main(self):
exit(self.run_from_argv())
|
MIT License
|
takuti/flurs
|
flurs/utils/metric.py
|
average_precision
|
python
|
def average_precision(truth, recommend):
if len(truth) == 0:
if len(recommend) == 0:
return 1.
return 0.
tp = accum = 0.
for n in range(recommend.size):
if recommend[n] in truth:
tp += 1.
accum += (tp / (n + 1.))
return accum / truth.size
|
Average Precision (AP).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: AP.
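A small worked example with illustrative values, assuming numpy and the function above are in scope.
import numpy as np
truth = np.array([1, 3, 5])
recommend = np.array([1, 2, 3, 6])
print(average_precision(truth, recommend))  # (1/1 + 2/3) / 3 ≈ 0.556: hits at ranks 1 and 3, out of 3 relevant items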
|
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L66-L87
|
import numpy as np
def count_true_positive(truth, recommend):
tp = 0
for r in recommend:
if r in truth:
tp += 1
return tp
def recall(truth, recommend, k=None):
if len(truth) == 0:
if len(recommend) == 0:
return 1.
return 0.
if k is None:
k = len(recommend)
return count_true_positive(truth, recommend[:k]) / float(truth.size)
def precision(truth, recommend, k=None):
if len(recommend) == 0:
if len(truth) == 0:
return 1.
return 0.
if k is None:
k = len(recommend)
return count_true_positive(truth, recommend[:k]) / float(k)
|
MIT License
|
huxiaoling/imageseg-2.5d_topo
|
TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/wheel.py
|
wheel_version
|
python
|
def wheel_version(source_dir):
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except Exception:
return None
|
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return None if we couldn't parse / extract it.
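A hedged sketch of how the result can be checked against VERSION_COMPATIBLE from the surrounding module; the directory path is a placeholder for an already-unpacked wheel.
version = wheel_version('/tmp/unpacked-wheel')  # placeholder directory
if version is None:
    print('could not read Wheel-Version from the WHEEL metadata')
elif version[0] > VERSION_COMPATIBLE[0]:
    print('unsupported major Wheel-Version: {}'.format('.'.join(map(str, version))))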
|
https://github.com/huxiaoling/imageseg-2.5d_topo/blob/86ca52e53f838309132a67f2a3e58cf69d314770/TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/wheel.py#L616-L633
|
from __future__ import absolute_import
import collections
import compileall
import csv
import hashlib
import logging
import os.path
import re
import shutil
import stat
import sys
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six import StringIO
from pip._internal import pep425tags
from pip._internal.download import path_to_url, unpack_url
from pip._internal.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel,
)
from pip._internal.locations import (
PIP_DELETE_MARKER_FILENAME, distutils_scheme,
)
from pip._internal.models.link import Link
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
call_subprocess, captured_stdout, ensure_dir, read_chunks,
)
from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import open_spinner
if MYPY_CHECK_RUNNING:
from typing import (
Dict, List, Optional, Sequence, Mapping, Tuple, IO, Text, Any,
Union, Iterable
)
from pip._vendor.packaging.requirements import Requirement
from pip._internal.req.req_install import InstallRequirement
from pip._internal.download import PipSession
from pip._internal.index import FormatControl, PackageFinder
from pip._internal.operations.prepare import (
RequirementPreparer
)
from pip._internal.cache import WheelCache
from pip._internal.pep425tags import Pep425Tag
InstalledCSVRow = Tuple[str, ...]
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def rehash(path, blocksize=1 << 20):
h = hashlib.sha256()
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, str(length))
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def replace_python_tag(wheelname, new_tag):
parts = wheelname.split('-')
parts[-3] = new_tag
return '-'.join(parts)
def fix_script(path):
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
return None
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
return str(s).replace(" ", "").split("=")
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
def message_about_scripts_not_on_PATH(scripts):
if not scripts:
return None
grouped_by_dir = collections.defaultdict(set)
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ.get("PATH", "").split(os.pathsep)
]
not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
}
if not warn_for:
return None
msg_lines = []
for parent_dir, scripts in warn_for.items():
scripts = sorted(scripts)
if len(scripts) == 1:
start_text = "script {} is".format(scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(scripts[:-1]) + " and " + scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
return "\n".join(msg_lines)
def sorted_outrows(outrows):
return sorted(outrows, key=lambda row: tuple(str(x) for x in row))
def get_csv_rows_for_installed(
old_csv_rows,
installed,
changed,
generated,
lib_dir,
):
installed_rows = []
for row in old_csv_rows:
if len(row) > 3:
logger.warning(
'RECORD line has more than three elements: {}'.format(row)
)
row = list(row)
old_path = row[0]
new_path = installed.pop(old_path, old_path)
row[0] = new_path
if new_path in changed:
digest, length = rehash(new_path)
row[1] = digest
row[2] = length
installed_rows.append(tuple(row))
for f in generated:
digest, length = rehash(f)
installed_rows.append((normpath(f, lib_dir), digest, str(length)))
for f in installed:
installed_rows.append((installed[f], '', ''))
return installed_rows
def move_wheel_files(
name,
req,
wheeldir,
user=False,
home=None,
root=None,
pycompile=True,
scheme=None,
isolated=False,
prefix=None,
warn_script_location=True
):
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
installed = {}
changed = set()
generated = []
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def record_installed(srcfile, destfile, modified=False):
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
ensure_dir(destdir)
if os.path.exists(destfile):
os.unlink(destfile)
shutil.copyfile(srcfile, destfile)
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
maker.clobber = True
maker.variants = {''}
maker.set_mode = True
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = r"""# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
if len(console) > 0:
generated_console_scripts = maker.make_multiple(
['%s = %s' % kv for kv in console.items()]
)
generated.extend(generated_console_scripts)
if warn_script_location:
msg = message_about_scripts_not_on_PATH(generated_console_scripts)
if msg is not None:
logger.warning(msg)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
outrows = get_csv_rows_for_installed(
reader, installed=installed, changed=changed,
generated=generated, lib_dir=lib_dir,
)
writer = csv.writer(record_out)
for row in sorted_outrows(outrows):
writer.writerow(row)
shutil.move(temp_record, record)
|
MIT License
|
ofir-reich/seir-graph
|
seir.py
|
edges2graph
|
python
|
def edges2graph(edges, N=N):
H = nx.MultiGraph()
H.add_nodes_from(np.arange(N))
H.add_edges_from(edges)
return H
|
Creates a MultiGraph from a list of edges.
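A tiny illustrative call; parallel edges are kept because the result is a MultiGraph.
H = edges2graph([(0, 1), (1, 2), (1, 2)], N=5)
print(H.number_of_nodes(), H.number_of_edges())  # 5 3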
|
https://github.com/ofir-reich/seir-graph/blob/3bde8f4025bf100529494fbca255b10eddab05f5/seir.py#L277-L282
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import statsmodels.api as sm
mpl.rcParams['figure.figsize'] = [25, 10]
N = 10000
MIN_DEGREE = 2
MEAN_DEGREE = 20
GAMMA = 0.2
STEPS_PER_DAY = 5
MAX_STEPS = 3000
INITIAL_INFECTED_NUM = 10
PROB_INFECT = 0.027
PROB_INFECT_EXPOSED_FACTOR = 0.5
RELATIVE_INFECTIOUSNESS_ASYMPTOMATIC = 0.5
DURATION_EXPOSED_INFECTS = 2
INCUBATION_DURATION_MEAN = 5.1
INCUBATION_DURATION_STD = 4.38
PROB_ASYMPTOMATIC = 0.40
PROB_RECOVER = 1 / 3.5
DAYS_IN_QUARANTINE = 14
PROB_INFECTED_DETECTED = 0
PROB_NEIGHBOR_TRACED = 0
PROB_EXPOSED_DETECTED = 0
QUARANTINE_NEIGHBORS = False
TEST_NEIGHBORS = False
TEST_DELAY_TIME = 0
_EPSILON = 1e-10
DOUBLING_DAYS = float((28 - 4) / np.log2(2220 / 11))
MAIN_GROUPS = ['susceptible', 'exposed', 'recovered', 'infected', 'quarantined']
ALL_COLUMNS = MAIN_GROUPS + ['test_rate']
GROUP2COLOR = dict(susceptible='blue', exposed='orange', recovered='green',
quarantined='purple', infected='red', test_rate='brown')
SUMMARY_ATTRS = ['duration', 'fraction_infected', 'doubling_days', 'fraction_quarantine_time', 'peak_infected_time', 'peak_fraction_infected', 'fraction_tests', 'peak_test_rate']
class SimulationResults(object):
def __init__(self, results_df, G=None, **kwargs):
self.df = results_df
self.hyperparams = kwargs
for name, value in kwargs.items():
setattr(self, name, value)
if G is None:
self.N = results_df[['susceptible', 'exposed', 'infected', 'recovered']].iloc[0].sum()
else:
self.N = len(G)
if G.name.startswith('power_law'):
self.G_attrs = dict(zip(['gamma', 'min_degree', 'mean_degree'], map(float, G.name.split('_')[-3:])))
self.G_attrs['N'] = self.N
if not hasattr(self, 'steps_per_day'):
self.steps_per_day = ((results_df['step'].iloc[1] - results_df['step'].iloc[0]) /
(results_df['day'].iloc[1] - results_df['day'].iloc[0]))
self.analyze_results_df()
def calculate_doubling_time(self):
results_df = self.df
idx_end = (results_df['exposed'] > results_df['exposed'].max() * 0.5).to_numpy().nonzero()[0][0]
if self.peak_exposed_time < 3 or idx_end == 0:
return np.inf
exposed_min = results_df['exposed'][:idx_end].min()
idx_min = results_df['exposed'][:idx_end].idxmin()
start_candidates = ((results_df.index >= idx_min) &
(results_df.index < idx_end) &
(results_df['exposed'] > exposed_min * 2)).to_numpy().nonzero()[0]
if not start_candidates.size:
return np.inf
idx_start = start_candidates[0]
try:
X = sm.add_constant(results_df[idx_start:idx_end][['day']], prepend=False)
log2_exposed = np.log2(results_df[idx_start:idx_end]['exposed'])
regression_results = sm.OLS(log2_exposed, X).fit()
doubling_days = 1 / regression_results.params['day']
except ValueError:
doubling_days = None
return doubling_days
def calculate_halving_time(self):
results_df = self.df
idx_peak = results_df['exposed'].idxmax()
end_candidates = ((results_df.index >= idx_peak) &
(results_df['exposed'] < self.peak_exposed / 5) &
(results_df['exposed'] > 5)).to_numpy().nonzero()[0]
if not end_candidates.size:
return None
idx_end = end_candidates[0]
idx_start = idx_peak
if idx_end - idx_start < 20:
return None
try:
X = sm.add_constant(results_df[idx_start:idx_end][['day']], prepend=False)
log2_exposed = np.log2(results_df[idx_start:idx_end]['exposed'])
regression_results = sm.OLS(log2_exposed, X).fit()
halving_days = -1 / regression_results.params['day']
except ValueError:
halving_days = None
return halving_days
def analyze_results_df(self):
results_df = self.df
self.duration = results_df['day'].iloc[-1]
self.peak_infected_time = results_df['day'].iloc[results_df['infected'].idxmax()]
self.peak_infected = results_df['infected'].max()
self.peak_fraction_infected = results_df['infected'].max() / self.N
self.peak_exposed_time = results_df['day'].iloc[results_df['exposed'].idxmax()]
self.peak_exposed = results_df['exposed'].max()
self.doubling_days = self.calculate_doubling_time()
self.halving_days = self.calculate_halving_time()
self.fraction_infected = results_df['recovered'].iloc[-1] / self.N
fraction_quarantine_steps = results_df['quarantined'].sum() / self.N
self.fraction_quarantine_time = fraction_quarantine_steps / self.steps_per_day
total_tests = results_df['test_rate'].sum() / self.steps_per_day
self.fraction_tests = total_tests / self.N
self.peak_test_rate = results_df['test_rate'].max() / self.N
def plot_trends(self, fraction_of_population=True, hyperparams=True, G_attrs=False, columns=None, vertical=False):
if columns is None:
columns = MAIN_GROUPS
title = ''
if hyperparams:
title += str({k: round(v, 3) for k, v in self.hyperparams.items()})
if G_attrs:
title += str(self.G_attrs)
if fraction_of_population:
scale = self.N
ylabel = 'Fraction of population'
else:
scale = 1
ylabel = 'Individuals'
if vertical:
fig, ax_arr = plt.subplots(2, 1, figsize=(10, 20))
ax_arr[0].set_title('Epidemic Simulation')
ax_arr[1].set_title('log scale')
else:
fig, ax_arr = plt.subplots(1, 2)
fig.suptitle(title, fontsize=18)
results_to_plot = self.df.drop('step', axis=1).set_index('day') / scale
results_to_plot = results_to_plot[columns]
for pane_ind, logy in enumerate([False, True]):
ax = results_to_plot.plot(ax=ax_arr[pane_ind], logy=logy)
ax.set_ylabel(ylabel)
if logy:
ax.get_legend().remove()
return ax_arr
def summary(self, hyperparams=False, G_attrs=False, plot=False, **plot_kwargs):
summary_dict = {attr_name: getattr(self, attr_name) for attr_name in SUMMARY_ATTRS}
summary_series_list = [pd.Series(summary_dict)]
if hyperparams:
summary_series_list.append(pd.Series(self.hyperparams))
if G_attrs:
summary_series_list.append(pd.Series(self.G_attrs))
print(pd.concat(summary_series_list))
if plot:
self.plot_trends(hyperparams=hyperparams, G_attrs=G_attrs, **plot_kwargs)
def get_gamma_distribution_params(mean, std):
theta = std**2 / mean
k = mean / theta
return k, theta
|
Apache License 2.0
|
dallinger/dallinger
|
dallinger/mturk.py
|
MTurkService.get_qualification_type_by_name
|
python
|
def get_qualification_type_by_name(self, name):
max_fuzzy_matches_to_check = 100
query = name.upper()
args = {
"Query": query,
"MustBeRequestable": False,
"MustBeOwnedByCaller": True,
"MaxResults": max_fuzzy_matches_to_check,
}
results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
start = time.time()
while not results:
elapsed = time.time() - start
if elapsed > self.max_wait_secs:
return None
time.sleep(1)
results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
qualifications = [self._translate_qtype(r) for r in results]
if len(qualifications) > 1:
for qualification in qualifications:
if qualification["name"].upper() == query:
return qualification
raise MTurkServiceException("{} was not a unique name".format(query))
return qualifications[0]
|
Return a Qualification Type by name. If the provided name matches
more than one Qualification, check to see if any of the results
match the provided name exactly. If there's an exact match, return
that Qualification. Otherwise, raise an exception.
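A hedged usage sketch; the credentials and qualification name are placeholders, and the call issues real MTurk API requests against the sandbox endpoint.
service = MTurkService(
    aws_access_key_id='AKIA...',        # placeholder credentials
    aws_secret_access_key='...',
    region_name='us-east-1',
    sandbox=True,
)
qualification = service.get_qualification_type_by_name('my experiment qualification')
if qualification is not None:
    print(qualification['name'])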
|
https://github.com/dallinger/dallinger/blob/2bc309c422935d372a7568cc18340e3b5b3f6a21/dallinger/mturk.py#L312-L345
|
import boto3
import datetime
import logging
import time
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
from cached_property import cached_property
logger = logging.getLogger(__file__)
PERCENTAGE_APPROVED_REQUIREMENT_ID = "000000000000000000L0"
LOCALE_REQUIREMENT_ID = "00000000000000000071"
MAX_SUPPORTED_BATCH_SIZE = 100
class MTurkServiceException(Exception):
class RemoteAPICallTimedOut(MTurkServiceException):
class DuplicateQualificationNameError(MTurkServiceException):
class QualificationNotFoundException(MTurkServiceException):
class WorkerLacksQualification(MTurkServiceException):
class RevokedQualification(MTurkServiceException):
class NonExistentSubscription(MTurkServiceException):
class SNSService(object):
max_wait_secs = 12
def __init__(
self, aws_access_key_id, aws_secret_access_key, region_name, confirm=True
):
self.aws_key = aws_access_key_id
self.aws_secret = aws_secret_access_key
self.region_name = region_name
self.do_confirm_subscription = confirm
@cached_property
def _sns(self):
session = boto3.session.Session(
aws_access_key_id=self.aws_key,
aws_secret_access_key=self.aws_secret,
region_name=self.region_name,
)
return session.client("sns")
def confirm_subscription(self, token, topic):
        logger.warning("Confirming SNS subscription.")
self._sns.confirm_subscription(
Token=token,
TopicArn=topic,
)
def create_subscription(self, experiment_id, notification_url):
logger.warning(
"Creating new SNS subscription for {}...".format(notification_url)
)
protocol = "https" if notification_url.startswith("https") else "http"
topic = self._sns.create_topic(Name=experiment_id)
subscription = self._sns.subscribe(
TopicArn=topic["TopicArn"],
Protocol=protocol,
Endpoint=notification_url,
ReturnSubscriptionArn=True,
)
start = time.time()
while self._awaiting_confirmation(subscription):
elapsed = time.time() - start
if elapsed > self.max_wait_secs:
raise RemoteAPICallTimedOut("Too long")
logger.warning("Awaiting SNS subscription confirmation...")
time.sleep(1)
logger.warning("Subscription confirmed.")
return topic["TopicArn"]
def cancel_subscription(self, experiment_id):
logger.warning("Cancelling SNS subscription")
topic_id = self._get_sns_topic_for_experiment(experiment_id)
if topic_id is None:
raise NonExistentSubscription(
"No SNS subscription found for {}".format(experiment_id)
)
self._sns.delete_topic(TopicArn=topic_id)
return True
def _awaiting_confirmation(self, subscription):
if not self.do_confirm_subscription:
return False
report = self._sns.get_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"]
)
status = report["Attributes"]["PendingConfirmation"]
return status == "true"
def _get_sns_topic_for_experiment(self, experiment_id):
experiment_topics = (
t for t in self._all_topics() if t.endswith(":" + experiment_id)
)
try:
return next(experiment_topics)
except StopIteration:
return None
def _all_topics(self):
done = False
next_token = None
while not done:
if next_token is not None:
response = self._sns.list_topics(NextToken=next_token)
else:
response = self._sns.list_topics()
if response:
for t in response["Topics"]:
yield t["TopicArn"]
if "NextToken" in response:
next_token = response["NextToken"]
else:
done = True
class MTurkQuestions(object):
@staticmethod
def external(ad_url, frame_height=600):
q = (
'<ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/'
'AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">'
"<ExternalURL>{}</ExternalURL>"
"<FrameHeight>{}</FrameHeight></ExternalQuestion>"
)
return q.format(ad_url, frame_height)
@staticmethod
def compensation(title="Compensation HIT", sandbox=False, frame_height=600):
if sandbox:
action = "https://workersandbox.mturk.com/mturk/externalSubmit"
else:
action = "https://www.mturk.com/mturk/externalSubmit"
q = (
'<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">'
"<HTMLContent><![CDATA[<!DOCTYPE html><html>"
"<head>"
'<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>'
'<script type="text/javascript" src="https://s3.amazonaws.com/mturk-public/externalHIT_v1.js"></script>'
"</head>"
"<body>"
'<form name="mturk_form" method="post" id="mturk_form" action="{}">'
'<input type="hidden" value="" name="assignmentId" id="assignmentId"/>'
"<h1>{}</h1>"
"<p>We are sorry that you encountered difficulties with our experiment. "
"We will compensate you immediately upon submission of this HIT.</p>"
'<input type="hidden" name="some-input-required" value="anything" ></input>'
'<input type="submit" id="submitButton" value="Submit" /></p></form>'
'<script language="Javascript">turkSetAssignmentID();</script>'
"</body></html>]]>"
"</HTMLContent>"
"<FrameHeight>{}</FrameHeight>"
"</HTMLQuestion>"
)
return q.format(action, title, frame_height)
class MTurkQualificationRequirements(object):
@staticmethod
def min_approval(percentage):
return {
"QualificationTypeId": PERCENTAGE_APPROVED_REQUIREMENT_ID,
"Comparator": "GreaterThanOrEqualTo",
"IntegerValues": [percentage],
"RequiredToPreview": True,
}
@staticmethod
def restrict_to_countries(countries):
return {
"QualificationTypeId": LOCALE_REQUIREMENT_ID,
"Comparator": "EqualTo",
"LocaleValues": [{"Country": country} for country in countries],
"RequiredToPreview": True,
}
@staticmethod
def must_have(qualification_id):
return {
"QualificationTypeId": qualification_id,
"Comparator": "Exists",
"RequiredToPreview": True,
}
@staticmethod
def must_not_have(qualification_id):
return {
"QualificationTypeId": qualification_id,
"Comparator": "DoesNotExist",
"RequiredToPreview": True,
}
class MTurkService(object):
def __init__(
self,
aws_access_key_id,
aws_secret_access_key,
region_name,
sandbox=True,
max_wait_secs=0,
):
self.aws_key = aws_access_key_id
self.aws_secret = aws_secret_access_key
self.region_name = region_name
self.is_sandbox = sandbox
self.max_wait_secs = max_wait_secs
@cached_property
def mturk(self):
session = boto3.session.Session(
aws_access_key_id=self.aws_key,
aws_secret_access_key=self.aws_secret,
region_name=self.region_name,
)
return session.client(
"mturk", endpoint_url=self.host, region_name=self.region_name
)
@cached_property
def sns(self):
return SNSService(
aws_access_key_id=self.aws_key,
aws_secret_access_key=self.aws_secret,
region_name=self.region_name,
)
@property
def host(self):
if self.is_sandbox:
template = u"https://mturk-requester-sandbox.{}.amazonaws.com"
else:
template = u"https://mturk-requester.{}.amazonaws.com"
return template.format(self.region_name)
def account_balance(self):
response = self.mturk.get_account_balance()
return float(response["AvailableBalance"])
def check_credentials(self):
try:
return bool(self.mturk.get_account_balance())
except NoCredentialsError:
raise MTurkServiceException("No AWS credentials set!")
except ClientError:
raise MTurkServiceException("Invalid AWS credentials!")
except Exception as ex:
raise MTurkServiceException(
"Error checking credentials: {}".format(str(ex))
)
def confirm_subscription(self, token, topic):
self.sns.confirm_subscription(token=token, topic=topic)
def create_qualification_type(self, name, description, status="Active"):
try:
response = self.mturk.create_qualification_type(
Name=name, Description=description, QualificationTypeStatus=status
)
except Exception as ex:
if "already created a QualificationType with this name" in str(ex):
raise DuplicateQualificationNameError(str(ex))
else:
raise
return self._translate_qtype(response["QualificationType"])
|
MIT License
|
hbldh/bleak
|
bleak/backends/winrt/characteristic.py
|
BleakGATTCharacteristicWinRT.description
|
python
|
def description(self) -> str:
return self.obj.user_description
|
Description for this characteristic
|
https://github.com/hbldh/bleak/blob/87773f24be3e2a6fba2b76b17dcfc7d4b719d599/bleak/backends/winrt/characteristic.py#L95-L97
|
from uuid import UUID
from typing import List, Union
from bleak_winrt.windows.devices.bluetooth.genericattributeprofile import (
GattCharacteristicProperties,
)
from bleak.backends.characteristic import BleakGATTCharacteristic
from bleak.backends.descriptor import BleakGATTDescriptor
from bleak.backends.winrt.descriptor import BleakGATTDescriptorWinRT
_GattCharacteristicsPropertiesMap = {
GattCharacteristicProperties.NONE: (
"None",
"The characteristic doesn’t have any properties that apply",
),
GattCharacteristicProperties.BROADCAST: (
"Broadcast".lower(),
"The characteristic supports broadcasting",
),
GattCharacteristicProperties.READ: (
"Read".lower(),
"The characteristic is readable",
),
GattCharacteristicProperties.WRITE_WITHOUT_RESPONSE: (
"Write-Without-Response".lower(),
"The characteristic supports Write Without Response",
),
GattCharacteristicProperties.WRITE: (
"Write".lower(),
"The characteristic is writable",
),
GattCharacteristicProperties.NOTIFY: (
"Notify".lower(),
"The characteristic is notifiable",
),
GattCharacteristicProperties.INDICATE: (
"Indicate".lower(),
"The characteristic is indicatable",
),
GattCharacteristicProperties.AUTHENTICATED_SIGNED_WRITES: (
"Authenticated-Signed-Writes".lower(),
"The characteristic supports signed writes",
),
GattCharacteristicProperties.EXTENDED_PROPERTIES: (
"Extended-Properties".lower(),
"The ExtendedProperties Descriptor is present",
),
GattCharacteristicProperties.RELIABLE_WRITES: (
"Reliable-Writes".lower(),
"The characteristic supports reliable writes",
),
GattCharacteristicProperties.WRITABLE_AUXILIARIES: (
"Writable-Auxiliaries".lower(),
"The characteristic has writable auxiliaries",
),
}
class BleakGATTCharacteristicWinRT(BleakGATTCharacteristic):
def __init__(self, obj: GattCharacteristicProperties):
super().__init__(obj)
self.__descriptors = []
self.__props = [
_GattCharacteristicsPropertiesMap[v][0]
for v in [2 ** n for n in range(10)]
if (self.obj.characteristic_properties & v)
]
@property
def service_uuid(self) -> str:
return str(self.obj.service.uuid)
@property
def service_handle(self) -> int:
return int(self.obj.service.attribute_handle)
@property
def handle(self) -> int:
return int(self.obj.attribute_handle)
@property
def uuid(self) -> str:
return str(self.obj.uuid)
@property
|
MIT License
|
kane610/aiounifi
|
aiounifi/wlan.py
|
Wlan.usergroup_id
|
python
|
def usergroup_id(self) -> str:
return self.raw["usergroup_id"]
|
WLAN user group ID.
|
https://github.com/kane610/aiounifi/blob/a54f898f8df5fc9ab1a2e0a1af85833ca4a68fd3/aiounifi/wlan.py#L162-L164
|
from .api import APIItem, APIItems
URL = "/rest/wlanconf"
class Wlan(APIItem):
@property
def id(self) -> str:
return self.raw["_id"]
@property
def bc_filter_enabled(self) -> bool:
return self.raw.get("bc_filter_enabled", False)
@property
def bc_filter_list(self) -> list:
return self.raw["bc_filter_list"]
@property
def dtim_mode(self) -> str:
return self.raw["dtim_mode"]
@property
def dtim_na(self) -> int:
return self.raw["dtim_na"]
@property
def dtim_ng(self) -> int:
return self.raw["dtim_ng"]
@property
def enabled(self) -> bool:
return self.raw["enabled"]
@property
def group_rekey(self) -> int:
return self.raw["group_rekey"]
@property
def is_guest(self) -> bool:
return self.raw.get("is_guest", False)
@property
def mac_filter_enabled(self) -> bool:
return self.raw.get("mac_filter_enabled", False)
@property
def mac_filter_list(self) -> list:
return self.raw["mac_filter_list"]
@property
def mac_filter_policy(self) -> str:
return self.raw["mac_filter_policy"]
@property
def minrate_na_advertising_rates(self) -> bool:
return self.raw["minrate_na_advertising_rates"]
@property
def minrate_na_beacon_rate_kbps(self) -> int:
return self.raw["minrate_na_beacon_rate_kbps"]
@property
def minrate_na_data_rate_kbps(self) -> int:
return self.raw["minrate_na_data_rate_kbps"]
@property
def minrate_na_enabled(self) -> bool:
return self.raw.get("minrate_na_enabled", False)
@property
def minrate_na_mgmt_rate_kbps(self) -> int:
return self.raw["minrate_na_mgmt_rate_kbps"]
@property
def minrate_ng_advertising_rates(self) -> bool:
return self.raw["minrate_ng_advertising_rates"]
@property
def minrate_ng_beacon_rate_kbps(self) -> int:
return self.raw["minrate_ng_beacon_rate_kbps"]
@property
def minrate_ng_cck_rates_enabled(self) -> bool:
return self.raw.get("minrate_ng_cck_rates_enabled", False)
@property
def minrate_ng_data_rate_kbps(self) -> int:
return self.raw["minrate_ng_data_rate_kbps"]
@property
def minrate_ng_enabled(self) -> bool:
return self.raw.get("minrate_ng_enabled", False)
@property
def minrate_ng_mgmt_rate_kbps(self) -> int:
return self.raw["minrate_ng_mgmt_rate_kbps"]
@property
def name(self) -> str:
return self.raw["name"]
@property
def name_combine_enabled(self) -> bool:
return self.raw.get("name_combine_enabled", True)
@property
def name_combine_suffix(self) -> str:
return self.raw.get("name_combine_suffix", "")
@property
def no2ghz_oui(self) -> bool:
return self.raw["no2ghz_oui"]
@property
def schedule(self) -> list:
return self.raw["schedule"]
@property
def security(self) -> str:
return self.raw["security"]
@property
def site_id(self) -> str:
return self.raw["site_id"]
@property
|
MIT License
|
segatalab/lefse
|
lefsebiom/ValidateData.py
|
ValidateData.funcIsValidStringInt
|
python
|
def funcIsValidStringInt(parameterValue):
if not ValidateData.funcIsValidStringType(parameterValue):
return False
try:
int(parameterValue)
except:
return False
return True
|
Validates that a parameter is a string which can be parsed as an integer.
:param parameterValue: Value to be evaluated.
:type parameterValue: Unknown
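Illustrative calls (this module targets Python 2, where str values satisfy types.StringType):
print(ValidateData.funcIsValidStringInt("42"))   # True: a string that parses as an integer
print(ValidateData.funcIsValidStringInt("4.2"))  # False: int("4.2") raises ValueError
print(ValidateData.funcIsValidStringInt(42))     # False: not a string type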
|
https://github.com/segatalab/lefse/blob/77283af5734c83e07eb59694512da4cbba9f8a98/lefsebiom/ValidateData.py#L213-L230
|
__author__ = "Timothy Tickle"
__copyright__ = "Copyright 2012"
__credits__ = ["Timothy Tickle"]
__license__ = "MIT"
__maintainer__ = "Timothy Tickle"
__email__ = "ttickle@sph.harvard.edu"
__status__ = "Development"
from types import *
import decimal
import os
import re
import string
class ValidateData:
@staticmethod
def funcIsValidBoolean(parameterValue):
if parameterValue == None:
return False
if not type(parameterValue) is BooleanType:
return False
return True
@staticmethod
def funcIsTrue(parameterValue):
if(ValidateData.funcIsValidBoolean(parameterValue)):
if(parameterValue == True):
return True
return False
@staticmethod
def funcIsFalse(parameterValue):
if(ValidateData.funcIsValidBoolean(parameterValue)):
if(parameterValue == False):
return True
return False
@staticmethod
def funcIsValidInteger(parameterValue):
if (parameterValue == None):
return False
if not type(parameterValue) is IntType:
return False
return True
@staticmethod
def funcIsValidPositiveInteger(parameterValue, tempZero = False):
if not ValidateData.funcIsValidInteger(parameterValue):
return False
if (parameterValue < 0):
return False
if(parameterValue == 0):
return tempZero
return True
@staticmethod
def funcIsValidNumeric(parameterValue):
if (parameterValue == None):
return False
if((type(parameterValue) == IntType)or(type(parameterValue) == LongType)or(type(parameterValue) == FloatType)or(type(parameterValue) == ComplexType)or(str(type(parameterValue)) == "<type 'numpy.float64'>")):
if(not type(parameterValue) == BooleanType):
return True
return False
@staticmethod
def funcIsValidStringType(parameterValue):
if parameterValue == None:
return False
if not type(parameterValue) is StringType:
return False
return True
@staticmethod
def funcIsValidString(parameterValue):
if not ValidateData.funcIsValidStringType(parameterValue):
return False
if parameterValue.strip() == "":
return False
return True
@staticmethod
|
MIT License
|
bitlabstudio/django-dashboard-app
|
dashboard_app/widget_base.py
|
DashboardWidgetBase.get_setting
|
python
|
def get_setting(self, setting_name, default=None):
try:
setting = models.DashboardWidgetSettings.objects.get(
widget_name=self.get_name(),
setting_name=setting_name)
except models.DashboardWidgetSettings.DoesNotExist:
setting = default
return setting
|
Returns the setting for this widget from the database.
:setting_name: The name of the setting.
:default: Optional default value if the setting cannot be found.
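A hedged widget sketch; the widget and setting names are placeholders. Note the return value is a DashboardWidgetSettings instance (or the default), not a raw value.
class UserCountWidget(DashboardWidgetBase):
    widget_name = 'user_count'
    def get_context_data(self):
        ctx = super(UserCountWidget, self).get_context_data()
        # DashboardWidgetSettings instance if stored, otherwise None
        ctx['limit_setting'] = self.get_setting('limit', default=None)
        return ctx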
|
https://github.com/bitlabstudio/django-dashboard-app/blob/ed98f2bca91a4ced36d0dd1aa1baee78e989cf64/dashboard_app/widget_base.py#L52-L66
|
from django.utils.timezone import now
from . import models
class DashboardWidgetBase(object):
update_interval = 1
template_name = 'dashboard_app/partials/widget.html'
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
def get_context_data(self):
return {'is_rendered': True, }
def get_last_update(self):
instance, created = models.DashboardWidgetLastUpdate.objects.get_or_create(
widget_name=self.get_name())
return instance
def get_name(self):
if hasattr(self, 'widget_name'):
return self.widget_name
return self.__class__.__name__
|
MIT License
|
kubeflow/testing
|
py/kubeflow/testing/create_unique_kf_instance.py
|
add_extra_users
|
python
|
def add_extra_users(project, extra_users):
logging.info("Adding additional IAM roles")
extra_users = extra_users.strip()
users = extra_users.split(",")
for user in users:
if not user:
continue
logging.info("Granting iap.httpsResourceAccessor to %s", user)
util.run(["gcloud", "projects",
"add-iam-policy-binding", project,
"--member=" + user,
"--role=roles/iap.httpsResourceAccessor"])
|
Grant appropriate permissions to additional users.
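A hedged invocation sketch; the project and members are placeholders, and each member needs an IAM prefix such as user: or serviceAccount: because the value is passed straight to --member. The call shells out to gcloud, so credentials must already be configured.
add_extra_users(
    "my-gcp-project",
    "user:alice@example.com,serviceAccount:kf-ci@my-gcp-project.iam.gserviceaccount.com")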
|
https://github.com/kubeflow/testing/blob/944bd42f519eb37b14b0ab1dac6c2f36db4bc91f/py/kubeflow/testing/create_unique_kf_instance.py#L294-L306
|
import argparse
import datetime
import json
import logging
import os
import re
import requests
import retrying
import tempfile
import uuid
import yaml
from googleapiclient import discovery
from googleapiclient import errors
from google.cloud import storage
from kubeflow.testing import gcp_util
from kubeflow.testing import util
from kubernetes import client as k8s_client
from kubernetes.client import rest
from oauth2client.client import GoogleCredentials
KFDEF_V1ALPHA1 = "kfdef.apps.kubeflow.org/v1alpha1"
def build_kfctl_go(args):
util.run(["make", "build-kfctl"], cwd=args.kubeflow_repo)
kfctl_path = os.path.join(args.kubeflow_repo, "bin", "kfctl")
return kfctl_path
def build_v06_spec(config_spec, project, email, zone, setup_project):
config_spec["spec"]["project"] = project
config_spec["spec"]["email"] = email
config_spec["spec"]["zone"] = zone
config_spec["spec"]["skipInitProject"] = not setup_project
return config_spec
def build_v07_spec(config_spec, project, email, zone, setup_project):
gcp_plugin = None
for p in config_spec["spec"]["plugins"]:
if p["kind"] != "KfGcpPlugin":
continue
gcp_plugin = p
if not gcp_plugin:
raise ValueError("No gcpplugin found in spec")
gcp_plugin["spec"]["project"] = project
gcp_plugin["spec"]["email"] = email
gcp_plugin["spec"]["zone"] = zone
gcp_plugin["spec"]["skipInitProject"] = not setup_project
return config_spec
class ApiNotEnabledError(Exception):
pass
def retry_if_api_not_enabled_error(exception):
return isinstance(exception, ApiNotEnabledError)
@retrying.retry(stop_max_delay=1*60*1000)
def check_if_kfapp_exists(project, name, zone):
credentials = GoogleCredentials.get_application_default()
dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
deployments_client = dm.deployments()
enable_api = False
try:
deployments_client.get(project=project, deployment=name).execute()
except errors.HttpError as e:
if not e.content:
raise
error_content = json.loads(e.content)
if error_content.get("error", {}).get("code", 0) == 404:
return False
elif error_content.get("error", {}).get("code", 0) == 403:
logging.info("Fetching deployment %s in project %s returned error:\n%s",
name, project, error_content)
enable_api = True
else:
raise
if enable_api:
logging.info("Enabling the deployment manager api.")
util.run(["gcloud", "--project=" + project, "services", "enable",
"deploymentmanager.googleapis.com"])
logging.info("Api enabled; raising ApiNotEnabledError to force retry")
raise ApiNotEnabledError
util.run(["gcloud", "--project=" + project, "container", "clusters",
"get-credentials", "--zone=" + zone, name])
logging.info("Checking if project %s kfapp %s finished setup.", project, name)
util.load_kube_credentials()
api_client = k8s_client.ApiClient()
v1 = k8s_client.CoreV1Api(api_client)
ingress_namespace = "istio-system"
ingress_name = "envoy-ingress"
extensions = k8s_client.ExtensionsV1beta1Api(api_client)
missing_ingress = True
try:
logging.info("Trying to read ingress %s.%s", ingress_name,
ingress_namespace)
extensions.read_namespaced_ingress(ingress_name, ingress_namespace)
missing_ingress = False
logging.info("Ingress %s.%s exists", ingress_name, ingress_namespace)
except rest.ApiException as e:
if e.status == 404:
logging.info("Project: %s, KFApp: %s is missing ingress %s.%s",
project, name, ingress_namespace, ingress_name)
missing_ingress = True
else:
raise
if missing_ingress:
service_name = "istio-ingressgateway"
logging.info("ingress %s.%s exists; checking if service %s.%s exists",
ingress_namespace, ingress_name, ingress_namespace,
service_name)
has_service = False
try:
v1.read_namespaced_service(service_name, ingress_namespace)
has_service = True
except rest.ApiException as e:
if e.status == 404:
logging.info("Project: %s, KFApp: %s is missing service %s.%s",
project, name, ingress_namespace, service_name)
else:
raise
if has_service:
logging.info("Deleting service: %s.%s", ingress_namespace, service_name)
v1.delete_namespaced_service(service_name, ingress_namespace,
body=k8s_client.V1DeleteOptions())
logging.info("Deleted service: %s.%s", ingress_namespace, service_name)
return False
return True
def deploy_with_kfctl_go(kfctl_path, args, app_dir, env, labels=None):
logging.warning("Loading configs %s.", args.kfctl_config)
if args.kfctl_config.startswith("http"):
response = requests.get(args.kfctl_config)
raw_config = response.content
else:
with open(args.kfctl_config) as hf:
raw_config = hf.read()
config_spec = yaml.load(raw_config)
email = args.email
if not email:
logging.info("email not set trying to get default from gcloud")
email = util.run(["gcloud", "auth", "list",
"--filter", "status:ACTIVE", "--format", "value(account)"])
if not email:
raise ValueError("Could not determine GCP account being used.")
kfdef_version = config_spec["apiVersion"].strip().lower()
if kfdef_version == KFDEF_V1ALPHA1:
config_spec = build_v06_spec(config_spec, args.project, email, args.zone,
args.setup_project)
else:
config_spec = build_v07_spec(config_spec, args.project, email, args.zone,
args.setup_project)
config_spec["spec"] = util.filter_spartakus(config_spec["spec"])
if "name" in config_spec["metadata"]:
logging.info("Deleting name in kfdef spec.")
del config_spec["metadata"]["name"]
app_name = os.path.basename(app_dir)
if not "labels" in config_spec["metadata"]:
config_spec["metadata"]["labels"] = {}
if labels:
config_spec["metadata"]["labels"].update(labels)
logging.info("KFDefSpec:\n%s", yaml.safe_dump(config_spec))
if kfdef_version == KFDEF_V1ALPHA1:
logging.info("Deploying using v06 syntax")
logging.info("Checking if deployment %s already exists in project %s",
args.project, app_name)
if check_if_kfapp_exists(args.project, app_name, args.zone):
logging.info("Deployment %s already exists in project %s; not "
"redeploying", args.project, app_name)
return
with tempfile.NamedTemporaryFile(prefix="tmpkf_config", suffix=".yaml",
delete=False) as hf:
config_file = hf.name
logging.info("Writing file %s", config_file)
yaml.dump(config_spec, hf)
util.run([kfctl_path, "init", app_dir, "-V", "--config=" + config_file],
env=env)
util.run([kfctl_path, "generate", "-V", "all"], env=env, cwd=app_dir)
util.run([kfctl_path, "apply", "-V", "all"], env=env, cwd=app_dir)
else:
logging.info("Deploying using v07 syntax")
if not os.path.exists(app_dir):
logging.info("Creating app dir %s", app_dir)
os.makedirs(app_dir)
config_file = os.path.join(app_dir, "kf_config.yaml")
with open(config_file, "w") as hf:
logging.info("Writing file %s", config_file)
yaml.dump(config_spec, hf)
util.run([kfctl_path, "apply", "-V", "-f", config_file], env=env)
if args.use_self_cert:
logging.info("Configuring self signed certificate")
util.load_kube_credentials()
api_client = k8s_client.ApiClient()
ingress_namespace = "istio-system"
ingress_name = "envoy-ingress"
tls_endpoint = "{0}.endpoints.{1}.cloud.goog".format(app_name, args.project)
logging.info("Configuring self signed cert for %s", tls_endpoint)
util.use_self_signed_for_ingress(ingress_namespace, ingress_name,
tls_endpoint, api_client)
@retrying.retry(stop_max_delay=4*60*1000)
|
Apache License 2.0
|
asana/python-asana
|
asana/resources/tasks.py
|
Tasks.add_comment
|
python
|
def add_comment(self, task, params={}, **options):
path = "/tasks/%s/stories" % (task)
return self.client.post(path, params, **options)
|
Adds a comment to a task. The comment will be authored by the
currently authenticated user, and timestamped when the server receives
the request.
Returns the full record for the new story added to the task.
Parameters
----------
task : {Id} Globally unique identifier for the task.
[data] : {Object} Data for the request
- text : {String} The plain text of the comment to add.
|
https://github.com/asana/python-asana/blob/8b3f0677d8fcca81b5757d586a388ef9aeb428eb/asana/resources/tasks.py#L433-L447
|
from .gen.tasks import _Tasks
class Tasks(_Tasks):
def set_parent(self, task_id, params={}, **options):
path = '/tasks/%s/setParent' % (task_id)
return self.client.post(path, params, **options)
def create(self, params={}, **options):
return self.client.post("/tasks", params, **options)
def create_in_workspace(self, workspace, params={}, **options):
path = "/workspaces/%s/tasks" % (workspace)
return self.client.post(path, params, **options)
def find_by_id(self, task, params={}, **options):
path = "/tasks/%s" % (task)
return self.client.get(path, params, **options)
def update(self, task, params={}, **options):
path = "/tasks/%s" % (task)
return self.client.put(path, params, **options)
def delete(self, task, params={}, **options):
path = "/tasks/%s" % (task)
return self.client.delete(path, params, **options)
def duplicate_task(self, task, params={}, **options):
path = "/tasks/%s/duplicate" % (task)
return self.client.post(path, params, **options)
def find_by_project(self, project, params={}, **options):
path = "/projects/%s/tasks" % (project)
return self.client.get_collection(path, params, **options)
def find_by_tag(self, tag, params={}, **options):
path = "/tags/%s/tasks" % (tag)
return self.client.get_collection(path, params, **options)
def find_by_section(self, section, params={}, **options):
path = "/sections/%s/tasks" % (section)
return self.client.get_collection(path, params, **options)
def find_by_user_task_list(self, user_task_list, params={}, **options):
path = "/user_task_lists/%s/tasks" % (user_task_list)
return self.client.get_collection(path, params, **options)
def find_all(self, params={}, **options):
return self.client.get_collection("/tasks", params, **options)
def search_in_workspace(self, workspace, params={}, **options):
path = "/workspaces/%s/tasks/search" % (workspace)
return self.client.get_collection(path, params, **options)
def dependencies(self, task, params={}, **options):
path = "/tasks/%s/dependencies" % (task)
return self.client.get(path, params, **options)
def dependents(self, task, params={}, **options):
path = "/tasks/%s/dependents" % (task)
return self.client.get(path, params, **options)
def add_dependencies(self, task, params={}, **options):
path = "/tasks/%s/addDependencies" % (task)
return self.client.post(path, params, **options)
def add_dependents(self, task, params={}, **options):
path = "/tasks/%s/addDependents" % (task)
return self.client.post(path, params, **options)
def remove_dependencies(self, task, params={}, **options):
path = "/tasks/%s/removeDependencies" % (task)
return self.client.post(path, params, **options)
def remove_dependents(self, task, params={}, **options):
path = "/tasks/%s/removeDependents" % (task)
return self.client.post(path, params, **options)
def add_followers(self, task, params={}, **options):
path = "/tasks/%s/addFollowers" % (task)
return self.client.post(path, params, **options)
def remove_followers(self, task, params={}, **options):
path = "/tasks/%s/removeFollowers" % (task)
return self.client.post(path, params, **options)
def projects(self, task, params={}, **options):
path = "/tasks/%s/projects" % (task)
return self.client.get_collection(path, params, **options)
def add_project(self, task, params={}, **options):
path = "/tasks/%s/addProject" % (task)
return self.client.post(path, params, **options)
def remove_project(self, task, params={}, **options):
path = "/tasks/%s/removeProject" % (task)
return self.client.post(path, params, **options)
def tags(self, task, params={}, **options):
path = "/tasks/%s/tags" % (task)
return self.client.get_collection(path, params, **options)
def add_tag(self, task, params={}, **options):
path = "/tasks/%s/addTag" % (task)
return self.client.post(path, params, **options)
def remove_tag(self, task, params={}, **options):
path = "/tasks/%s/removeTag" % (task)
return self.client.post(path, params, **options)
def subtasks(self, task, params={}, **options):
path = "/tasks/%s/subtasks" % (task)
return self.client.get_collection(path, params, **options)
def add_subtask(self, task, params={}, **options):
path = "/tasks/%s/subtasks" % (task)
return self.client.post(path, params, **options)
def stories(self, task, params={}, **options):
path = "/tasks/%s/stories" % (task)
return self.client.get_collection(path, params, **options)
|
MIT License
|
google/mobly
|
mobly/logger.py
|
get_log_file_timestamp
|
python
|
def get_log_file_timestamp(delta=None):
return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)
|
Returns a timestamp in the format used for log file names.
Default is current time. If a delta is set, the return value will be
the current time offset by delta seconds.
Args:
delta: Number of seconds to offset from current time; can be negative.
Returns:
A timestamp in log file name format with the offset applied.
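A quick usage sketch of the function above; the timestamp values in the comments are illustrative.
from mobly import logger

# Current time in log file name format, e.g. '05-21-2024_14-03-07-123'.
stamp_now = logger.get_log_file_timestamp()

# Same format shifted 90 seconds forward; a negative delta shifts backwards.
stamp_later = logger.get_log_file_timestamp(delta=90)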
|
https://github.com/google/mobly/blob/542a78a7198256d172f56546ab8a6493166b3d9b/mobly/logger.py#L156-L168
|
import datetime
import logging
import os
import re
import sys
from mobly import records
from mobly import utils
LINUX_MAX_FILENAME_LENGTH = 255
WINDOWS_MAX_FILENAME_LENGTH = 237
WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS = {
'<':
'-',
'>':
'-',
':':
'-',
'"':
'_',
'/':
'_',
'\\':
'_',
'|':
',',
'?':
',',
'*':
',',
chr(0):
'0',
}
WINDOWS_RESERVED_FILENAME_REGEX = re.compile(
r'^(CON|PRN|AUX|NUL|(COM|LPT)[0-9])(\.[^.]*)?$', re.IGNORECASE)
WINDOWS_RESERVED_FILENAME_PREFIX = 'mobly_'
log_line_format = '%(asctime)s.%(msecs).03d %(levelname)s %(message)s'
log_line_time_format = '%m-%d %H:%M:%S'
log_line_timestamp_len = 18
logline_timestamp_re = re.compile(r'\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d')
def _parse_logline_timestamp(t):
date, time = t.split(' ')
month, day = date.split('-')
h, m, s = time.split(':')
s, ms = s.split('.')
return (month, day, h, m, s, ms)
def is_valid_logline_timestamp(timestamp):
if len(timestamp) == log_line_timestamp_len:
if logline_timestamp_re.match(timestamp):
return True
return False
def logline_timestamp_comparator(t1, t2):
dt1 = _parse_logline_timestamp(t1)
dt2 = _parse_logline_timestamp(t2)
for u1, u2 in zip(dt1, dt2):
if u1 < u2:
return -1
elif u1 > u2:
return 1
return 0
def _get_timestamp(time_format, delta=None):
t = datetime.datetime.now()
if delta:
t = t + datetime.timedelta(seconds=delta)
return t.strftime(time_format)[:-3]
def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
s, ms = divmod(epoch_time, 1000)
d = datetime.datetime.fromtimestamp(s, tz=time_zone)
return d.strftime('%m-%d %H:%M:%S.') + str(ms)
def get_log_line_timestamp(delta=None):
return _get_timestamp('%m-%d %H:%M:%S.%f', delta)
|
Apache License 2.0
|
google-research/selfstudy-adversarial-robustness
|
training/train_ls.py
|
SmoothLabelTrainLoop.loss
|
python
|
def loss(self, model, x, y, return_preds=False, wd=1e-4):
logits = model(x, training=True)
y_ls = tf.one_hot(y, 10)
y_ls = y_ls + .125
y_ls /= tf.reduce_sum(y_ls, axis=1, keepdims=True)
l_xe = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=y_ls))
total_loss = l_xe
if return_preds:
return total_loss, logits
else:
return total_loss
|
Compute the loss of the neural network on a given (x,y) tuple.
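To make the smoothing step concrete, here is a small NumPy check of the target distribution the loss builds, assuming 10 classes as in the code above.
import numpy as np

# Smoothed target for one example whose true class is 3.
y_ls = np.eye(10)[3] + 0.125   # one-hot plus 0.125 on every class
y_ls /= y_ls.sum()             # renormalize; the sum is 1 + 10 * 0.125 = 2.25
# True class weight: 1.125 / 2.25 = 0.5; each other class: 0.125 / 2.25 ≈ 0.0556
print(np.round(y_ls, 4))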
|
https://github.com/google-research/selfstudy-adversarial-robustness/blob/15d1c0126e3dbaa205862c39e31d4e69afc08167/training/train_ls.py#L35-L52
|
from absl import app
from absl import flags
from absl import logging
import os
import numpy as np
import tensorflow as tf
import common.data as data
from train_baseline import TrainLoop
FLAGS = flags.FLAGS
class SmoothLabelTrainLoop(TrainLoop):
def __init__(self, num_filters, num_classes, input_shape):
super().__init__(num_filters, num_classes, input_shape)
|
Apache License 2.0
|