Dataset columns: repo_name (string, length 5 to 92), path (string, length 4 to 232), copies (string, 19 classes), size (string, length 4 to 7), content (string, length 721 to 1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51 to 99.9), line_max (int64, 15 to 997), alpha_frac (float64, 0.25 to 0.97), autogenerated (bool, 1 class)
| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
rsmuc/health_monitoring_plugins | test/testagent/snmp_fd.py | 1 | 1194 |
import ctypes
import netsnmpapi
class fd_set(ctypes.Structure):
_fields_ = [('fds_bits', ctypes.c_long * 32)]
class Timeval(ctypes.Structure):
_fields_ = [("tv_sec", ctypes.c_long), ("tv_usec", ctypes.c_long)]
def FD_SET(fd, fd_set):
"""Set fd in fd_set, where fd can may be in range of 0..FD_SETSIZE-1 (FD_SETSIZE is 1024 on Linux)."""
l64_offset = fd / 64
bit_in_l64_idx = fd % 64;
fd_set.fds_bits[l64_offset] = fd_set.fds_bits[l64_offset] | (2**bit_in_l64_idx)
def FD_ISSET(fd, fd_set):
"""Check if fd is in fd_set."""
l64_offset = fd // 64
bit_in_l64_idx = fd % 64
if fd_set.fds_bits[l64_offset] & (2**bit_in_l64_idx) > 0:
return True
return False
def netsnmp_event_fd():
"""Return each netsnmp file descriptor by number."""
maxfd = ctypes.c_int(0)
fdset = fd_set()
timeval = Timeval(0, 0)
fakeblock = ctypes.c_int(1)
netsnmpapi.libnsa.snmp_select_info(
ctypes.byref(maxfd),
ctypes.byref(fdset),
ctypes.byref(timeval),
ctypes.byref(fakeblock)
)
for fd in range(0, maxfd.value):
if FD_ISSET(fd, fdset):
yield fd
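# --- Editor's sketch below: minimal usage example, not part of the original module ---
# It simply waits until one of the descriptors reported by net-snmp becomes
# readable; the helper name and the timeout value are illustrative assumptions.
def wait_for_snmp_fd(timeout=1.0):
    import select
    fds = list(netsnmp_event_fd())
    readable, _, _ = select.select(fds, [], [], timeout)
    return readable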
| gpl-2.0 | 7,479,397,408,930,141,000 | 29.421053 | 106 | 0.582077 | false |
trujunzhang/djzhang-targets | cwfuqs/cwfuqs/settings.py | 1 | 2988 |
# -*- coding: utf-8 -*-
# Scrapy settings for cwfuqs project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'cwfuqs'
SPIDER_MODULES = ['cwfuqs.spiders']
NEWSPIDER_MODULE = 'cwfuqs.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cwfuqs (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'cwfuqs.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'cwfuqs.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'cwfuqs.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
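# --- Editor's sketch (not in the original settings file) ---
# Example of how a few of the commented knobs above are typically switched on;
# the concrete values and the pipeline class path are illustrative assumptions.
#DOWNLOAD_DELAY = 1.0
#CONCURRENT_REQUESTS_PER_DOMAIN = 8
#AUTOTHROTTLE_ENABLED = True
#ITEM_PIPELINES = {
#    'cwfuqs.pipelines.CwfuqsPipeline': 300,  # hypothetical pipeline class
#}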
| mit | -8,708,439,694,906,147,000 | 34.152941 | 109 | 0.772758 | false |
lholland421/LiamPlayground | collectd-rabbitmq/test_rabbitmq.py | 1 | 1563 |
#!/usr/bin/env python
"""
Unit test for the RabbitMQ collectd plugin. Meant to be run with pytest.
"""
# Copyright (C) 2015 SignalFx, Inc.
import collections
import mock
import sys
import sample_responses
class MockCollectd(mock.MagicMock):
"""
Mocks the functions and objects provided by the collectd module
"""
@staticmethod
def log(log_str):
print log_str
debug = log
info = log
warning = log
error = log
def mock_api_call(url):
"""
Returns example statistics from the sample_responses module.
Args:
url (str): The URL whose results to mock
"""
endpoint = url.split('/')[-1]
return getattr(sample_responses, endpoint)
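# Editor's note (illustrative, not in the original test): only the last URL path
# segment is used, so e.g. mock_api_call('http://localhost:15672/api/overview')
# resolves to getattr(sample_responses, 'overview'); each mocked endpoint must
# therefore exist as an attribute of the sample_responses module.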
sys.modules['collectd'] = MockCollectd()
import rabbitmq
mock_config = mock.Mock()
ConfigOption = collections.namedtuple('ConfigOption', ['key', 'values'])
mock_config.children = [
ConfigOption('Username', ('guest',)),
ConfigOption('Password', ('guest',)),
ConfigOption('Host', ('localhost',)),
ConfigOption('Port', (15672,)),
ConfigOption('CollectChannels', (True,)),
ConfigOption('CollectConnections', (True,)),
ConfigOption('CollectExchanges', (True,)),
ConfigOption('CollectNodes', (True,)),
ConfigOption('CollectQueues', (True,)),
ConfigOption('HTTPTimeout', (5,)),
]
rabbitmq.config(mock_config)
@mock.patch('rabbitmq._api_call', mock_api_call)
def test_read():
"""
Tests the read() method of the collectd plugin. This codepath exercises
most of the code in the plugin.
"""
rabbitmq.read()
| mit | 6,049,282,584,712,901,000 | 22.681818 | 75 | 0.662188 | false |
synesenom/model.py | distributions/distribution.py | 1 | 5033 |
#!/usr/bin/env python3
#title : distribution.py
#description : Imports distributions and defines their default settings.
#author : Enys Mones
#date : 2015.06.20
#version : 0.1
#usage : python distribution.py
#==============================================================================
import numpy as np
from core import core as co
from distributions.poisson import poisson
from distributions.exponential import exponential
from distributions.lognormal import lognormal
from distributions.weibull import weibull
from distributions.truncated_power_law import truncated_power_law
from distributions.shifted_power_law import shifted_power_law
from distributions.normal import normal
# Distribution names
DISTRIBUTION_POISSON = 'poisson'
DISTRIBUTION_EXPONENTIAL = 'exponential'
DISTRIBUTION_LOGNORMAL = 'lognormal'
DISTRIBUTION_WEIBULL = 'weibull'
DISTRIBUTION_SHIFTED_POWER_LAW = 'shifted-power-law'
DISTRIBUTION_TRUNCATED_POWER_LAW = 'truncated-power-law'
DISTRIBUTION_NORMAL = 'normal'
KEY_CLASS = 'class'
KEY_TEST_PARAMS = 'test-params'
KEY_INITIAL_FIT_PARAMS = 'initial-fit-params'
DISTRIBUTIONS = {
DISTRIBUTION_POISSON: {KEY_CLASS: poisson,
KEY_TEST_PARAMS: [3.4],
KEY_INITIAL_FIT_PARAMS: [20.0]},
DISTRIBUTION_EXPONENTIAL: {KEY_CLASS: exponential,
KEY_TEST_PARAMS: [17.0],
KEY_INITIAL_FIT_PARAMS: [10.0]},
DISTRIBUTION_SHIFTED_POWER_LAW: {KEY_CLASS: shifted_power_law,
KEY_TEST_PARAMS: [2.3, 20.7],
KEY_INITIAL_FIT_PARAMS: [1.2, 1.0]},
DISTRIBUTION_TRUNCATED_POWER_LAW: {KEY_CLASS: truncated_power_law,
KEY_TEST_PARAMS: [2.3, 123.0],
KEY_INITIAL_FIT_PARAMS: [1.2, 50.0]},
DISTRIBUTION_LOGNORMAL: {KEY_CLASS: lognormal,
KEY_TEST_PARAMS: [1.9, 1.1],
KEY_INITIAL_FIT_PARAMS: [1.0, 0.5]},
DISTRIBUTION_WEIBULL: {KEY_CLASS: weibull,
KEY_TEST_PARAMS: [0.5, 1.2],
KEY_INITIAL_FIT_PARAMS: [3.2, 0.8]},
DISTRIBUTION_NORMAL: {KEY_CLASS: normal,
KEY_TEST_PARAMS: [80.8, 8.9],
KEY_INITIAL_FIT_PARAMS: [10.0, 5.0]}
}
def get():
"""
Simply returns a sorted list of the available distributions.
:return: sorted list of available distributions.
"""
return sorted(list(DISTRIBUTIONS.keys()))
def get_sample_pmf(values):
"""
Creates the probability mass function from a sample of values.
:param values: sample of values.
:return: probability mass function as a numpy array.
"""
return np.histogram(values.astype(int), range(int(np.max(values))))[0] / len(values)
def get_sample_cdf(values):
"""
Creates the cumulative distribution from a sample of values.
:param values: sample of values.
:return: cumulative distribution.
"""
return np.cumsum(get_sample_pmf(values))
def pmf(distribution, params, domain=co.DEFAULT_PDF_MAX):
"""
Returns the probability mass function for the given distribution.
:param distribution: distribution to use.
:param params: parameters.
:param domain: domain size.
:return: probability mass function.
"""
return DISTRIBUTIONS[distribution][KEY_CLASS].pmf(params, domain=domain)
def cdf(distribution, params, domain=co.DEFAULT_PDF_MAX):
"""
Returns the cumulative distribution function of a given distribution.
:param distribution: distribution to use.
:param params: parameters.
:param domain: domain size.
:return: cumulative distribution function.
"""
return np.cumsum(pmf(distribution, params, domain=domain))
def samples(distribution, params, size=co.DEFAULT_SAMPLE_SIZE):
"""
Returns samples from a given distribution.
:param distribution: distribution to use.
:param params: parameters.
:param size: sample size
:return: numpy array of samples.
"""
return DISTRIBUTIONS[distribution][KEY_CLASS].samples(params, size=size)
def log_likelihood(distribution, params, data, nonzero_only=False):
"""
Returns the log-likelihood of a distribution over a given sample.
:param distribution: distribution to use.
:param params: parameters.
:param data: data to use.
:param nonzero_only: whether only non-zero data points should be used.
:return: log-likelihood.
"""
return DISTRIBUTIONS[distribution][KEY_CLASS].log_likelihood(params, data, nonzero_only)
def get_params(params, distribution):
"""
Creates a printable message of the parameter values.
:param params: list containing the parameters.
:param distribution: distribution to use.
:return: printable string of the parameter values.
"""
return DISTRIBUTIONS[distribution][KEY_CLASS].get_params(params)
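# --- Editor's sketch (not part of the original module) ---
# Minimal round trip through the wrappers above, using the built-in test
# parameters.  Assumes the distributions package and its `core` module are
# importable exactly as at the top of this file.
if __name__ == '__main__':
    name = DISTRIBUTION_POISSON
    test_params = DISTRIBUTIONS[name][KEY_TEST_PARAMS]
    xs = samples(name, test_params, size=1000)
    print(get_params(test_params, name))
    print(log_likelihood(name, test_params, xs))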
| mit | -2,310,914,908,824,219,600 | 34.195804 | 92 | 0.641168 | false |
letsgoexploring/economicData | usConvergenceData/stateIncomeData.py | 1 | 5246 |
# coding: utf-8
# In[1]:
from __future__ import division,unicode_literals
# get_ipython().magic('matplotlib inline')
import numpy as np
import pandas as pd
import json
import runProcs
from urllib.request import urlopen
import matplotlib.pyplot as plt
# In[2]:
# 0. State abbreviations
# 0.1 dictionary:
stateAbbr = {
u'Alabama':u'AL',
u'Alaska':u'AK',
u'Arizona':u'AZ',
u'Arkansas':u'AR',
u'California':u'CA',
u'Colorado':u'CO',
u'Connecticut':u'CT',
u'Delaware':u'DE',
u'District of Columbia':u'DC',
u'Florida':u'FL',
u'Georgia':u'GA',
u'Hawaii':u'HI',
u'Idaho':u'ID',
u'Illinois':u'IL',
u'Indiana':u'IN',
u'Iowa':u'IA',
u'Kansas':u'KS',
u'Kentucky':u'KY',
u'Louisiana':u'LA',
u'Maine':u'ME',
u'Maryland':u'MD',
u'Massachusetts':u'MA',
u'Michigan':u'MI',
u'Minnesota':u'MN',
u'Mississippi':u'MS',
u'Missouri':u'MO',
u'Montana':u'MT',
u'Nebraska':u'NE',
u'Nevada':u'NV',
u'New Hampshire':u'NH',
u'New Jersey':u'NJ',
u'New Mexico':u'NM',
u'New York':u'NY',
u'North Carolina':u'NC',
u'North Dakota':u'ND',
u'Ohio':u'OH',
u'Oklahoma':u'OK',
u'Oregon':u'OR',
u'Pennsylvania':u'PA',
u'Rhode Island':u'RI',
u'South Carolina':u'SC',
u'South Dakota':u'SD',
u'Tennessee':u'TN',
u'Texas':u'TX',
u'Utah':u'UT',
u'Vermont':u'VT',
u'Virginia':u'VA',
u'Washington':u'WA',
u'West Virginia':u'WV',
u'Wisconsin':u'WI',
u'Wyoming':u'WY'
}
# 0.2 List of states in the US
stateList = [s for s in stateAbbr]
# In[3]:
# 1. Construct series for price deflator
# 1.1 Obtain data from BEA
gdpDeflator = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=NIPA&TableID=13&Frequency=A&Year=X&ResultFormat=JSON&')
# result = gdpDeflator.readall().decode('utf-8')
result = gdpDeflator.read().decode('utf-8')
jsonResponse = json.loads(result)
# In[4]:
# 1.2 Construct the data frame for the deflator series
values = []
years = []
for element in jsonResponse['BEAAPI']['Results']['Data']:
# if element['LineDescription'] == 'Personal consumption expenditures':
if element['LineDescription'] == 'Gross domestic product':
years.append(element['TimePeriod'])
values.append(float(element['DataValue'])/100)
values = np.array([values]).T
dataP = pd.DataFrame(values,index = years,columns = ['price level'])
# 1.3 Display the data
print(dataP)
# In[5]:
# 2. Construct series for per capita income by state, region, and the entire US
# 2.1 Obtain data from BEA
stateYpc = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=RegionalData&KeyCode=PCPI_SI&Year=ALL&GeoFips=STATE&ResultFormat=JSON&')
# result = stateYpc.readall().decode('utf-8')
result = stateYpc.read().decode('utf-8')
jsonResponse = json.loads(result)
# jsonResponse['BEAAPI']['Results']['Data'][0]['GeoName']
# In[6]:
# 2.2 Construct the data frame for the per capita income series
# 2.2.1 Initialize the dataframe
regions = []
years = []
for element in jsonResponse['BEAAPI']['Results']['Data']:
if element['GeoName'] not in regions:
regions.append(element['GeoName'])
if element['TimePeriod'] not in years:
years.append(element['TimePeriod'])
df = np.zeros([len(years),len(regions)])
dataY = pd.DataFrame(df,index = years,columns = regions)
# 2.2.2 Populate the dataframe with values
for element in jsonResponse['BEAAPI']['Results']['Data']:
try:
dataY[element['GeoName']][element['TimePeriod']] = np.round(float(element[u'DataValue'])/float(dataP.loc[element['TimePeriod']]),2)# real
except:
dataY[element['GeoName']][element['TimePeriod']] = np.nan
# 2.2.3 Replace the state names in the columns with abbreviations
columns=[]
for r in regions:
if r in stateList:
columns.append(stateAbbr[r])
else:
columns.append(r)
dataY.columns=columns
# 2.2.4 Display the data obtained from the BEA
dataY
# In[7]:
# 3. State income data for 1840, 1880, and 1900
# 3.1.1 Import Easterlin's income data
easterlin_data = pd.read_csv('Historical Statistics of the US - Easterlin State Income Data.csv',index_col=0)
# 3.1.2 Import historic CPI data
historic_cpi_data=pd.read_csv('Historical Statistics of the US - cpi.csv',index_col=0)
historic_cpi_data = historic_cpi_data/historic_cpi_data.loc[1929]*float(dataP.loc['1929'])
# In[8]:
# 3.2 Construct real income series for 1840, 1880, and 1900
df_1840 = easterlin_data['Income per capita - 1840 - A [cur dollars]']/float(historic_cpi_data.loc[1840])
df_1880 = easterlin_data['Income per capita - 1880 [cur dollars]']/float(historic_cpi_data.loc[1890])
df_1900 = easterlin_data['Income per capita - 1900 [cur dollars]']/float(historic_cpi_data.loc[1900])
df = pd.DataFrame({'1840':df_1840,'1880':df_1880,'1900':df_1900}).transpose()
# In[9]:
df = pd.concat([dataY,df]).sort_index()
# In[17]:
df.loc['1880'].sort_values()
# In[10]:
# 4. Export data to csv
series = dataY.sort_index()
series = df.sort_index()
dropCols = [u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains', u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West']
for c in dropCols:
series = series.drop([c],axis=1)
series.to_csv('stateIncomeData.csv',na_rep='NaN')
# In[11]:
len(dataY.columns)
# In[12]:
# 5. Export notebook to .py
runProcs.exportNb('stateIncomeData')
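# Editor's sketch (not in the original notebook): quick sanity check of the
# exported file; the year label used below is an illustrative assumption.
# check = pd.read_csv('stateIncomeData.csv', index_col=0)
# print(check.loc['1929'].describe())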
| mit | 3,401,800,098,482,707,500 | 23.745283 | 188 | 0.683378 | false |
gw0/myhdl | myhdl/test/conversion/toVerilog2/test_loops.py | 1 | 1068 |
from __future__ import absolute_import
import os
path = os.path
from random import randrange
from myhdl import *
from myhdl.conversion import verify, analyze
from myhdl import ConversionError
from myhdl.conversion._misc import _error
def ForLoopError1(a, out):
@instance
def logic():
while 1:
yield a
var = 0
for i in range(1, 4, -1):
if a[i] == 1:
var += 1
out.next = var
return logic
def LoopBench(LoopTest):
a = Signal(intbv(-1)[16:])
z = Signal(intbv(0)[16:])
looptest_inst = LoopTest(a, z)
data = tuple([randrange(2**min(i, 16)) for i in range(100)])
@instance
def stimulus():
for i in range(100):
a.next = data[i]
yield delay(10)
print z
return stimulus, looptest_inst
def testForLoopError1():
try:
analyze(LoopBench, ForLoopError1)
except ConversionError as e:
assert e.kind == _error.Requirement
else:
assert False
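# --- Editor's sketch (not part of the original test) ---
# For contrast, the same loop written with an ascending range converts without
# raising the requirement error; this variant is illustrative only and is not
# wired into any test case.
def ForLoopOk1(a, out):
    @instance
    def logic():
        while 1:
            yield a
            var = 0
            for i in range(4):
                if a[i] == 1:
                    var += 1
            out.next = var
    return logic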
| lgpl-2.1 | -6,276,304,458,649,507,000 | 20.36 | 64 | 0.56367 | false |
ymorired/s4backup | filelister.py | 1 | 1541 |
__author__ = 'mori.yuichiro'
import os
import fnmatch
class FileLister():
def __init__(self, target_path, ignore_dirs=None, ignore_file_patterns=None):
abs_path = os.path.abspath(target_path)
if not os.path.isdir(abs_path):
raise Exception('Invalid target path!')
self.target_path = abs_path
ignore_file_patterns = ignore_file_patterns or []
self.ignore_file_patterns = ignore_file_patterns
ignore_dirs = ignore_dirs or []
self.ignore_dirs = ignore_dirs
def _is_ignore_dir(self, dir_name):
relative_path = dir_name.replace(self.target_path, "", 1)
relative_path += '/'
for ignore_dir in self.ignore_dirs:
if relative_path.startswith('/' + ignore_dir + '/'):
return True
return False
def _is_ignore_file(self, file_name):
for ignore_pattern in self.ignore_file_patterns:
if fnmatch.fnmatch(file_name, ignore_pattern):
return True
return False
def _find_all_files(self, directory):
for root, dirs, files in os.walk(directory):
if self._is_ignore_dir(root):
continue
for file_name in files:
if self._is_ignore_file(file_name):
continue
yield os.path.join(root, file_name)
def get_file_list(self):
files = []
for found_file in self._find_all_files(self.target_path):
files.append(found_file)
return files
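# --- Editor's sketch (not part of the original module) ---
# Typical invocation; the directory and ignore patterns are illustrative.
if __name__ == '__main__':
    lister = FileLister('.', ignore_dirs=['.git', 'tmp'],
                        ignore_file_patterns=['*.pyc', '.DS_Store'])
    for path in lister.get_file_list():
        print(path)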
| mit | -2,292,767,076,066,646,300 | 29.82 | 81 | 0.578845 | false |
ant-Korn/math_tournaments | math_tournaments/users/migrations/0001_initial.py | 1 | 2701 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-09-23 23:21
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='E-mail')),
('phone', models.CharField(blank=True, max_length=15, null=True, validators=[django.core.validators.RegexValidator(message="Номер телефона должен быть представлен в формате: '+999999999'. До 15 знаков доступно.", regex='^\\+?1?\\d{9,15}$')], verbose_name='Номер телефона')),
('first_name', models.CharField(max_length=30, verbose_name='Имя')),
('last_name', models.CharField(max_length=30, verbose_name='Фамилия')),
('birth_date', models.DateField(verbose_name='Дата рождения')),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'пользователь',
'verbose_name_plural': 'пользователи',
},
managers=[
('objects', users.models.UserManager()),
],
),
]
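# Editor's note (assumption, not part of the generated migration): a custom
# user model like this is normally paired with AUTH_USER_MODEL = 'users.User'
# in the project settings so that Django's auth machinery points at this table
# instead of the default auth.User.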
| mit | 6,238,913,713,545,542,000 | 57.727273 | 323 | 0.618808 | false |
teoliphant/scipy | scipy/stats/distributions.py | 2 | 215895 |
# Functions to implement several important functions for
# various Continuous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import all, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
from _tukeylambda_stats import tukeylambda_variance as _tlvar, \
tukeylambda_kurtosis as _tlkurt
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete', 'binom', 'bernoulli', 'nbinom', 'geom',
'hypergeom', 'logser', 'poisson', 'planck', 'boltzmann', 'randint',
'zipf', 'dlaplace', 'skellam'
]
floatinfo = numpy.finfo(float)
gam = special.gamma
random = mtrand.random_sample
import types
from scipy.misc import doccer
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
'parameters':"""\nParameters\n---------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative density function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative density function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete its specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> [ %(shapes)s ] = [<Replace with reasonable values>]
>>> rv = %(name)s(%(shapes)s)
Display frozen pmf
>>> x = np.arange(0, np.minimum(rv.dist.b, 3))
>>> h = plt.vlines(x, 0, rv.pmf(x), lw=2)
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
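# Editor's note (illustrative, not in the original source): for a large sample
# from a standard normal distribution both helpers should be close to zero,
# e.g. _skew(np.random.randn(10**5)) ~ 0.0 and _kurtosis(np.random.randn(10**5)) ~ 0.0
# (the latter is *excess* kurtosis, so the normal distribution maps to 0).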
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array_like
random variates of given `size`
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array_like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
loc : array_like, optional
location parameter (default = 0)
scale : array_like, optional
scale parameter (default = 1)
Returns
-------
a, b : array_like (float)
end-points of range that contain alpha % of the rvs
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
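# Editor's note (illustrative, not in the original source): for the standard
# normal distribution, interval(0.95) evaluates to roughly (-1.96, 1.96),
# i.e. the central region containing 95% of the probability mass.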
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
DEPRECATED
xb : float, optional
DEPRECATED
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
cumulative density function
logcdf(x, <shape(s)>, loc=0, scale=1)
log of the cumulative density function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in all
cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xa=None, xb=None,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
if xa is not None:
warnings.warn("The `xa` parameter is deprecated and will be "
"removed in scipy 0.12", DeprecationWarning)
if xb is not None:
warnings.warn("The `xb` parameter is deprecated and will be "
"removed in scipy 0.12", DeprecationWarning)
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pdf_signature = inspect.getargspec(self._pdf.im_func)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return apply(self.cdf, (x, )+args)-q
def _ppf_single_call(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q,*args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q,*args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve, \
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(asarray(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
# Central moments
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1==cond1)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(asarray,(q,loc,scale))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(asarray,(q,loc,scale))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.b)
#place(output,(1-cond0)*(cond1==cond1), self.badvalue)
place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.a)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
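    # A rough usage sketch of the quantile helpers above (illustrative only;
    # `norm` is instantiated later in this module): ``isf(q)`` inverts the
    # survival function, so it agrees with ``ppf(1 - q)`` up to rounding.
    #     >>> norm.isf(0.05)     # ~ 1.6449
    #     >>> norm.ppf(0.95)     # ~ 1.6449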
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(asarray,(loc,scale))
args = tuple(map(asarray,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
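    # Illustrative sketch of the ``moments`` string handled above (values for
    # the standard normal, instantiated later in this module):
    #     >>> norm.stats(moments='mvsk')
    #     # -> mean 0, variance 1, skew 0, (Fisher) kurtosis 0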
def moment(self, n, *args, **kwds):
"""
        n-th order non-central moment of the distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
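    # Illustrative check of the loc/scale conversion above: for X = loc + scale*Y,
    # E[X**2] = loc**2 + 2*loc*scale*E[Y] + scale**2*E[Y**2], so for a normal
    # with loc=3, scale=2 the second moment is 9 + 4 = 13:
    #     >>> norm.moment(2, loc=3., scale=2.)   # ~ 13.0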
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = range(Nargs)
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in zip(index, names):
if kwds.has_key(key):
fixedn.append(n)
args[n] = kwds[key]
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not (kwds.has_key('loc') and
kwds.has_key('scale')):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
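    # A hedged usage sketch of ``fit`` (names follow the docstring above; the
    # actual estimates depend on the sample):
    #     >>> data = norm.rvs(loc=5., scale=2., size=1000)
    #     >>> loc_hat, scale_hat = norm.fit(data)          # both parameters free
    #     >>> loc0, scale_hat2 = norm.fit(data, floc=0.)   # location held at 0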
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead."""
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
place(output,cond0,self._entropy()+log(scale))
else:
place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the distribution
Location and scale only tested on a few examples.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Argument (parameters) of the distribution.
lb, ub : scalar, optional
Lower and upper bound for integration. default is set to the support
of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
        This function has not been checked for its behavior when the integral is
        not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
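# A hedged sketch of rv_continuous.expect defined above (bounds and the
# integrand follow the method's documentation; values are approximate):
#     >>> norm.expect(lambda x: x**2)                  # E[X**2] ~ 1.0
#     >>> norm.expect(lambda x: x, loc=2., scale=3.)   # E[X] ~ 2.0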
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0, name='ksone', shapes="n")
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
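# Hedged usage sketch for the KS distributions defined above (values are
# approximate):
#     >>> kstwobign.isf(0.05)      # ~ 1.358, asymptotic two-sided 5% point
#     >>> ksone.isf(0.05, 100)     # one-sided critical value for n = 100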
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return special.log_ndtr(x)
def _norm_ppf(q):
return special.ndtri(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
%(example)s
"""
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm')
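# Hedged usage sketch for norm defined above (values are approximate):
#     >>> norm.pdf(0.)                     # 1/sqrt(2*pi) ~ 0.3989
#     >>> norm.cdf(1.96)                   # ~ 0.975
#     >>> rv = norm(loc=10., scale=2.)     # frozen N(10, 2**2)
#     >>> rv.ppf(0.5)                      # median ~ 10.0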
## Alpha distribution
##
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
    where ``Phi(a)`` is the normal CDF, ``x > 0``, and ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha', shapes='a')
## Anglit distribution
##
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(example)s
"""
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
## Beta distribution
##
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) *
(1-x)**(b-1),
for ``0 < x < 1``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta', shapes='a, b')
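# Hedged sketch of the method-of-moments shortcut in beta_gen.fit above: when
# both floc and fscale are fixed, a and b come directly from the sample mean
# and variance instead of a numerical MLE search.
#     >>> data = beta.rvs(2., 5., size=1000)
#     >>> a_hat, b_hat, loc0, scale1 = beta.fit(data, floc=0., fscale=1.)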
## Beta Prime
class betaprime_gen(rv_continuous):
    """A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
        betaprime.pdf(x, a, b) =
            gamma(a+b) / (gamma(a)*gamma(b)) * x**(a-1) * (1+x)**(-a-b)
for ``x > 0``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b')
## Bradford
##
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', shapes='c')
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
"""A Burr continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
        return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', shapes="c, d")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d=1``.
%(before_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', shapes='c')
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
    def _fitstart(self, data, args=None):
        return (0, 1)
cauchy = cauchy_gen(name='cauchy')
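# Hedged sketch for cauchy defined above: its moments are undefined (hence the
# inf/nan returned by _stats), so quantiles are the robust summaries to use:
#     >>> cauchy.ppf([0.25, 0.5, 0.75])    # quartiles ~ (-1.0, 0.0, 1.0)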
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = MaxWell
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi', shapes='df')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
%(example)s
"""
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2', shapes='df')
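# Hedged usage sketch for chi2 defined above (df is the single shape
# parameter; values are approximate):
#     >>> chi2.stats(4, moments='mv')   # (df, 2*df) -> (4.0, 8.0)
#     >>> chi2.ppf(0.95, 4)             # ~ 9.49, the usual 5% critical value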
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(example)s
"""
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
## Double Gamma distribution
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
%(example)s
"""
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma', shapes='a')
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
%(example)s
"""
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(asarray(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull', shapes='c')
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma
distribution, with the shape parameter ``a`` an integer. Refer to
the ``gamma`` distribution for further examples.
"""
def _rvs(self, a):
return gamma.rvs(a, size=self._size)
    def _argcheck(self, a):
        return (a > 0) & (floor(a)==a)
def _pdf(self, x, a):
Px = (x)**(a-1.0)*exp(-x)/special.gamma(a)
return Px
def _logpdf(self, x, a):
return (a-1.0)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gdtr(1.0,a,x)
def _sf(self, x, a):
return special.gdtrc(1.0,a,x)
def _ppf(self, q, a):
return special.gdtrix(1.0, a, q)
def _stats(self, a):
a = a*1.0
return a, a, 2/sqrt(a), 6/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
erlang = erlang_gen(a=0.0, name='erlang', shapes='a')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = lambda * exp(- lambda*x)
for ``x >= 0``.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`expon` does not have shape parameters.
%(example)s
"""
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
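# Hedged sketch for expon defined above; the rate lambda enters only through
# ``scale = 1/lambda`` (values are approximate):
#     >>> lam = 0.5
#     >>> expon.stats(scale=1./lam, moments='mv')   # mean 2.0, variance 4.0
#     >>> expon.ppf(0.5, scale=1./lam)              # median log(2)/lam ~ 1.386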
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**asarray(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return (exm1c)**a
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib', shapes="a, c")
## Exponential Power
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b))
for ``x >= 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
xbm1 = x**(b-1.0)
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
return -expm1(-expm1(x**b))
def _sf(self, x, b):
return exp(-expm1(x**b))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow', shapes='b')
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
    """A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x,c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/asarray(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/asarray((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/asarray(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife', shapes='c')
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy', shapes='c')
## F
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = asarray(1.0*dfn)
# m = asarray(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = asarray(dfd*1.0)
v1 = asarray(dfn*1.0)
mu = where (v2 > 2, v2 / asarray(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f', shapes="dfn, dfd")
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is a shape parameter A = c * scale
## Half-normal is the folded normal with shape parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm', shapes='c')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r', shapes='c')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min', shapes='c')
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2):
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l', shapes='c')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max', shapes='c')
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic', shapes='c')
## Generalized Pareto
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
for ``c != 0``, and for ``x >= 0`` for all c,
and ``x < 1/abs(c)`` for ``c < 0``.
%(example)s
"""
def _argcheck(self, c):
c = asarray(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,asarray(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,asarray(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0, name='genpareto', shapes='c')
## Generalized Exponential
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a,b,c > 0``.
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon', shapes='a, b, c')
## Generalized Extreme Value
## c=0 is just the Gumbel distribution.
## This version now accepts c==0; gumbel_r may also be used for c==0.
# new version by Per Brodtkorb, see ticket:767
# also works for c==0, special case is gumbel_r
# increased precision for small c
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
%(example)s
"""
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3);
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0);
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)));
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme', shapes='c')
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`gamma` has a shape parameter `a` which needs to be set explicitly. For instance:
>>> from scipy.stats import gamma
>>> rv = gamma(3., loc = 0., scale = 2.)
produces a frozen form of `gamma` with shape ``a = 3.``, ``loc =
0.`` and ``lambda = 1./scale = 1./2.``.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(example)s
"""
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0, name='gamma', shapes='a')
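# Hedged sketch of the floc=0 shortcut in gamma_gen.fit above: the shape is
# found by a one-dimensional Brent search and the scale then follows as
# xbar/a (estimates depend on the sample):
#     >>> data = gamma.rvs(3., scale=2., size=1000)
#     >>> a_hat, loc0, scale_hat = gamma.fit(data, floc=0)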
# Generalized Gamma
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x > 0``, ``a > 0``, and ``c != 0``.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma', shapes="a, c")
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
shapes='c')
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz', shapes='c')
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed and right-skewed Gumbel distributions are available
## as gumbel_l and gumbel_r.
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l')
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(example)s
"""
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))``
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
shapes="a, b, c, z")
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
%(example)s
"""
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma', shapes='a')
## Inverse Gaussian Distribution (used to be called 'invnorm')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
    When `mu` is too small, evaluating the cumulative distribution function will be
    inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', shapes="mu")
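# Illustrative usage sketch (added for clarity, not part of the original API
# surface): _stats above returns mean = mu and variance = mu**3, so for the
# standard (loc=0, scale=1) form one would expect roughly
#
#     >>> m, v = invgauss.stats(0.5, moments='mv')
#     >>> float(m), float(v)          # approximately (0.5, 0.125)
#
# Note also that the two exp(1.0/mu) factors in _cdf combine to exp(2.0/mu),
# which is the usual closed form; accuracy still degrades for very small mu,
# as warned in the docstring.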
## Inverted Weibull
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),asarray(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull', shapes='c')
## Johnson SB
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a,b > 0``, and ``phi`` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb', shapes="a, b")
## Johnson SU
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x, a, b > 0``, and `phi` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu', shapes="a, b")
## Laplace Distribution
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(example)s
"""
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace')
## Levy Distribution
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(example)s
"""
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy")
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(example)s
"""
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(example)s
"""
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', shapes="alpha, beta")
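# Note (best-effort reading, added comment): the _rvs above appears to follow
# the Chambers-Mallows-Stuck construction from a uniform angle TH and an
# exponential variate W.  Only sampling is usable here, e.g.
#
#     >>> samples = levy_stable.rvs(1.5, 0.0, size=100)
#
# while _pdf deliberately raises NotImplementedError.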
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
"""A logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
%(example)s
"""
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic')
## Log Gamma
#
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all ``x, c > 0``.
%(example)s
"""
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', shapes='c')
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace', shapes='c')
## Lognormal (Cobb-Douglass)
## std is a shape parameter and is the variance of the underlying
## distribution.
## the mean of the underlying distribution is log(scale)
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
If log x is normally distributed with mean mu and variance sigma**2,
then x is log-normally distributed with shape parameter sigma and scale
parameter exp(mu).
%(example)s
"""
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm', shapes='s')
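# Illustrative sketch (added; assumes the relationship stated in the docstring
# above): if log(x) ~ N(mu, sigma**2), then x is lognormal with shape s = sigma
# and scale = exp(mu).  Roughly:
#
#     >>> mu, sigma = 1.0, 0.5
#     >>> x = lognorm.rvs(sigma, scale=np.exp(mu), size=10000)
#     >>> np.log(x).mean(), np.log(x).std()    # close to (mu, sigma)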
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
%(example)s
"""
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
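# Illustrative check (added, assuming the chi relationship stated in the
# docstring above):
#
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(maxwell.pdf(x), chi.pdf(x, 3))
#     True
#
# i.e. maxwell is chi with df=3 in the standard (loc=0, scale=1) form.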
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', shapes="k, s")
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", shapes='nu')
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _logpdf(self, x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a)
return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0)))
def _pdf(self, x, df, nc):
return np.exp(self._logpdf(x, df, nc))
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', shapes="df, nc")
# Non-central F
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{v1/2-1}_{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
/ (B(v1/2, v2/2) * gamma((v1+v2)/2))
for ``df1, df2, nc > 0``.  Here ``v1 = df1``, ``v2 = df2``, ``B`` is the beta
function, and ``L^{k}_{n}`` is a generalized (associated) Laguerre polynomial.
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', shapes="dfn, dfd, nc")
## Student t distribution
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
%(example)s
"""
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = asarray(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t', shapes="df")
## Non-central T distribution
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``, ``nc > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= asarray(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", shapes="df, nc")
# Pareto
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", shapes="b")
# LOMAX (Pareto of the second kind.)
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
(loc=-1.0).
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax", shapes="c")
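# Illustrative check (added, following the note above that lomax is the Pareto
# distribution shifted by loc=-1.0):
#
#     >>> c = 2.5
#     >>> x = np.linspace(0.5, 5.0, 5)
#     >>> np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))
#     True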
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a),
6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw", shapes="a")
# Power log normal
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
%(example)s
"""
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm", shapes="c, s")
# Power Normal
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', shapes="c")
# R-distribution ( a general-purpose distribution with a
# variety of shapes.
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist", shapes="c")
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``r >= 0``.
%(example)s
"""
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
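# Illustrative check (added, based on the comment above that rayleigh is chi
# with df=2 and loc=0.0):
#
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))
#     True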
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal", shapes="a, b")
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", shapes="b")
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss', shapes="mu")
# Semicircular
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
# Triangular
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping from ``(loc + c*scale)``
to ``(loc+scale)``.
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", shapes="c")
# Truncated Exponential
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
%(example)s
"""
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon', shapes="b")
# Truncated Normal
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a,b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', shapes="a, b")
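# Illustrative usage sketch (added; the variable names are hypothetical): the
# docstring explains that the truncation bounds are given in standard-normal
# units.  To truncate N(my_mean, my_std**2) to [myclip_a, myclip_b]:
#
#     >>> my_mean, my_std = 0.5, 2.0
#     >>> myclip_a, myclip_b = -1.0, 1.0
#     >>> a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
#     >>> samples = truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=1000)
#
# All samples then lie inside [myclip_a, myclip_b].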
# Tukey-Lambda
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
%(example)s
"""
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = asarray(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
Px = 1.0/asarray(Px)
return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', shapes="lam")
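# Illustrative check (added): the lam=0.0 special case listed above reduces to
# the logistic distribution, since _ppf returns log(q/(1-q)) there:
#
#     >>> q = np.array([0.1, 0.5, 0.9])
#     >>> np.allclose(tukeylambda.ppf(q, 0.0), logistic.ppf(q))
#     True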
# Uniform
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
# Von-Mises
# if x is not in range or loc is not in range it assumes they are angles
# and converts them to [-pi, pi] equivalents.
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, b) = exp(b*cos(x)) / (2*pi*I[0](b))
for ``-pi <= x <= pi``, ``b > 0``.
%(example)s
"""
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', shapes="b")
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x, a) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
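# Illustrative check (added): wald simply delegates to invgauss with mu=1.0,
# so for example
#
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))
#     True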
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy', shapes="c")
### DISCRETE DISTRIBUTIONS
###
def entropy(pk, qk=None, base=None):
""" Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute a relative entropy
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0* pk / sum(pk, axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk, nonzero(qk == 0.0), axis=0) != 0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
S = -sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
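# Illustrative usage sketch (added; the values are hypothetical):
#
#     >>> pk = [0.5, 0.5]
#     >>> entropy(pk)                    # Shannon entropy, ~log(2) = 0.693...
#     >>> entropy(pk, qk=[0.9, 0.1])     # relative entropy (KL divergence)
#     >>> entropy(pk, base=2)            # Shannon entropy expressed in bits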
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qb = 0.0; break
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = copy(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances from for discrete random variables. rv_discrete can be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
generic.logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson", shapes="mu",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
Examples
--------
Custom made discrete distribution:
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>> h = plt.plot(xk, custm.pmf(xk))
Random number generation:
>>> R = custm.rvs(size=100)
Display frozen pmf:
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
Returns
-------
rvs : array_like
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.a-1)
place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.b)
place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
moments : string, optional
Composed of letters ['mvsk'] defining which moments to compute:
- 'm' = mean,
- 'v' = variance,
- 's' = (Fisher's) skew,
- 'k' = (Fisher's) kurtosis.
The default is 'mv'.
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and moments is None: # loc and moments given without keyword
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = asarray(loc)
args = tuple(map(asarray,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n : int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
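        # For orders 1-4, try to obtain the moment from _stats, requesting only
        # the needed letters when _stats supports the 'moments' keyword.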
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
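            # No stored pk: accumulate -p*log(p) term by term, moving outward
            # from the (rounded) mean until the terms fall below eps.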
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = asarray(loc)
args = map(asarray,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
        for a discrete distribution
Parameters
----------
        func : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
            of the distribution, lb and ub are inclusive (lb <= k <= ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
            interval (k such that lb <= k <= ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
        * accuracy: uses self.moment_tol as stopping criterion;
          for heavy-tailed distributions, e.g. zipf(4), the accuracy of the
          mean and variance in the example is only 1e-5, and increasing
          the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
        * uses maxcount=1000 to limit the number of points that are evaluated,
          in order to break the loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print 'sum did not converge'
return tot/invfac
# Binomial
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n,k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0,1,...,n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n,p,self._size)
def _argcheck(self, n, p):
self.b = n
return (n>=0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(p) + (n-k)*np.log(1-p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k,n,p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k,n,p)
def _ppf(self, q, n, p):
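        # bdtrik inverts the cdf in a continuous sense; step back one and
        # compare the cdf with q so the smallest integer k with cdf(k) >= q
        # is returned.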
vals = ceil(special.bdtrik(q,n,p))
vals1 = vals-1
temp = special.bdtr(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
q = 1.0-p
mu = n * p
var = n * p * q
g1 = (q-p) / sqrt(n*p*q)
g2 = (1.0-6*p*q)/(n*p*q)
return mu, var, g1, g2
def _entropy(self, n, p):
k = r_[0:n+1]
vals = self._pmf(k,n,p)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, p")
# Bernoulli distribution
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0,1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="p")
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(p,n) * power(1-p,x)
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + x*log(1-p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q,n,p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, p")
## Geometric distribution
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p,size=self._size)
def _argcheck(self, p):
return (p<=1) & (p >= 0)
def _pmf(self, k, p):
return (1-p)**(k-1) * p
def _logpmf(self, k, p):
        return (k-1)*log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return (1.0-(1.0-p)**k)
def _sf(self, x, p):
k = floor(x)
return (1.0-p)**k
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = 1.0-(1.0-p)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="p")
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
    for N - (M-n) <= k <= min(n, N)
Examples
--------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import hypergeom
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
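        # The support depends on the shape parameters, so self.a and self.b
        # are set here rather than at instance creation.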
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
        n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom', shapes="M, n, N")
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='p')
## Poisson distribution
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
tmp = asarray(mu)
g1 = 1.0 / tmp
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson', shapes="mu")
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda))*exp(-lambda*k)
for ``k*lambda >= 0``.
`planck` takes ``lambda`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
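        # The sign of lambda_ fixes the support: positive gives the
        # non-negative integers, negative the non-positive ones; lambda_ == 0
        # is rejected.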
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda")
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda))*exp(-lambda*k)/(1-exp(-lambda*N))
for ``k = 0,...,N-1``.
`boltzmann` takes ``lambda`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
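        # Rescale q by the truncation normalizer 1-exp(-lambda_*N), then
        # invert the untruncated exponential cdf.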
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N")
## Discrete Uniform
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
        randint.pmf(k) = 1./(max - min)
    for ``k = min, ..., max - 1``.
`randint` takes ``min`` and ``max`` as shape parameters.
%(example)s
"""
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = asarray(max), asarray(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0)
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max")
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k) = 1/(zeta(a)*k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / asarray(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
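        # zeta(a-n, 1) is finite only for a-n > 1; silence error printing
        # while the raw moments are computed so invalid combinations just
        # propagate inf/nan instead of emitting warnings.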
sv = special.errprint(0)
fac = asarray(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / asarray(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = special.errprint(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / asarray(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a")
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
    for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
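        # Closed form of the two-sided geometric sums: one branch for k >= 0,
        # one for k < 0, both normalized by exp(a)+1.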
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
        # variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a")
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
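        # Evaluate the pmf through the non-central chi-square density,
        # with separate branches for negative and non-negative x.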
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2")
|
bsd-3-clause
| -8,934,573,509,199,556,000 | 29.322331 | 94 | 0.531027 | false |
guptaankita/python-novaclient
|
novaclient/tests/unit/v2/test_versions.py
|
1
|
2817
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
from novaclient.v2 import versions
class VersionsTest(utils.TestCase):
def setUp(self):
super(VersionsTest, self).setUp()
self.cs = fakes.FakeClient()
self.service_type = versions.Version
@mock.patch.object(versions.VersionManager, '_is_session_client',
return_value=False)
def test_list_services_with_http_client(self, mock_is_session_client):
self.cs.versions.list()
self.cs.assert_called('GET', None)
@mock.patch.object(versions.VersionManager, '_is_session_client',
return_value=True)
def test_list_services_with_session_client(self, mock_is_session_client):
self.cs.versions.list()
self.cs.assert_called('GET', 'http://nova-api:8774/')
@mock.patch.object(versions.VersionManager, '_is_session_client',
return_value=False)
@mock.patch.object(versions.VersionManager, 'list')
def test_get_current_with_http_client(self, mock_list,
mock_is_session_client):
current_version = versions.Version(
None, {"links": [{"href": "http://nova-api:8774/v2.1"}]},
loaded=True)
mock_list.return_value = [
versions.Version(
None, {"links": [{"href": "http://url/v1"}]}, loaded=True),
versions.Version(
None, {"links": [{"href": "http://url/v2"}]}, loaded=True),
versions.Version(
None, {"links": [{"href": "http://url/v3"}]}, loaded=True),
current_version,
versions.Version(
None, {"links": [{"href": "http://url/v21"}]}, loaded=True)]
self.assertEqual(current_version, self.cs.versions.get_current())
@mock.patch.object(versions.VersionManager, '_is_session_client',
return_value=True)
def test_get_current_with_session_client(self, mock_is_session_client):
self.cs.callback = []
self.cs.versions.get_current()
self.cs.assert_called('GET', 'http://nova-api:8774/v2.1')
|
apache-2.0
| -2,989,190,966,836,458,000 | 41.681818 | 78 | 0.622293 | false |
Zurfazz/BiSci
|
SIZExWEIGHT/SIZExWEIGHT.py
|
1
|
1141
|
# @Author: Vegard S. <BioVegas>
# @Date: 17-Nov-2016
# @Email: vegard.stuerzinger@gmail.com
# @Project: BiSci
# @Filename: SIZExWEIGHT.py
# @Last modified by: BioVegas
# @Last modified time: 17-Nov-2016
# @Copyright: Open Source
import numpy as np
from Tkinter import *
#saving entry fields
filename="SIZExWEIGHT.txt"
file=open(filename, "w")
file.close()
def show_entry_fields():
print ("size: %s\nweight: %s" % (c1.get(),c2.get()))
def save_entry_fields():
file=open(filename,"a")
file.write(c1.get())
file.write(",")
file.write(c2.get())
file.write("\n")
file.close()
#Entry fields and buttons
top = Tk()
top.title("Size vs Weight")
# Widgets are created here; layout is done with grid() in the Formatting
# section below (pack() and grid() must not be mixed in the same container).
t1=Label(top,text="Size")
c1=Entry(top, bd=5)
t2=Label(top, text="Weight")
c2=Entry(top, bd=5)
b1= Button(top, text="show", command=show_entry_fields)
b2= Button(top, text="save data", command=save_entry_fields)
#Formatting
t1.grid(row=0, column=0)
c1.grid(row=0, column=1)
t2.grid(row=1, column=0)
c2.grid(row=1, column=1)
b1.grid(row=2, column=0)
b2.grid(row=2, column=1)
top.mainloop()
|
mit
| -3,762,574,506,191,383,000 | 22.285714 | 60 | 0.678352 | false |
fortiema/stackexch-nlp
|
docs/conf.py
|
1
|
7917
|
# -*- coding: utf-8 -*-
#
# stackexch-nlp documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stackexch-nlp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'stackexch-nlpdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'stackexch-nlp.tex',
u'stackexch-nlp Documentation',
u"Matt Fortier <matt.fortier@openmailbox.org>", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'stackexch-nlp', u'stackexch-nlp Documentation',
[u"Matt Fortier <matt.fortier@openmailbox.org>"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'stackexch-nlp', u'stackexch-nlp Documentation',
u"Matt Fortier <matt.fortier@openmailbox.org>", 'stackexch-nlp',
'NLP shenanigans on Stack Exchange data dumps', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
mit
| -503,206,970,184,427,800 | 31.446721 | 80 | 0.698497 | false |
CTcue/CTcUMLS
|
_scripts/dbc_treatments.py
|
2
|
2239
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from elasticsearch import Elasticsearch, helpers
import csv
import argparse
import time
import json
import os
def read_rows(filename, delimiter=";"):
with open(filename, "rb") as f:
datareader = csv.reader(f, delimiter=str(delimiter))
header = next(datareader)
for line in datareader:
if len(line):
yield dict(zip(header, line))
def stamp():
return time.strftime("%Y-%m-%d %H:%M")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="ctAutocompletion upload script for DBC treatment codes & descriptions")
parser.add_argument('--elastic', dest='elastic', default=None, help='Elasticsearch authentication (optional)')
parser.add_argument('--file', dest='file', required=True, help='CSV file with DBC codes (20160101 Zorgproducten Tabel v20151119.csv.csv)')
parser.add_argument('--delimiter', dest='delimiter', default=",", help='CSV delimiter used')
args = parser.parse_args()
# Check if Elasticsearch auth is provided
if args.elastic:
_auth = tuple(args.elastic.split(":"))
else:
_auth = ("", "")
elastic = Elasticsearch(http_auth=_auth)
print("[%s] Starting upload." % stamp())
bulk = []
counter = 1
for row in read_rows(args.file, args.delimiter):
try:
# VERSIE, DATUM_BESTAND, PEILDATUM, ZORGPRODUCT_CD, LATIJN_OMS, CONSUMENT_OMS, DECLARATIE_VERZEKERD_CD, DECLARATIE_ONVERZEKERD_CD
bulk.append({
"_index": "dbc_zorgproduct",
"_type": "treatments",
"description" : row["CONSUMENT_OMS"],
"description_latin" : row["LATIJN_OMS"].split(" | "),
"product_code" : row["ZORGPRODUCT_CD"]
})
except Exception as err:
print(err)
print("ERROR: The provided csv file has a different header / contents than expected.")
break
counter += 1
if counter % 200 == 0:
helpers.bulk(elastic, bulk)
bulk = []
helpers.bulk(elastic, bulk)
print("[%s] Uploading complete." % stamp())
|
mit
| -1,460,594,322,885,049,900 | 29.671233 | 142 | 0.599375 | false |
itdxer/neupy
|
tests/algorithms/gd/test_optimizers.py
|
1
|
4608
|
from functools import partial
from neupy import algorithms, layers
from helpers import simple_classification, compare_networks
from base import BaseTestCase
class OptimizersTestCase(BaseTestCase):
def setUp(self):
super(OptimizersTestCase, self).setUp()
self.network = layers.join(
layers.Input(10),
layers.Sigmoid(20),
layers.Sigmoid(1),
)
def test_adadelta(self):
x_train, x_test, y_train, y_test = simple_classification()
optimizer = algorithms.Adadelta(
self.network,
batch_size=None,
verbose=False,
rho=0.95,
epsilon=1e-5,
)
optimizer.train(x_train, y_train, x_test, y_test, epochs=100)
self.assertGreater(0.05, optimizer.errors.train[-1])
self.assertGreater(0.15, optimizer.errors.valid[-1])
def test_adagrad(self):
x_train, x_test, y_train, y_test = simple_classification()
optimizer = algorithms.Adagrad(
self.network,
step=0.1,
batch_size=None,
verbose=False,
)
optimizer.train(x_train, y_train, x_test, y_test, epochs=150)
self.assertGreater(0.15, optimizer.errors.valid[-1])
def test_adam(self):
x_train, x_test, y_train, y_test = simple_classification()
optimizer = algorithms.Adam(
self.network,
step=0.1,
verbose=False,
epsilon=1e-4,
beta1=0.9,
beta2=0.99,
)
optimizer.train(x_train, y_train, x_test, y_test, epochs=200)
self.assertGreater(0.2, optimizer.errors.valid[-1])
def test_rmsprop(self):
x_train, x_test, y_train, y_test = simple_classification()
optimizer = algorithms.RMSProp(
self.network,
step=0.02,
batch_size=None,
verbose=False,
epsilon=1e-5,
decay=0.9,
)
optimizer.train(x_train, y_train, x_test, y_test, epochs=150)
self.assertGreater(0.15, optimizer.errors.valid[-1])
def test_momentum(self):
x_train, x_test, y_train, y_test = simple_classification()
optimizer = algorithms.Momentum(
self.network,
step=0.35,
momentum=0.99,
batch_size=None,
verbose=False,
nesterov=True,
)
optimizer.train(x_train, y_train, x_test, y_test, epochs=30)
self.assertGreater(0.15, optimizer.errors.valid[-1])
def test_adamax(self):
x_train, x_test, y_train, y_test = simple_classification()
mnet = algorithms.Adamax(
self.network,
step=0.1,
batch_size=None,
verbose=False,
epsilon=1e-7,
beta1=0.9,
beta2=0.999,
)
mnet.train(x_train, y_train, x_test, y_test, epochs=50)
self.assertGreater(0.15, mnet.errors.train[-1])
def test_adamax_overfit(self):
self.assertCanNetworkOverfit(
partial(algorithms.Adamax, step=0.2, verbose=False),
epochs=400)
class MomentumTestCase(BaseTestCase):
def test_momentum_with_minibatch(self):
x_train, _, y_train, _ = simple_classification()
compare_networks(
# Test classes
partial(algorithms.Momentum, batch_size=None),
partial(algorithms.Momentum, batch_size=1),
# Test data
(x_train, y_train),
# Network configurations
network=[
layers.Input(10),
layers.Sigmoid(20),
layers.Sigmoid(1)
],
step=0.25,
momentum=0.1,
shuffle_data=True,
verbose=False,
# Test configurations
epochs=40,
show_comparison_plot=False,
)
def test_nesterov_momentum(self):
x_train, _, y_train, _ = simple_classification()
compare_networks(
# Test classes
partial(algorithms.Momentum, nesterov=False),
partial(algorithms.Momentum, nesterov=True),
# Test data
(x_train, y_train),
# Network configurations
network=[
layers.Input(10),
layers.Sigmoid(20),
layers.Sigmoid(1)
],
batch_size=None,
step=0.25,
momentum=0.9,
shuffle_data=True,
verbose=False,
# Test configurations
epochs=10,
show_comparison_plot=False,
)
|
mit
| 672,359,132,601,342,300 | 30.135135 | 69 | 0.543837 | false |
dimara/synnefo
|
snf-cyclades-app/synnefo/volume/util.py
|
1
|
6444
|
# Copyright (C) 2010-2015 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from synnefo.db import models
from snf_django.lib.api import faults
from synnefo.api.util import get_image_dict, get_vm
from synnefo.plankton import backend
from synnefo.cyclades_settings import cyclades_services, BASE_HOST
from synnefo.lib import join_urls
from synnefo.lib.services import get_service_path
def assert_snapshots_enabled(request):
if not snapshots_enabled_for_user(request.user):
raise faults.NotImplemented("Snapshots disabled")
def snapshots_enabled_for_user(user):
    snapshots_enabled = settings.CYCLADES_SNAPSHOTS_ENABLED
if type(snapshots_enabled) not in (list, tuple):
return snapshots_enabled
if not user:
return False
try:
user_groups = map(lambda x: x['name'],
user['access']['user'].get('roles', []))
except KeyError:
return False
return len(set(user_groups).intersection(snapshots_enabled)) > 0
def mark_volume_as_deleted(volume, immediate=False):
if volume.delete_on_termination:
volume.status = "DELETED" if immediate else "DELETING"
else:
volume.status = "DETACHING"
if immediate:
volume.deleted = True
volume.save()
def is_volume_type_detachable(volume_type):
"""Check if the volume type is detachable."""
if volume_type is None:
raise faults.BadRequest("Volume type must be provided")
if (volume_type.disk_template in
settings.CYCLADES_DETACHABLE_DISK_TEMPLATES):
return True
else:
return False
def assert_detachable_volume_type(volume_type):
"""Assert that the volume type is detachable.
Raise a BadRequest exception in case the volume type is not detachable.
"""
if not is_volume_type_detachable(volume_type):
raise faults.BadRequest("Volume type '%s' is not detachable" %
volume_type.name)
def assign_volume_to_server(server, volume, index=None):
"""Assign a volume to a server.
This function works at DB level. It associates the volume with the server
and automatically computes the proper index for the volumer, if not given.
"""
# Get a list of indexes of the volumes that are attached to the given VM.
indexes = map(lambda v: v.index, server.volumes.filter(deleted=False))
if index is None:
if indexes == []:
# If the server has no volumes, automatically assign the index 0.
index = 0
else:
# Else, find the largest index and add 1.
index = reduce(max, indexes) + 1
elif index in indexes:
        raise faults.BadRequest("Cannot set the index of volume '%s' to '%s',"
                                " since it is used by another volume of"
                                " server '%s'." % (volume, index, server))
volume.index = index
volume.machine = server
volume.save()
return volume
def get_volume(user_id, projects, volume_id, for_update=False,
non_deleted=False, exception=faults.ItemNotFound):
volumes = models.Volume.objects.for_user(user_id, projects)
if for_update:
volumes = volumes.select_for_update()
try:
volume_id = int(volume_id)
except (TypeError, ValueError):
raise faults.BadRequest("Invalid volume id: %s" % volume_id)
try:
volume = volumes.get(id=volume_id)
if non_deleted and volume.deleted:
raise faults.BadRequest("Volume '%s' has been deleted."
% volume_id)
return volume
except models.Volume.DoesNotExist:
raise exception("Volume %s not found" % volume_id)
def normalize_volume_type_id(volume_type_id):
try:
return int(volume_type_id)
except (TypeError, ValueError):
raise faults.BadRequest("Invalid volume type id: %s" % volume_type_id)
def get_volume_type(volume_type_id, for_update=False, include_deleted=False,
exception=faults.ItemNotFound):
vtypes = models.VolumeType.objects
if not include_deleted:
vtypes = vtypes.filter(deleted=False)
if for_update:
vtypes = vtypes.select_for_update()
vtype_id = normalize_volume_type_id(volume_type_id)
try:
return vtypes.get(id=vtype_id)
except models.VolumeType.DoesNotExist:
raise exception("Volume type %s not found" % vtype_id)
def get_snapshot(user_id, snapshot_id, exception=faults.ItemNotFound):
try:
with backend.PlanktonBackend(user_id) as b:
return b.get_snapshot(snapshot_id)
except faults.ItemNotFound:
raise exception("Snapshot %s not found" % snapshot_id)
def get_image(user_id, image_id, exception=faults.ItemNotFound):
try:
return get_image_dict(image_id, user_id)
except faults.ItemNotFound:
raise exception("Image %s not found" % image_id)
VOLUME_URL = \
join_urls(BASE_HOST,
get_service_path(cyclades_services, "volume", version="v2.0"))
VOLUMES_URL = join_urls(VOLUME_URL, "volumes/")
SNAPSHOTS_URL = join_urls(VOLUME_URL, "snapshots/")
def volume_to_links(volume_id):
href = join_urls(VOLUMES_URL, str(volume_id))
return [{"rel": rel, "href": href} for rel in ("self", "bookmark")]
def snapshot_to_links(snapshot_id):
href = join_urls(SNAPSHOTS_URL, str(snapshot_id))
return [{"rel": rel, "href": href} for rel in ("self", "bookmark")]
def update_snapshot_state(snapshot_id, user_id, state):
"""Update the state of a snapshot in Pithos.
Use PithosBackend in order to update the state of the snapshots in
Pithos DB.
"""
with backend.PlanktonBackend(user_id) as b:
return b.update_snapshot_state(snapshot_id, state=state)
|
gpl-3.0
| 2,165,029,334,500,874,500 | 32.915789 | 78 | 0.665115 | false |
ambrosef/HLx_Examples
|
Acceleration/memcached/regressionSims/testgen/memtest_regressions.py
|
1
|
5540
|
#!/usr/bin/python
import sys
import memlib
## EDIT HERE ###################################################################
keySizes = [1,3,4,5,7,8,9,10,12,13,15,16,17,24,25,28,84,128]
#valueSizes = [1,3,4,5,8,9,10,12,13,16,17,24,28,184,208,1024]
valueSizes = [1,3,4,5,8,9,10,12,13,16,17,24,28,184,208, 256]
seq1repeat = 5
keyChars = map(chr, range(97, 123))
valueChars = map(chr, range(65, 91))
## EDIT FINISHED ###############################################################
DEBUG_SEQUENCES = False
PRINT_SEQUENCES = True
################################################################################
if DEBUG_SEQUENCES:
keySizes = [1,2,3]
valueSizes = [1,2]
def pair2kvpair(pair):
return memlib.kv_pair(pair[0], pair[1], "EFBEADDE", 42)
def seq1(keys, values, repeat):
if PRINT_SEQUENCES:
print "--- SEQUENCE 1 repeat %-3s -----------------------------------------------------" % repeat
kv_pairs = []
for key in keys:
for value in values:
kv_pairs.append( memlib.kv_pair(key, value, "EFBEADDE", 42) )
requests = []
responses = []
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Set [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binarySetRequest( kv_pair , "00000000" ) )
responses.append( memlib.binarySetResponse( kv_pair, "00000000" ) )
for _ in range(repeat):
if PRINT_SEQUENCES:
print "Get [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binaryGetRequest( kv_pair , "00000000" ) )
responses.append( memlib.binaryGetResponse( kv_pair , "00000000" ) )
return (requests, responses)
def seq2(keys, values):
if PRINT_SEQUENCES:
print "--- SEQUENCE 2 -----------------------------------------------------------------"
requests = []
responses = []
for _ in range(len(values)):
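    # Each pass pairs the keys with a rotated copy of the value list, so a
    # key is exercised with several different values across passes.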
# for more keys than values, duplicate use of values
values_used = values
if len(keys) > len(values):
while(len(keys) > len(values_used)):
values_used = values_used + values
values_used = values_used[0:len(keys)]
# requests
kv_pairs = map(pair2kvpair, zip(keys, values_used))
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Set [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binarySetRequest(kv_pair, "00000000") )
responses.append( memlib.binarySetResponse(kv_pair, "00000000") )
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Get [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binaryGetRequest(kv_pair, "00000000") )
responses.append( memlib.binaryGetResponse(kv_pair, "00000000") )
# rotation
values = values[1:] + values[0:1]
return (requests, responses)
################################################################################
if len(keySizes) > len(keyChars):
sys.exit("Error: Not enough key characters.")
if len(valueSizes) > len(valueChars):
sys.exit("Error: Not enough value characters.")
keyPairs = zip(keySizes, keyChars)
valuePairs = zip(valueSizes, valueChars)
keys = map(lambda (size, char): char * size, keyPairs)
values = map(lambda (size, char): char * size, valuePairs)
SEQ1 = seq1(keys, values, seq1repeat)
SEQ2 = seq2(keys, values)
SEQ3 = seq1(keys, values, 1)
# SEQ1
req = open("SEQ1_R12-pkt.in.txt", "w")
req.write( memlib.requests12Gbps(SEQ1[0]) )
req.close()
req = open("SEQ1_R1-pkt.in.txt", "w")
req.write( memlib.requests1Gbps(SEQ1[0]) )
req.close()
res = open("SEQ1-pkt.out.txt", "w")
res.write( memlib.responses(SEQ1[1]) )
res.close()
res = open("SEQ1-pkt.out.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ1[1]) )
res.close()
# SEQ2
req = open("SEQ2_R12-pkt.in.txt", "w")
req.write( memlib.requests12Gbps(SEQ2[0]) )
req.close()
req = open("SEQ2_R1-pkt.in.txt", "w")
req.write( memlib.requests1Gbps(SEQ2[0]) )
req.close()
res = open("SEQ2-pkt.out.txt", "w")
res.write( memlib.responses(SEQ2[1]) )
res.close()
res = open("SEQ2-pkt.out.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ2[1]) )
res.close()
# SEQ3
req = open("SEQ3_R12-pkt.in.txt", "w")
req.write( memlib.requests12Gbps(SEQ3[0]) )
req.close()
req = open("SEQ3_R1-pkt.in.txt", "w")
req.write( memlib.requests1Gbps(SEQ3[0]) )
req.close()
res = open("SEQ3-pkt.out.txt", "w")
res.write( memlib.responses(SEQ3[1]) )
res.close()
res = open("SEQ3-pkt.out.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ3[1]) )
res.close()
####### Same thing for HLS outputs #######
# SEQ1
req = open("SEQ1_R12-pkt.in.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ1[0]) )
req.close()
req = open("SEQ1_R1-pkt.in.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ1[0]) )
req.close()
res = open("SEQ1-pkt.out.hls.txt", "w")
res.write( memlib.responses_hls(SEQ1[1]) )
res.close()
# SEQ2
req = open("SEQ2_R12-pkt.in.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ2[0]) )
req.close()
req = open("SEQ2_R1-pkt.in.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ2[0]) )
req.close()
res = open("SEQ2-pkt.out.hls.txt", "w")
res.write( memlib.responses_hls(SEQ2[1]) )
res.close()
# SEQ3
req = open("SEQ3_R12-pkt.in.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ3[0]) )
req.close()
req = open("SEQ3_R1-pkt.in.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ3[0]) )
req.close()
res = open("SEQ3-pkt.out.hls.txt", "w")
res.write( memlib.responses_hls(SEQ3[1]) )
res.close()
|
bsd-3-clause
| 1,551,864,709,077,715,000 | 30.657143 | 117 | 0.609567 | false |
imk1/IMKTFBindingCode
|
selectFIMOCutoff.py
|
1
|
5703
|
import sys
import argparse
from itertools import izip
import pybedtools as bt
import os
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description="Select the FIMO cutoff by maximizing F1 for a list of FIMO files")
parser.add_argument("--FIMOFileListFileName", required=True, \
help="List of FIMO txt files that will be overlapped with each file in the list (motif hits for TF in DNase union)")
parser.add_argument("--peakFileNameListFileName", required=False, \
help="List of peak files that will be used for positives in computing the F1")
parser.add_argument("--relaxedPeakFileNameListFileName", required=False, \
help="List of peak files that will be excluded from negatives in computing the F1")
parser.add_argument("--DNaseForBalancedAcc", required=False, \
default="/srv/scratch/shared/surya/imk1/TFBindingPredictionProject/Polstein2015Data/out/peak/idr/optimal_set/Polstein2015_MergedReps_rep1-rep2.IDR0.1.filt.narrowPeak.gz", \
help="DNase file that will be used for negatives in computing the balanced accuracy")
parser.add_argument("--FIMOBedFileNameListFileName", required=True, \
help="File where names of selected FIMO bed files will be written")
options = parser.parse_args()
return options
def optimizeF1(options, bedThreshFileNameList, peakFileName, relaxedPeakFileName):
DNaseForBalancedAccRegions = bt.BedTool(options.DNaseForBalancedAcc)
bestF1 = 0.0
bestF1FileName = None
threshOneMotifHits = bt.BedTool()
peaks = bt.BedTool(peakFileName)
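	# Positives are DNase regions overlapping the optimal peaks; negatives are
	# DNase regions that avoid even the relaxed peaks.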
positiveRegions = DNaseForBalancedAccRegions.intersect(peaks, wa = True, u = True)
numPos = DNaseForBalancedAccRegions.intersect(peaks, wa = True, u = True).count()
relaxedPeaks = bt.BedTool(relaxedPeakFileName)
negativeRegions = DNaseForBalancedAccRegions.subtract(relaxedPeaks, A = True)
numNeg = DNaseForBalancedAccRegions.subtract(relaxedPeaks, A = True).count()
for bedThreshFileName in bedThreshFileNameList:
# Iterate through the thresholds and get the balanced acc for each
threshMotifHits = bt.BedTool(bedThreshFileName)
numPosWithMotifHits = positiveRegions.intersect(threshMotifHits, wa = True, u = True).count()
if numPosWithMotifHits == 0:
# No peaks have motif hits, so skip this cutoff
continue
posAcc = float(numPosWithMotifHits)/float(numPos)
numNegWithMotifHits = negativeRegions.intersect(threshMotifHits, wa = True, u = True).count()
precision = float(numPosWithMotifHits) / float(numPosWithMotifHits + numNegWithMotifHits)
F1 = 2.0 * ((posAcc * precision)/(posAcc + precision))
if F1 < 0.2:
# The F1 is not good enough
os.remove(bedThreshFileName)
continue
if F1 > bestF1:
# The current accuracy is the best one
bestF1 = F1
threshOneMotifHits = threshMotifHits
if bestF1FileName:
# Remove the previous file that had the best F1 since this file is better
os.remove(bestF1FileName)
bestF1FileName = bedThreshFileName
return bestF1FileName
def selectFIMOCutoff(options):
# Select the FIMO cutoff by maximizing F1 for a list of FIMO files
FIMOFileListFile = open(options.FIMOFileListFileName)
peakFileNameListFile = open(options.peakFileNameListFileName)
relaxedPeakFileNameListFile = open(options.relaxedPeakFileNameListFileName)
FIMOBedFileNameListFile = open(options.FIMOBedFileNameListFileName, 'w+')
for FIMOFileNameStr, peakFileNameStr, relaxedPeakFileNameStr in \
izip(FIMOFileListFile, peakFileNameListFile, relaxedPeakFileNameListFile):
# Iterate through the FIMO files and choose the appropriate p-value cutoff for each
FIMOFileName = FIMOFileNameStr.strip()
FIMOFileNameElements = FIMOFileName.split("/")
FIMOFile = open(FIMOFileName)
bedThreshFileList = []
bedThreshFileNameList = []
thresholdList = [10 ** (0 - i) for i in range(4, 13)]
# Create bed files for many thresholds
for threshold in thresholdList:
# Iterate through the thresholds and create a file for each
bedThreshFileName = "/".join(FIMOFileNameElements[0:-1]) + "/" + "fimoNeg" + str(threshold) + ".bed"
bedThreshFileNameList.append(bedThreshFileName)
bedThreshFile = open(bedThreshFileName, 'w+')
bedThreshFileList.append(bedThreshFile)
FIMOFile.readline() # REMOVE THE HEADER
firstLine = True
motifLength = None
for line in FIMOFile:
# Convert each line from the FIMO file into bed format
lineElements = line.strip().split("\t")
if firstLine:
# At the first line, so get the motif length
if line == "":
# There are no motif hits, so stop
break
motifLength = len(lineElements[9])
firstLine = False
pVal = float(lineElements[7])
for threshold, bedThreshFile in izip(thresholdList, bedThreshFileList):
# Iterate through the thresholds and record the data in all files that meet the threshold cutoff
if pVal < threshold:
# The p-value is below the current threshold
bedThreshFile.write("\t".join([lineElements[2], lineElements[3], lineElements[4], ".", lineElements[6], \
lineElements[5]]) + "\n")
else:
# The p-value is not below the current threshold, so it will not be below any of the other thresholds
break
FIMOFile.close()
for bedThreshFile in bedThreshFileList:
# Iterate through the files and close each
bedThreshFile.close()
bestF1FileName = optimizeF1(options, bedThreshFileNameList, peakFileNameStr.strip(), relaxedPeakFileNameStr.strip())
if bestF1FileName:
# There is a cutoff with a sufficiently good F1
FIMOBedFileNameListFile.write(bestF1FileName + "\n")
FIMOFileListFile.close()
peakFileNameListFile.close()
relaxedPeakFileNameListFile.close()
FIMOBedFileNameListFile.close()
if __name__ == "__main__":
options = parseArgument()
selectFIMOCutoff(options)
|
mit
| 4,666,782,831,651,699,000 | 46.525 | 174 | 0.761003 | false |
coreycb/horizon
|
openstack_dashboard/api/keystone.py
|
1
|
33412
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import six
import six.moves.urllib.parse as urlparse
from keystoneclient import exceptions as keystone_exceptions
from openstack_auth import backend
from openstack_auth import utils as auth_utils
from horizon import exceptions
from horizon import messages
from horizon.utils import functions as utils
from openstack_dashboard.api import base
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
DEFAULT_DOMAIN = getattr(settings, 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN',
'default')
# Set up our data structure for managing Identity API versions, and
# add a couple utility methods to it.
class IdentityAPIVersionManager(base.APIVersionManager):
def upgrade_v2_user(self, user):
if getattr(user, "project_id", None) is None:
user.project_id = getattr(user, "default_project_id",
getattr(user, "tenantId", None))
return user
def get_project_manager(self, *args, **kwargs):
if VERSIONS.active < 3:
manager = keystoneclient(*args, **kwargs).tenants
else:
manager = keystoneclient(*args, **kwargs).projects
return manager
VERSIONS = IdentityAPIVersionManager(
"identity", preferred_version=auth_utils.get_keystone_version())
# Import from oldest to newest so that "preferred" takes correct precedence.
try:
from keystoneclient.v2_0 import client as keystone_client_v2
VERSIONS.load_supported_version(2.0, {"client": keystone_client_v2})
except ImportError:
pass
try:
from keystoneclient.v3 import client as keystone_client_v3
VERSIONS.load_supported_version(3, {"client": keystone_client_v3})
except ImportError:
pass
@six.python_2_unicode_compatible
class Service(base.APIDictWrapper):
"""Wrapper for a dict based on the service data from keystone."""
_attrs = ['id', 'type', 'name']
def __init__(self, service, region, *args, **kwargs):
super(Service, self).__init__(service, *args, **kwargs)
self.public_url = base.get_url_for_service(service, region,
'publicURL')
self.url = base.get_url_for_service(service, region, 'internalURL')
if self.url:
self.host = urlparse.urlparse(self.url).hostname
else:
self.host = None
self.disabled = None
self.region = region
def __str__(self):
if(self.type == "identity"):
return _("%(type)s (%(backend)s backend)") \
% {"type": self.type, "backend": keystone_backend_name()}
else:
return self.type
def __repr__(self):
return "<Service: %s>" % six.text_type(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
if getattr(request.user, "service_catalog", None):
url = base.url_for(request,
service_type='identity',
endpoint_type=endpoint_type)
else:
auth_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL')
url = request.session.get('region_endpoint', auth_url)
# TODO(gabriel): When the Service Catalog no longer contains API versions
# in the endpoints this can be removed.
url = auth_utils.fix_auth_url_version(url)
return url
def keystoneclient(request, admin=False):
"""Returns a client connected to the Keystone backend.
Several forms of authentication are supported:
* Username + password -> Unscoped authentication
* Username + password + tenant id -> Scoped authentication
* Unscoped token -> Unscoped authentication
* Unscoped token + tenant id -> Scoped authentication
* Scoped token -> Scoped authentication
Available services and data from the backend will vary depending on
whether the authentication was scoped or unscoped.
Lazy authentication if an ``endpoint`` parameter is provided.
Calls requiring the admin endpoint should have ``admin=True`` passed in
as a keyword argument.
The client is cached so that subsequent API calls during the same
request/response cycle don't have to be re-authenticated.
"""
api_version = VERSIONS.get_active_version()
user = request.user
token_id = user.token.id
    if is_multi_domain_enabled():
# Cloud Admin, Domain Admin or Mixed Domain Admin
if is_domain_admin(request):
domain_token = request.session.get('domain_token')
if domain_token:
token_id = getattr(domain_token, 'auth_token', None)
if admin:
if not policy.check((("identity", "admin_required"),), request):
raise exceptions.NotAuthorized
endpoint_type = 'adminURL'
else:
endpoint_type = getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'internalURL')
# Take care of client connection caching/fetching a new client.
# Admin vs. non-admin clients are cached separately for token matching.
cache_attr = "_keystoneclient_admin" if admin \
else backend.KEYSTONE_CLIENT_ATTR
if (hasattr(request, cache_attr) and
(not user.token.id or
getattr(request, cache_attr).auth_token == user.token.id)):
conn = getattr(request, cache_attr)
else:
endpoint = _get_endpoint_url(request, endpoint_type)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug("Creating a new keystoneclient connection to %s." % endpoint)
remote_addr = request.environ.get('REMOTE_ADDR', '')
conn = api_version['client'].Client(token=token_id,
endpoint=endpoint,
original_ip=remote_addr,
insecure=insecure,
cacert=cacert,
auth_url=endpoint,
debug=settings.DEBUG)
setattr(request, cache_attr, conn)
return conn
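# Illustrative sketch only: how a caller elsewhere in the dashboard would
# typically obtain a client and issue a request. The helper name below is
# hypothetical and not part of this module's API; ``request`` is assumed to be
# an authenticated Django request as used throughout this file.
def _example_keystoneclient_usage(request):
    # Admin-endpoint client; it is cached on the request for the rest of the
    # request/response cycle by keystoneclient() above.
    client = keystoneclient(request, admin=True)
    # Any manager hanging off the client can then be used as usual.
    return client.roles.list()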
def domain_create(request, name, description=None, enabled=None):
manager = keystoneclient(request, admin=True).domains
return manager.create(name=name,
description=description,
enabled=enabled)
def domain_get(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.get(domain_id)
def domain_delete(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.delete(domain_id)
def domain_list(request):
manager = keystoneclient(request, admin=True).domains
return manager.list()
def domain_lookup(request):
if policy.check((("identity", "identity:list_domains"),), request) \
and request.session.get('domain_token'):
try:
domains = domain_list(request)
return dict((d.id, d.name) for d in domains)
except Exception:
LOG.warning("Pure project admin doesn't have a domain token")
return None
else:
domain = get_default_domain(request)
return {domain.id: domain.name}
def domain_update(request, domain_id, name=None, description=None,
enabled=None):
manager = keystoneclient(request, admin=True).domains
try:
response = manager.update(domain_id, name=name,
description=description, enabled=enabled)
except Exception:
LOG.exception("Unable to update Domain: %s" % domain_id)
raise
return response
def tenant_create(request, name, description=None, enabled=None,
domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
try:
if VERSIONS.active < 3:
return manager.create(name, description, enabled, **kwargs)
else:
return manager.create(name, domain,
description=description,
enabled=enabled, **kwargs)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def get_default_domain(request, get_name=True):
"""Gets the default domain object to use when creating Identity object.
Returns the domain context if is set, otherwise return the domain
of the logon user.
:param get_name: Whether to get the domain name from Keystone if the
context isn't set. Setting this to False prevents an unnecessary call
to Keystone if only the domain ID is needed.
"""
domain_id = request.session.get("domain_context", None)
domain_name = request.session.get("domain_context_name", None)
# if running in Keystone V3 or later
if VERSIONS.active >= 3 and domain_id is None:
# if no domain context set, default to user's domain
domain_id = request.user.user_domain_id
domain_name = request.user.user_domain_name
if get_name and not request.user.is_federated:
try:
domain = domain_get(request, domain_id)
domain_name = domain.name
except Exception:
LOG.warning("Unable to retrieve Domain: %s" % domain_id)
domain = base.APIDictWrapper({"id": domain_id,
"name": domain_name})
return domain
def get_effective_domain_id(request):
"""Gets the id of the default domain to use when creating Identity objects.
    If the request's default domain is the same as DEFAULT_DOMAIN, return None.
"""
domain_id = get_default_domain(request).get('id')
return None if domain_id == DEFAULT_DOMAIN else domain_id
def is_cloud_admin(request):
return policy.check((("identity", "cloud_admin"),), request)
def is_domain_admin(request):
return policy.check(
(("identity", "admin_and_matching_domain_id"),), request)
# TODO(gabriel): Is there ever a valid case for admin to be false here?
# A quick search through the codebase reveals that it's always called with
# admin=true so I suspect we could eliminate it entirely as with the other
# tenant commands.
def tenant_get(request, project, admin=True):
manager = VERSIONS.get_project_manager(request, admin=admin)
return manager.get(project)
def tenant_delete(request, project):
manager = VERSIONS.get_project_manager(request, admin=True)
return manager.delete(project)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
admin=True, filters=None):
manager = VERSIONS.get_project_manager(request, admin=admin)
page_size = utils.get_page_size(request)
limit = None
if paginate:
limit = page_size + 1
has_more_data = False
# if requesting the projects for the current user,
# return the list from the cache
if user == request.user.id:
tenants = request.user.authorized_tenants
elif VERSIONS.active < 3:
tenants = manager.list(limit, marker)
if paginate and len(tenants) > page_size:
tenants.pop(-1)
has_more_data = True
# V3 API
else:
domain_id = get_effective_domain_id(request)
kwargs = {
"domain": domain_id,
"user": user
}
if filters is not None:
kwargs.update(filters)
tenants = manager.list(**kwargs)
return tenants, has_more_data
def tenant_update(request, project, name=None, description=None,
enabled=None, domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
try:
if VERSIONS.active < 3:
return manager.update(project, name, description, enabled,
**kwargs)
else:
return manager.update(project, name=name, description=description,
enabled=enabled, domain=domain, **kwargs)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def user_list(request, project=None, domain=None, group=None, filters=None):
if VERSIONS.active < 3:
kwargs = {"tenant_id": project}
else:
kwargs = {
"project": project,
"domain": domain,
"group": group
}
if filters is not None:
kwargs.update(filters)
users = keystoneclient(request, admin=True).users.list(**kwargs)
return [VERSIONS.upgrade_v2_user(user) for user in users]
def user_create(request, name=None, email=None, password=None, project=None,
enabled=None, domain=None, description=None, **data):
manager = keystoneclient(request, admin=True).users
try:
if VERSIONS.active < 3:
user = manager.create(name, password, email, project, enabled)
return VERSIONS.upgrade_v2_user(user)
else:
return manager.create(name, password=password, email=email,
default_project=project, enabled=enabled,
domain=domain, description=description,
**data)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def user_delete(request, user_id):
return keystoneclient(request, admin=True).users.delete(user_id)
def user_get(request, user_id, admin=True):
user = keystoneclient(request, admin=admin).users.get(user_id)
return VERSIONS.upgrade_v2_user(user)
def user_update(request, user, **data):
manager = keystoneclient(request, admin=True).users
error = None
if not keystone_can_edit_user():
raise keystone_exceptions.ClientException(
405, _("Identity service does not allow editing user data."))
# The v2 API updates user model and default project separately
if VERSIONS.active < 3:
# Update user details
try:
user = manager.update(user, **data)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
except Exception:
error = exceptions.handle(request, ignore=True)
if "project" in data:
project = data.pop('project')
# Update default tenant
try:
user_update_tenant(request, user, project)
user.tenantId = project
except Exception:
error = exceptions.handle(request, ignore=True)
# Check for existing roles
# Show a warning if no role exists for the project
user_roles = roles_for_user(request, user, project)
if not user_roles:
messages.warning(request,
_('User %s has no role defined for '
'that project.')
% data.get('name', None))
if error is not None:
raise error
# v3 API is so much simpler...
else:
try:
user = manager.update(user, **data)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def user_update_enabled(request, user, enabled):
manager = keystoneclient(request, admin=True).users
if VERSIONS.active < 3:
return manager.update_enabled(user, enabled)
else:
return manager.update(user, enabled=enabled)
def user_update_password(request, user, password, admin=True):
if not keystone_can_edit_user():
raise keystone_exceptions.ClientException(
405, _("Identity service does not allow editing user password."))
manager = keystoneclient(request, admin=admin).users
if VERSIONS.active < 3:
return manager.update_password(user, password)
else:
return manager.update(user, password=password)
def user_verify_admin_password(request, admin_password):
# attempt to create a new client instance with admin password to
# verify if it's correct.
client = keystone_client_v2 if VERSIONS.active < 3 else keystone_client_v3
try:
endpoint = _get_endpoint_url(request, 'internalURL')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
client.Client(
username=request.user.username,
password=admin_password,
insecure=insecure,
cacert=cacert,
auth_url=endpoint
)
return True
except Exception:
exceptions.handle(request, ignore=True)
return False
def user_update_own_password(request, origpassword, password):
client = keystoneclient(request, admin=False)
client.user_id = request.user.id
if VERSIONS.active < 3:
return client.users.update_own_password(origpassword, password)
else:
return client.users.update_password(origpassword, password)
def user_update_tenant(request, user, project, admin=True):
manager = keystoneclient(request, admin=admin).users
if VERSIONS.active < 3:
return manager.update_tenant(user, project)
else:
return manager.update(user, project=project)
def group_create(request, domain_id, name, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.create(domain=domain_id,
name=name,
description=description)
def group_get(request, group_id, admin=True):
manager = keystoneclient(request, admin=admin).groups
return manager.get(group_id)
def group_delete(request, group_id):
manager = keystoneclient(request, admin=True).groups
return manager.delete(group_id)
def group_list(request, domain=None, project=None, user=None):
manager = keystoneclient(request, admin=True).groups
groups = manager.list(user=user, domain=domain)
if project:
project_groups = []
for group in groups:
roles = roles_for_group(request, group=group.id, project=project)
if roles and len(roles) > 0:
project_groups.append(group)
groups = project_groups
return groups
def group_update(request, group_id, name=None, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.update(group=group_id,
name=name,
description=description)
def add_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.add_to_group(group=group_id, user=user_id)
def remove_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.remove_from_group(group=group_id, user=user_id)
def get_project_groups_roles(request, project):
"""Gets the groups roles in a given project.
:param request: the request entity containing the login user information
:param project: the project to filter the groups roles. It accepts both
project object resource or project ID
:returns group_roles: a dictionary mapping the groups and their roles in
given project
"""
groups_roles = collections.defaultdict(list)
project_role_assignments = role_assignments_list(request,
project=project)
for role_assignment in project_role_assignments:
if not hasattr(role_assignment, 'group'):
continue
group_id = role_assignment.group['id']
role_id = role_assignment.role['id']
# filter by project_id
if ('project' in role_assignment.scope and
role_assignment.scope['project']['id'] == project):
groups_roles[group_id].append(role_id)
return groups_roles
def role_assignments_list(request, project=None, user=None, role=None,
group=None, domain=None, effective=False,
include_subtree=True):
if VERSIONS.active < 3:
raise exceptions.NotAvailable
if include_subtree:
domain = None
manager = keystoneclient(request, admin=True).role_assignments
return manager.list(project=project, user=user, role=role, group=group,
domain=domain, effective=effective,
include_subtree=include_subtree)
def role_create(request, name):
manager = keystoneclient(request, admin=True).roles
return manager.create(name)
def role_get(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.get(role_id)
def role_update(request, role_id, name=None):
manager = keystoneclient(request, admin=True).roles
return manager.update(role_id, name)
def role_delete(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.delete(role_id)
def role_list(request):
"""Returns a global list of available roles."""
return keystoneclient(request, admin=True).roles.list()
def roles_for_user(request, user, project=None, domain=None):
"""Returns a list of user roles scoped to a project or domain."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.roles_for_user(user, project)
else:
return manager.list(user=user, domain=domain, project=project)
def get_domain_users_roles(request, domain):
users_roles = collections.defaultdict(list)
domain_role_assignments = role_assignments_list(request,
domain=domain,
include_subtree=False)
for role_assignment in domain_role_assignments:
if not hasattr(role_assignment, 'user'):
continue
user_id = role_assignment.user['id']
role_id = role_assignment.role['id']
# filter by domain_id
if ('domain' in role_assignment.scope and
role_assignment.scope['domain']['id'] == domain):
users_roles[user_id].append(role_id)
return users_roles
def add_domain_user_role(request, user, role, domain):
"""Adds a role for a user on a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.grant(role, user=user, domain=domain)
def remove_domain_user_role(request, user, role, domain=None):
"""Removes a given single role for a user from a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role, user=user, domain=domain)
def get_project_users_roles(request, project):
users_roles = collections.defaultdict(list)
if VERSIONS.active < 3:
project_users = user_list(request, project=project)
for user in project_users:
roles = roles_for_user(request, user.id, project)
roles_ids = [role.id for role in roles]
users_roles[user.id].extend(roles_ids)
else:
project_role_assignments = role_assignments_list(request,
project=project)
for role_assignment in project_role_assignments:
if not hasattr(role_assignment, 'user'):
continue
user_id = role_assignment.user['id']
role_id = role_assignment.role['id']
# filter by project_id
if ('project' in role_assignment.scope and
role_assignment.scope['project']['id'] == project):
users_roles[user_id].append(role_id)
return users_roles
def add_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Adds a role for a user on a tenant."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.add_user_role(user, role, project)
else:
return manager.grant(role, user=user, project=project,
group=group, domain=domain)
def remove_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Removes a given single role for a user from a tenant."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.remove_user_role(user, role, project)
else:
return manager.revoke(role, user=user, project=project,
group=group, domain=domain)
def remove_tenant_user(request, project=None, user=None, domain=None):
"""Removes all roles from a user on a tenant, removing them from it."""
client = keystoneclient(request, admin=True)
roles = client.roles.roles_for_user(user, project)
for role in roles:
remove_tenant_user_role(request, user=user, role=role.id,
project=project, domain=domain)
def roles_for_group(request, group, domain=None, project=None):
manager = keystoneclient(request, admin=True).roles
return manager.list(group=group, domain=domain, project=project)
def add_group_role(request, role, group, domain=None, project=None):
"""Adds a role for a group on a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.grant(role=role, group=group, domain=domain,
project=project)
def remove_group_role(request, role, group, domain=None, project=None):
"""Removes a given single role for a group from a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role=role, group=group, project=project,
domain=domain)
def remove_group_roles(request, group, domain=None, project=None):
"""Removes all roles from a group on a domain or project."""
client = keystoneclient(request, admin=True)
roles = client.roles.list(group=group, domain=domain, project=project)
for role in roles:
remove_group_role(request, role=role.id, group=group,
domain=domain, project=project)
def get_default_role(request):
"""Gets the default role object from Keystone and saves it as a global.
    This is configured in settings and should not change from request
    to request. Supports lookup by name or id.
"""
global DEFAULT_ROLE
default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
if default and DEFAULT_ROLE is None:
try:
roles = keystoneclient(request, admin=True).roles.list()
except Exception:
roles = []
exceptions.handle(request)
for role in roles:
if role.id == default or role.name == default:
DEFAULT_ROLE = role
break
return DEFAULT_ROLE
def ec2_manager(request):
client = keystoneclient(request)
if hasattr(client, 'ec2'):
return client.ec2
# Keystoneclient 4.0 was released without the ec2 creds manager.
from keystoneclient.v2_0 import ec2
return ec2.CredentialsManager(client)
def list_ec2_credentials(request, user_id):
return ec2_manager(request).list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
return ec2_manager(request).create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
return ec2_manager(request).get(user_id, access_token)
def delete_user_ec2_credentials(request, user_id, access_token):
return ec2_manager(request).delete(user_id, access_token)
def keystone_can_edit_domain():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
can_edit_domain = backend_settings.get('can_edit_domain', True)
multi_domain_support = getattr(settings,
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
False)
return can_edit_domain and multi_domain_support
def keystone_can_edit_user():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_user', True)
def keystone_can_edit_project():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_project', True)
def keystone_can_edit_group():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_group', True)
def keystone_can_edit_role():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_role', True)
def keystone_backend_name():
if hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
return settings.OPENSTACK_KEYSTONE_BACKEND['name']
else:
return 'unknown'
def get_version():
return VERSIONS.active
def is_multi_domain_enabled():
return (VERSIONS.active >= 3 and
getattr(settings, 'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT', False))
def is_federation_management_enabled():
return getattr(settings, 'OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT', False)
def identity_provider_create(request, idp_id, description=None,
enabled=False, remote_ids=None):
manager = keystoneclient(request, admin=True).federation.identity_providers
try:
return manager.create(id=idp_id,
description=description,
enabled=enabled,
remote_ids=remote_ids)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def identity_provider_get(request, idp_id):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.get(idp_id)
def identity_provider_update(request, idp_id, description=None,
enabled=False, remote_ids=None):
manager = keystoneclient(request, admin=True).federation.identity_providers
try:
return manager.update(idp_id,
description=description,
enabled=enabled,
remote_ids=remote_ids)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def identity_provider_delete(request, idp_id):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.delete(idp_id)
def identity_provider_list(request):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.list()
def mapping_create(request, mapping_id, rules):
manager = keystoneclient(request, admin=True).federation.mappings
try:
return manager.create(mapping_id=mapping_id, rules=rules)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def mapping_get(request, mapping_id):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.get(mapping_id)
def mapping_update(request, mapping_id, rules):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.update(mapping_id, rules=rules)
def mapping_delete(request, mapping_id):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.delete(mapping_id)
def mapping_list(request):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.list()
def protocol_create(request, protocol_id, identity_provider, mapping):
manager = keystoneclient(request).federation.protocols
try:
return manager.create(protocol_id, identity_provider, mapping)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def protocol_get(request, identity_provider, protocol):
manager = keystoneclient(request).federation.protocols
return manager.get(identity_provider, protocol)
def protocol_update(request, identity_provider, protocol, mapping):
manager = keystoneclient(request).federation.protocols
return manager.update(identity_provider, protocol, mapping)
def protocol_delete(request, identity_provider, protocol):
manager = keystoneclient(request).federation.protocols
return manager.delete(identity_provider, protocol)
def protocol_list(request, identity_provider):
manager = keystoneclient(request).federation.protocols
return manager.list(identity_provider)
|
apache-2.0
| 4,896,837,129,714,877,000 | 34.620469 | 79 | 0.645127 | false |
henrysher/imagefactory
|
imagefactory-plugins/OpenStackCloud/OpenStackCloud.py
|
1
|
4825
|
#!/usr/bin/python
import logging
import os
import json
import zope.interface
import libxml2
from xml.etree.ElementTree import fromstring
from imgfac.BuildDispatcher import BuildDispatcher
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.CloudDelegate import CloudDelegate
from glance import client as glance_client
def glance_upload(image_filename, creds = {'auth_url': None, 'password': None, 'strategy': 'noauth', 'tenant': None, 'username': None},
host = "0.0.0.0", port = "9292", token = None, name = 'Factory Test Image'):
image_meta = {'container_format': 'bare',
'disk_format': 'raw',
'is_public': True,
'min_disk': 0,
'min_ram': 0,
'name': name,
'properties': {'distro': 'rhel'}}
c = glance_client.Client(host=host, port=port,
auth_tok=token, creds=creds)
image_data = open(image_filename, "r")
image_meta = c.add_image(image_meta, image_data)
image_data.close()
return image_meta['id']
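# Illustrative sketch only: the credential values and image path below are
# hypothetical. This shows how glance_upload() above would typically be
# invoked with Keystone-style credentials.
def _example_glance_upload():
    creds = {'auth_url': 'http://127.0.0.1:5000/v2.0', 'password': 'secret',
             'strategy': 'keystone', 'tenant': 'demo', 'username': 'demo'}
    # Returns the glance image id of the uploaded raw image
    return glance_upload('/tmp/factory-image.raw', creds=creds,
                         host='127.0.0.1', port='9292',
                         name='Factory Test Image')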
class OpenStackCloud(object):
zope.interface.implements(CloudDelegate)
def __init__(self):
# Note that we are now missing ( template, target, config_block = None):
super(OpenStackCloud, self).__init__()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
# Our target_image is already a raw KVM image. All we need to do is upload to glance
        self.builder = builder
        self.active_image = self.builder.provider_image
self.openstack_decode_credentials(credentials)
        provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("OpenStack KVM instance not found in local configuration file /etc/imagefactory/openstack-kvm.json or as XML or JSON")
# Image is always here and it is the target_image datafile
input_image = self.builder.target_image.datafile
input_image_name = os.path.basename(input_image)
image_name = 'ImageFactory created image - %s' % (self.builder.provider_image.identifier)
        image_id = glance_upload(input_image, creds = self.credentials_dict, token = self.credentials_token,
                                 host=provider_data['glance-host'], port=provider_data['glance-port'],
                                 name=image_name)
self.builder.provider_image.target_identifier=image_id
self.builder.provider_image.provider_account_identifier=self.credentials_dict['username']
self.percent_complete=100
def openstack_decode_credentials(self, credentials):
self.activity("Preparing OpenStack credentials")
# TODO: Validate these - in particular, ensure that if some nodes are missing at least
# a minimal acceptable set of auth is present
doc = libxml2.parseDoc(credentials)
self.credentials_dict = { }
for authprop in [ 'auth_url', 'password', 'strategy', 'tenant', 'username']:
self.credentials_dict[authprop] = self._get_xml_node(doc, authprop)
self.credentials_token = self._get_xml_node(doc, 'token')
def _get_xml_node(self, doc, credtype):
nodes = doc.xpathEval("//provider_credentials/openstack_credentials/%s" % (credtype))
# OpenStack supports multiple auth schemes so not all nodes are required
if len(nodes) < 1:
return None
return nodes[0].content
def snapshot_image_on_provider(self, builder, provider, credentials, template, parameters):
# TODO: Implement snapshot builds
raise ImageFactoryException("Snapshot builds not currently supported on OpenStack KVM")
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
pass
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
pass
def get_dynamic_provider_data(self, provider):
try:
xml_et = fromstring(provider)
return xml_et.attrib
except Exception as e:
self.log.debug('Testing provider for XML: %s' % e)
pass
try:
jload = json.loads(provider)
return jload
except ValueError as e:
self.log.debug('Testing provider for JSON: %s' % e)
pass
return None
|
apache-2.0
| 2,142,119,869,268,620,800 | 40.956522 | 158 | 0.654301 | false |
google-research/open-covid-19-data
|
src/pipeline/config.py
|
1
|
5665
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
DATA_YAML = os.path.abspath(os.path.join(__file__, '../../config/data.yaml'))
ALLOWLIST_YAML = os.path.abspath(os.path.join(__file__, '../../config/allowlist.yaml'))
SOURCES_DIR = os.path.abspath(os.path.join(__file__, '../../config/sources'))
DATA_INPUTS_DIR = os.path.abspath(os.path.join(__file__, '../../../data/inputs/'))
def read_data_schema():
with open(DATA_YAML) as file:
schema = yaml.load(file, Loader=yaml.FullLoader)
return schema
def all_data_schema_columns():
column_list = []
schema = read_data_schema()
for data_type in schema.values():
columns = data_type['columns']
column_values = list(columns.values())
column_list.extend(column_values)
return column_list
def read_allowlist():
with open(ALLOWLIST_YAML) as file:
return yaml.load(file, Loader=yaml.FullLoader)
def all_region_columns():
return ['region_code'] + other_region_columns()
def other_region_columns():
return ['parent_region_code',
'region_code_type',
'region_code_level',
'level_1_region_code',
'level_2_region_code',
'level_3_region_code']
def get_sources_with_data():
downloaded_dir = os.path.join(DATA_INPUTS_DIR, 'downloaded')
scraped_dir = os.path.join(DATA_INPUTS_DIR, 'scraped')
downloaded_sources = [f.name for f in os.scandir(downloaded_dir) if f.is_dir()]
scraped_sources = [f.name for f in os.scandir(scraped_dir) if f.is_dir()]
scraped_sources.remove('spreadsheets')
result = downloaded_sources + scraped_sources
return result
def read_config(cc_by=True,
cc_by_sa=False,
cc_by_nc=False,
google_tos=False,
filter_no_load_func=True,
filter_no_data=True,
filter_not_approved=True,
filter_by_fetch_method=None):
config_dict = {}
allowlist = read_allowlist()
sources_with_data = get_sources_with_data()
for source_file_name in os.listdir(SOURCES_DIR):
source_file = os.path.join(SOURCES_DIR, source_file_name)
source_key = os.path.splitext(source_file_name)[0]
if filter_not_approved and source_key not in allowlist:
continue
with open(source_file) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
params['config_key'] = source_key
# pylint: disable=bad-continuation
if ((filter_no_load_func
and ('load' not in params or 'function' not in params['load'] or params['load']['function'] is None))
or (filter_by_fetch_method
and ('fetch' not in params or params['fetch']['method'] != filter_by_fetch_method))
or (filter_no_data and (source_key not in sources_with_data))
or (not cc_by and params['license']['cc_by'])
or (not cc_by_sa and params['license']['cc_by_sa'])
or (not cc_by_nc and params['license']['cc_by_nc'])
or (not google_tos and params['license']['google_tos'])):
continue
# pylint: enable=bad-continuation
config_dict[source_key] = params
return config_dict
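# Illustrative sketch only: the helper below is hypothetical and simply mirrors
# how this module's own functions are meant to be combined. The returned keys
# depend entirely on the local config/sources directory and allowlist.
def _example_read_config_usage():
    # Only CC-BY sources that have a load function, locally available data and
    # an allowlist entry are returned by default.
    config = read_config(cc_by=True)
    # Restrict further to the sources whose data should be aggregated.
    return filter_by_aggregate_data(config, aggregate_data=True)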
def col_params_to_col_list(data_columns_params):
data_schema = read_data_schema()
column_list = []
for data_type in data_columns_params.keys():
data_type_formats = data_columns_params[data_type]
schema_cols = data_schema[data_type]['columns']
for data_type_format in data_type_formats:
col_name = schema_cols[data_type_format]
column_list.append(col_name)
return column_list
def get_data_columns_by_type():
data_columns_by_type = {}
schema = read_data_schema()
for data_type in schema.keys():
columns = list(schema[data_type]['columns'].values())
data_columns_by_type[data_type] = columns
return data_columns_by_type
def get_identifier_columns():
return ['date', 'region_code']
def get_time_series_data_types():
schema = read_data_schema()
time_series_data_types_dict = dict(filter(lambda elem: elem[1]['time_series'], schema.items()))
time_series_data_types = list(time_series_data_types_dict.keys())
return time_series_data_types
def get_rename_dict(data_columns):
schema = read_data_schema()
rename_dict = {}
for data_type in data_columns.keys():
for data_format in data_columns[data_type].keys():
our_col_name = schema[data_type]['columns'][data_format]
source_col_name = data_columns[data_type][data_format]
rename_dict[source_col_name] = our_col_name
return rename_dict
# Returns config dict where load.aggregate_data field matches aggregate_data arg.
# Note that config sources with no load field are not returned in either case.
def filter_by_aggregate_data(config_dict, aggregate_data=True):
filtered_dict = {
source: params for (source, params) in config_dict.items()
if 'load' in params and params['load']['aggregate_data'] == aggregate_data
}
return filtered_dict
|
apache-2.0
| -7,679,367,097,901,189,000 | 39.464286 | 114 | 0.643071 | false |
adityahase/frappe
|
frappe/contacts/doctype/contact/contact.py
|
1
|
8077
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, has_gravatar, cint
from frappe import _
from frappe.model.document import Document
from frappe.core.doctype.dynamic_link.dynamic_link import deduplicate_dynamic_links
from six import iteritems
from past.builtins import cmp
from frappe.model.naming import append_number_if_name_exists
from frappe.contacts.address_and_contact import set_link_title
import functools
class Contact(Document):
def autoname(self):
# concat first and last name
self.name = " ".join(filter(None,
[cstr(self.get(f)).strip() for f in ["first_name", "last_name"]]))
if frappe.db.exists("Contact", self.name):
self.name = append_number_if_name_exists('Contact', self.name)
# concat party name if reqd
for link in self.links:
self.name = self.name + '-' + link.link_name.strip()
break
def validate(self):
self.set_primary_email()
self.set_primary("phone")
self.set_primary("mobile_no")
self.set_user()
set_link_title(self)
if self.email_id and not self.image:
self.image = has_gravatar(self.email_id)
if self.get("sync_with_google_contacts") and not self.get("google_contacts"):
frappe.throw(_("Select Google Contacts to which contact should be synced."))
deduplicate_dynamic_links(self)
def set_user(self):
if not self.user and self.email_id:
self.user = frappe.db.get_value("User", {"email": self.email_id})
def get_link_for(self, link_doctype):
'''Return the link name, if exists for the given link DocType'''
for link in self.links:
if link.link_doctype==link_doctype:
return link.link_name
return None
def has_link(self, doctype, name):
for link in self.links:
if link.link_doctype==doctype and link.link_name== name:
return True
def has_common_link(self, doc):
reference_links = [(link.link_doctype, link.link_name) for link in doc.links]
for link in self.links:
if (link.link_doctype, link.link_name) in reference_links:
return True
def add_email(self, email_id, is_primary=0, autosave=False):
if not frappe.db.exists("Contact Email", {"email_id": email_id, "parent": self.name}):
self.append("email_ids", {
"email_id": email_id,
"is_primary": is_primary
})
if autosave:
self.save(ignore_permissions=True)
def add_phone(self, phone, is_primary_phone=0, is_primary_mobile_no=0, autosave=False):
if not frappe.db.exists("Contact Phone", {"phone": phone, "parent": self.name}):
self.append("phone_nos", {
"phone": phone,
"is_primary_phone": is_primary_phone,
"is_primary_mobile_no": is_primary_mobile_no
})
if autosave:
self.save(ignore_permissions=True)
def set_primary_email(self):
if not self.email_ids:
self.email_id = ""
return
if len([email.email_id for email in self.email_ids if email.is_primary]) > 1:
frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold("Email ID")))
for d in self.email_ids:
if d.is_primary == 1:
self.email_id = d.email_id.strip()
break
def set_primary(self, fieldname):
# Used to set primary mobile and phone no.
if len(self.phone_nos) == 0:
setattr(self, fieldname, "")
return
field_name = "is_primary_" + fieldname
is_primary = [phone.phone for phone in self.phone_nos if phone.get(field_name)]
if len(is_primary) > 1:
frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold(frappe.unscrub(fieldname))))
for d in self.phone_nos:
if d.get(field_name) == 1:
setattr(self, fieldname, d.phone)
break
def get_default_contact(doctype, name):
'''Returns default contact for the given doctype, name'''
out = frappe.db.sql('''select parent,
IFNULL((select is_primary_contact from tabContact c where c.name = dl.parent), 0)
as is_primary_contact
from
`tabDynamic Link` dl
where
dl.link_doctype=%s and
dl.link_name=%s and
dl.parenttype = "Contact"''', (doctype, name))
if out:
return sorted(out, key = functools.cmp_to_key(lambda x,y: cmp(cint(y[1]), cint(x[1]))))[0][0]
else:
return None
@frappe.whitelist()
def invite_user(contact):
contact = frappe.get_doc("Contact", contact)
if not contact.email_id:
frappe.throw(_("Please set Email Address"))
if contact.has_permission("write"):
user = frappe.get_doc({
"doctype": "User",
"first_name": contact.first_name,
"last_name": contact.last_name,
"email": contact.email_id,
"user_type": "Website User",
"send_welcome_email": 1
}).insert(ignore_permissions = True)
return user.name
@frappe.whitelist()
def get_contact_details(contact):
contact = frappe.get_doc("Contact", contact)
out = {
"contact_person": contact.get("name"),
"contact_display": " ".join(filter(None,
[contact.get("salutation"), contact.get("first_name"), contact.get("last_name")])),
"contact_email": contact.get("email_id"),
"contact_mobile": contact.get("mobile_no"),
"contact_phone": contact.get("phone"),
"contact_designation": contact.get("designation"),
"contact_department": contact.get("department")
}
return out
def update_contact(doc, method):
'''Update contact when user is updated, if contact is found. Called via hooks'''
contact_name = frappe.db.get_value("Contact", {"email_id": doc.name})
if contact_name:
contact = frappe.get_doc("Contact", contact_name)
for key in ("first_name", "last_name", "phone"):
if doc.get(key):
contact.set(key, doc.get(key))
contact.flags.ignore_mandatory = True
contact.save(ignore_permissions=True)
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def contact_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond
if not frappe.get_meta("Contact").get_field(searchfield)\
and searchfield not in frappe.db.DEFAULT_COLUMNS:
return []
link_doctype = filters.pop('link_doctype')
link_name = filters.pop('link_name')
return frappe.db.sql("""select
`tabContact`.name, `tabContact`.first_name, `tabContact`.last_name
from
`tabContact`, `tabDynamic Link`
where
`tabDynamic Link`.parent = `tabContact`.name and
`tabDynamic Link`.parenttype = 'Contact' and
`tabDynamic Link`.link_doctype = %(link_doctype)s and
`tabDynamic Link`.link_name = %(link_name)s and
`tabContact`.`{key}` like %(txt)s
{mcond}
order by
if(locate(%(_txt)s, `tabContact`.name), locate(%(_txt)s, `tabContact`.name), 99999),
`tabContact`.idx desc, `tabContact`.name
limit %(start)s, %(page_len)s """.format(mcond=get_match_cond(doctype), key=searchfield), {
'txt': '%' + txt + '%',
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'link_name': link_name,
'link_doctype': link_doctype
})
@frappe.whitelist()
def address_query(links):
import json
links = [{"link_doctype": d.get("link_doctype"), "link_name": d.get("link_name")} for d in json.loads(links)]
result = []
for link in links:
if not frappe.has_permission(doctype=link.get("link_doctype"), ptype="read", doc=link.get("link_name")):
continue
res = frappe.db.sql("""
SELECT `tabAddress`.name
FROM `tabAddress`, `tabDynamic Link`
WHERE `tabDynamic Link`.parenttype='Address'
AND `tabDynamic Link`.parent=`tabAddress`.name
AND `tabDynamic Link`.link_doctype = %(link_doctype)s
AND `tabDynamic Link`.link_name = %(link_name)s
""", {
"link_doctype": link.get("link_doctype"),
"link_name": link.get("link_name"),
}, as_dict=True)
result.extend([l.name for l in res])
return result
def get_contact_with_phone_number(number):
if not number: return
contacts = frappe.get_all('Contact Phone', filters=[
['phone', 'like', '%{0}'.format(number)]
], fields=["parent"], limit=1)
return contacts[0].parent if contacts else None
def get_contact_name(email_id):
contact = frappe.get_list("Contact Email", filters={"email_id": email_id}, fields=["parent"], limit=1)
return contact[0].parent if contact else None
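# Illustrative sketch only: typical server-side construction of a Contact. The
# field values are hypothetical; frappe.get_doc and the add_email/add_phone
# helpers are the ones defined or used in this module.
def _example_create_contact():
	contact = frappe.get_doc({
		"doctype": "Contact",
		"first_name": "Jane",
		"last_name": "Doe",
	})
	contact.add_email("jane@example.com", is_primary=1)
	contact.add_phone("+1-555-0100", is_primary_mobile_no=1)
	contact.insert(ignore_permissions=True)
	return contact.name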
|
mit
| 281,877,966,277,661,250 | 30.306202 | 110 | 0.684413 | false |
blacksburg98/dyplot
|
dyplot/hist.py
|
1
|
2203
|
from dyplot.c3 import C3 as Core
import numpy as np
class Hist(Core):
"""
Compute the histogram of a set of data.
"""
def __init__(self, a, bins=10, r=None, weights=None, density=None, xlabel = "", **kwargs):
"""
:param a: input data. The histogram is computed over the flattened array.
:type a: array_like
        :param bins: The number of equal-width bins to compute over the range,
            or a sequence of bin edges.
        :type bins: int or sequence of scalars, optional
        :param r: The lower and upper range of the bins.
            If not provided, range is simply (a.min(), a.max()).
            Values outside the range are ignored.
        :type r: (float, float), optional
:param weights: An array of weights, of the same shape as a.
Each value in a only contributes its associated weight towards the bin count (instead of 1).
If normed is True, the weights are normalized, so that the integral of the density over the range remains 1
        :type weights: array_like, optional
:param density: If False, the result will contain the number of samples in each bin.
If True, the result is the value of the probability density function at the bin, normalized such that the integral over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability mass function.
Overrides the normed keyword if given.
:type density: bool, optional
:param xlabel: The name of the x label.
:type xlabel: string
"""
Core.__init__(self, option=kwargs)
hist, bin_edges = np.histogram(a, bins, r, weights, density)
        # tolist() keeps float bin values intact when density=True and converts
        # numpy scalars to native Python types
        h = hist.tolist()
self.option["data"]["type"] = "bar"
columns = []
columns.append(xlabel)
columns.extend(h)
self.option["data"]["columns"].append(columns)
self.option["bar"] = {}
self.option["bar"]["width"] = {}
self.option["bar"]["width"]["ratio"] = 0.5
labels = []
for i in range(0, len(bin_edges)-1):
labels.append(str(bin_edges[i]) + "-" + str(bin_edges[i+1]))
self.set_xticklabels(labels, "categories")
|
mit
| -297,714,197,932,587,460 | 51.731707 | 295 | 0.585111 | false |
ucsd-ccbb/Oncolist
|
src/restLayer/models/SearchResult.py
|
1
|
1295
|
__author__ = 'aarongary'
class SearchResultModel():
def __init__(self):
self.clusterNodeName = ''
self.searchTab = ''
self.geneScoreRangeMax = 100
self.geneSuperList = []
self.items = []
self.geneScoreRangeStep = 0.1
self.geneScoreRangeMin = 5
self.searchGroupTitle = ''
self.grouped_items = []
def loadTestData(self):
self.clusterNodeName = 'Test cluster node name'
self.searchTab = 'Test search tab'
self.geneScoreRangeMax = 99
self.geneSuperList = []
self.items = []
self.geneScoreRangeStep = 0.5
self.geneScoreRangeMin = 50
self.searchGroupTitle = 'Test search group title'
self.grouped_items = []
def toJson(self):
return_value = {
'clusterNodeName': self.clusterNodeName,
'searchTab': self.searchTab,
'geneScoreRangeMax': self.geneScoreRangeMax,
'geneSuperList': self.geneSuperList,
'items': self.items,
'geneScoreRangeStep': self.geneScoreRangeStep,
'geneScoreRangeMin': self.geneScoreRangeMin,
'searchGroupTitle': self.searchGroupTitle,
'grouped_items': self.grouped_items
}
return return_value
|
mit
| -6,604,879,068,015,613,000 | 32.205128 | 58 | 0.59305 | false |
alej0varas/django-o2o_tagging
|
o2o_tagging/models.py
|
1
|
2048
|
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from model_utils.managers import PassThroughManager
from .managers import O2OTagQuerySet
class O2OTag(models.Model):
# The object that is tagging
tagger_content_type = models.ForeignKey(ContentType,
related_name="taggers")
tagger_object_id = models.PositiveIntegerField()
tagger_content_object = generic.GenericForeignKey("tagger_content_type",
"tagger_object_id")
# The object that is tagged
tagged_content_type = models.ForeignKey(ContentType,
related_name="taggeds")
tagged_object_id = models.PositiveIntegerField()
tagged_content_object = generic.GenericForeignKey("tagged_content_type",
"tagged_object_id")
# The object where the tagged objects is tagged
tagged_in_content_type = models.ForeignKey(
ContentType,
related_name="tags")
tagged_in_object_id = models.PositiveIntegerField()
tagged_in_content_object = generic.GenericForeignKey(
"tagged_in_content_type",
"tagged_in_object_id")
created_at = models.DateTimeField(auto_now_add=True)
objects = PassThroughManager.for_queryset_class(O2OTagQuerySet)()
class Meta:
unique_together = ('tagger_content_type', 'tagger_object_id',
'tagged_content_type', 'tagged_object_id',
'tagged_in_content_type', 'tagged_in_object_id')
def __unicode__(self):
return u'%s -> %s | %s' % (self.tagger, self.tagged, self.tagged_in)
# Convenient shortcuts
@property
def tagged(self):
return self.tagged_content_object
@property
def tagger(self):
return self.tagger_content_object
@property
def tagged_in(self):
return self.tagged_in_content_object
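# Illustrative sketch only: ``user``, ``photo`` and ``comment`` stand in for
# any three model instances and are hypothetical, not models defined by this
# app. The generic foreign keys accept model instances directly.
def _example_tag(user, photo, comment):
    # ``user`` tags ``photo`` in ``comment``
    return O2OTag.objects.create(
        tagger_content_object=user,
        tagged_content_object=photo,
        tagged_in_content_object=comment,
    )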
|
gpl-3.0
| 4,474,573,030,350,421,500 | 34.929825 | 76 | 0.625 | false |
zfrenchee/pandas
|
pandas/tests/indexes/datetimes/test_arithmetic.py
|
1
|
21153
|
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple of other tests belong in this section. Move them in
    # a PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = pd.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
if isinstance(do, tuple):
do, kwargs = do
                else:
                    kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = getattr(pd.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
|
bsd-3-clause
| -6,268,567,197,909,106,000 | 38.464552 | 78 | 0.528861 | false |
manashmndl/dfvfs
|
tests/path/raw_path_spec.py
|
1
|
1149
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the RAW storage media image path specification implementation."""
import unittest
from dfvfs.path import raw_path_spec
from tests.path import test_lib
class RawPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the RAW storage media image path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
path_spec = raw_path_spec.RawPathSpec(parent=self._path_spec)
self.assertNotEqual(path_spec, None)
with self.assertRaises(ValueError):
_ = raw_path_spec.RawPathSpec(parent=None)
with self.assertRaises(ValueError):
_ = raw_path_spec.RawPathSpec(parent=self._path_spec, bogus=u'BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
path_spec = raw_path_spec.RawPathSpec(parent=self._path_spec)
self.assertNotEqual(path_spec, None)
expected_comparable = u'\n'.join([
u'type: TEST',
u'type: RAW',
u''])
self.assertEqual(path_spec.comparable, expected_comparable)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -7,268,564,294,503,144,000 | 26.357143 | 80 | 0.693647 | false |
Titan-C/selfspy
|
selfspy/activity_store.py
|
1
|
9740
|
# Copyright 2012 David Fendrich
# Copyright 2017 Oscar Najera
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import time
from datetime import datetime
from functools import reduce
NOW = datetime.now
import sqlalchemy
from selfspy import sniff_x as sniffer
from selfspy import models
from selfspy.models import Process, Window, Geometry, Click, Keys
# [65027] is AltGr in X for some ungodly reason.
SKIP_MODIFIERS = {"", "Shift_L", "Control_L", "Super_L",
"Alt_L", "Super_R", "Control_R", "Shift_R", "[65027]"}
SCROLL_BUTTONS = {4, 5, 6, 7}
SCROLL_COOLOFF = 10 # seconds
class Display:
def __init__(self):
self.proc_id = None
self.win_id = None
self.geo_id = None
class KeyPress:
def __init__(self, key, time, is_repeat):
self.key = key
self.time = time
self.is_repeat = is_repeat
class ActivityStore:
def __init__(self, db_name, encrypter=None, store_text=True, repeat_char=True):
self.session_maker = models.initialize(db_name)
models.ENCRYPTER = encrypter
self.store_text = store_text
self.repeat_char = repeat_char
self.curtext = u""
self.key_presses = []
self.mouse_path = []
self.current_window = Display()
self.last_scroll = {button: 0 for button in SCROLL_BUTTONS}
self.last_key_time = time.time()
self.last_commit = time.time()
self.started = NOW()
self.last_screen_change = None
def trycommit(self):
self.last_commit = time.time()
for _ in range(1000):
try:
self.session.commit()
break
except sqlalchemy.exc.OperationalError:
time.sleep(1)
except:
self.session.rollback()
def run(self):
self.session = self.session_maker()
self.sniffer = sniffer.Sniffer()
self.sniffer.screen_hook = self.got_screen_change
self.sniffer.key_hook = self.got_key
self.sniffer.mouse_button_hook = self.got_mouse_click
self.sniffer.mouse_move_hook = self.got_mouse_move
self.sniffer.run()
def got_screen_change(self, process_name, window_name, win_x, win_y, win_width, win_height):
"""Receives a screen change and stores any changes.
If the process or window has changed it will also store any queued pressed keys.
Keyword arguments:
process_name -- the name of the process running the current window
window_name -- the name of the window
win_x -- the x position of the window
win_y -- the y position of the window
win_width -- the width of the window
win_height -- the height of the window"""
# skip the event if same arguments as last time are passed
args = [process_name, window_name, win_x, win_y, win_width, win_height]
if self.last_screen_change == args:
return
self.last_screen_change = args
cur_process = self.session.query(
Process
).filter_by(
name=process_name
).scalar()
if not cur_process:
cur_process = Process(process_name)
self.session.add(cur_process)
cur_geometry = self.session.query(
Geometry
).filter_by(
xpos=win_x,
ypos=win_y,
width=win_width,
height=win_height
).scalar()
if not cur_geometry:
cur_geometry = Geometry(win_x, win_y, win_width, win_height)
self.session.add(cur_geometry)
cur_window = self.session.query(Window).filter_by(title=window_name,
process_id=cur_process.id).scalar()
if not cur_window:
cur_window = Window(window_name, cur_process.id)
self.session.add(cur_window)
if not (self.current_window.proc_id == cur_process.id
and self.current_window.win_id == cur_window.id):
self.trycommit()
self.store_keys() # happens before as these keypresses belong to the previous window
self.current_window.proc_id = cur_process.id
self.current_window.win_id = cur_window.id
self.current_window.geo_id = cur_geometry.id
def filter_many(self):
specials_in_row = 0
lastpress = None
newpresses = []
for press in self.key_presses:
key = press.key
if specials_in_row and key != lastpress.key:
if specials_in_row > 1:
lastpress.key = '%s]x%d>' % (
lastpress.key[:-2], specials_in_row)
newpresses.append(lastpress)
specials_in_row = 0
if len(key) > 1:
specials_in_row += 1
lastpress = press
else:
newpresses.append(press)
if specials_in_row:
if specials_in_row > 1:
lastpress.key = '%s]x%d>' % (
lastpress.key[:-2], specials_in_row)
newpresses.append(lastpress)
self.key_presses = newpresses
def store_keys(self):
""" Stores the current queued key-presses """
if self.repeat_char:
self.filter_many()
if self.key_presses:
keys = [press.key for press in self.key_presses]
timings = [press.time for press in self.key_presses]
def add(count, press): return count + (0 if press.is_repeat else 1)
nrkeys = reduce(add, self.key_presses, 0)
curtext = u""
if not self.store_text:
keys = []
else:
curtext = ''.join(keys)
self.session.add(Keys(curtext.encode('utf8'),
keys,
timings,
nrkeys,
self.started,
self.current_window.proc_id,
self.current_window.win_id,
self.current_window.geo_id))
self.trycommit()
self.started = NOW()
self.key_presses = []
self.last_key_time = time.time()
def got_key(self, keycode, state, string, is_repeat):
""" Receives key-presses and queues them for storage.
keycode is the code sent by the keyboard to represent the
pressed key state is the list of modifier keys pressed,
each modifier key should be represented with capital
letters and optionally followed by an underscore and
location specifier, i.e: SHIFT or SHIFT_L/SHIFT_R, ALT,
CTRL string is the string representation of the key press
repeat is True if the current key is a repeat sent by the
keyboard """
now = time.time()
if string in SKIP_MODIFIERS:
return
if len(state) > 1 or (len(state) == 1 and state[0] != "Shift"):
string = '<[%s: %s]>' % (' '.join(state), string)
elif len(string) > 1:
string = '<[%s]>' % string
self.key_presses.append(
KeyPress(string, now - self.last_key_time, is_repeat))
self.last_key_time = now
def store_click(self, button, x, y):
""" Stores incoming mouse-clicks """
self.session.add(Click(button,
True,
x, y,
len(self.mouse_path),
self.current_window.proc_id,
self.current_window.win_id,
self.current_window.geo_id))
self.mouse_path = []
self.trycommit()
def got_mouse_click(self, button, x, y):
"""Receives mouse clicks and sends them for storage.
Mouse buttons: left: 1, middle: 2, right: 3, scroll up: 4,
down:5, left:6, right:7 x,y are the coordinates of the
keypress press is True if it pressed down, False if
released"""
        if button in SCROLL_BUTTONS:
if time.time() - self.last_scroll[button] < SCROLL_COOLOFF:
return
self.last_scroll[button] = time.time()
self.store_click(button, x, y)
def got_mouse_move(self, x, y):
""" Queues mouse movements.
        x, y are the new coordinates after moving the mouse"""
self.mouse_path.append([x, y])
def close(self):
""" stops the sniffer and stores the latest keys. To be used on shutdown of program"""
self.sniffer.cancel()
self.store_keys()
def change_password(self, new_encrypter):
self.session = self.session_maker()
keys = self.session.query(Keys).all()
for k in keys:
dtext = k.decrypt_text()
dkeys = k.decrypt_keys()
k.encrypt_text(dtext, new_encrypter)
k.encrypt_keys(dkeys, new_encrypter)
self.session.commit()
|
gpl-3.0
| 3,251,141,156,850,954,000 | 33.295775 | 97 | 0.558419 | false |
pkuyym/Paddle
|
python/paddle/fluid/tests/book/notest_understand_sentiment.py
|
1
|
13573
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
import paddle
import contextlib
import math
import numpy as np
import sys
import os
def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
hid_dim=32):
emb = fluid.layers.embedding(
input=data, size=[input_dim, emb_dim], is_sparse=True)
conv_3 = fluid.nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=3,
act="tanh",
pool_type="sqrt")
conv_4 = fluid.nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=4,
act="tanh",
pool_type="sqrt")
prediction = fluid.layers.fc(input=[conv_3, conv_4],
size=class_dim,
act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,
lstm_size=128):
emb = fluid.layers.embedding(
input=data, size=[input_dim, emb_dim], is_sparse=True)
sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')
rnn = fluid.layers.DynamicRNN()
with rnn.block():
word = rnn.step_input(sentence)
prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
prev_cell = rnn.memory(value=0.0, shape=[lstm_size])
def gate_common(ipt, hidden, size):
gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
return gate0 + gate1
forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell = forget_gate * prev_cell + input_gate * cell_gate
hidden = output_gate * fluid.layers.tanh(x=cell)
rnn.update_memory(prev_cell, cell)
rnn.update_memory(prev_hidden, hidden)
rnn.output(hidden)
last = fluid.layers.sequence_last_step(rnn())
prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
def stacked_lstm_net(data,
label,
input_dim,
class_dim=2,
emb_dim=128,
hid_dim=512,
stacked_num=3):
assert stacked_num % 2 == 1
emb = fluid.layers.embedding(
input=data, size=[input_dim, emb_dim], is_sparse=True)
# add bias attr
# TODO(qijun) linear act
fc1 = fluid.layers.fc(input=emb, size=hid_dim)
lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
inputs = [fc1, lstm1]
for i in range(2, stacked_num + 1):
fc = fluid.layers.fc(input=inputs, size=hid_dim)
lstm, cell = fluid.layers.dynamic_lstm(
input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
inputs = [fc, lstm]
fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
prediction = fluid.layers.fc(input=[fc_last, lstm_last],
size=class_dim,
act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
def create_random_lodtensor(lod, place, low, high):
data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
res = fluid.LoDTensor()
res.set(data, place)
res.set_lod([lod])
return res
def train(word_dict,
net_method,
use_cuda,
parallel=False,
save_dirname=None,
is_local=True):
BATCH_SIZE = 128
PASS_NUM = 5
dict_dim = len(word_dict)
class_dim = 2
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
if not parallel:
cost, acc_out, prediction = net_method(
data, label, input_dim=dict_dim, class_dim=class_dim)
else:
places = fluid.layers.get_places()
pd = fluid.layers.ParallelDo(places)
with pd.do():
cost, acc, _ = net_method(
pd.read_input(data),
pd.read_input(label),
input_dim=dict_dim,
class_dim=class_dim)
pd.write_output(cost)
pd.write_output(acc)
cost, acc = pd()
cost = fluid.layers.mean(cost)
acc_out = fluid.layers.mean(acc)
prediction = None
assert save_dirname is None
adagrad = fluid.optimizer.Adagrad(learning_rate=0.002)
adagrad.minimize(cost)
train_data = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.imdb.train(word_dict), buf_size=1000),
batch_size=BATCH_SIZE)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
def train_loop(main_program):
exe.run(fluid.default_startup_program())
for pass_id in xrange(PASS_NUM):
for data in train_data():
cost_val, acc_val = exe.run(main_program,
feed=feeder.feed(data),
fetch_list=[cost, acc_out])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if cost_val < 0.4 and acc_val > 0.8:
if save_dirname is not None:
fluid.io.save_inference_model(save_dirname, ["words"],
prediction, exe)
return
if math.isnan(float(cost_val)):
sys.exit("got NaN loss, training failed.")
raise AssertionError("Cost is too large for {0}".format(
net_method.__name__))
if is_local:
train_loop(fluid.default_main_program())
else:
port = os.getenv("PADDLE_INIT_PORT", "6174")
pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("TRAINERS"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
training_role = os.getenv("TRAINING_ROLE", "TRAINER")
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
train_loop(t.get_trainer_program())
def infer(word_dict, use_cuda, save_dirname=None):
if save_dirname is None:
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
word_dict_len = len(word_dict)
lod = [0, 4, 10]
tensor_words = create_random_lodtensor(
lod, place, low=0, high=word_dict_len - 1)
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
# and results will contain a list of data corresponding to fetch_targets.
assert feed_target_names[0] == "words"
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_words},
fetch_list=fetch_targets,
return_numpy=False)
print(results[0].lod())
np_data = np.array(results[0])
print("Inference Shape: ", np_data.shape)
print("Inference results: ", np_data)
def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
train(
word_dict,
net_method,
use_cuda,
parallel=parallel,
save_dirname=save_dirname)
infer(word_dict, use_cuda, save_dirname)
class TestUnderstandSentiment(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.word_dict = paddle.dataset.imdb.word_dict()
@contextlib.contextmanager
def new_program_scope(self):
prog = fluid.Program()
startup_prog = fluid.Program()
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
with fluid.program_guard(prog, startup_prog):
yield
def test_conv_cpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=convolution_net,
use_cuda=False,
save_dirname="understand_sentiment_conv.inference.model")
def test_conv_cpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=convolution_net,
use_cuda=False,
parallel=True)
@unittest.skip(reason="make CI faster")
def test_stacked_lstm_cpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=stacked_lstm_net,
use_cuda=False,
save_dirname="understand_sentiment_stacked_lstm.inference.model")
def test_stacked_lstm_cpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=stacked_lstm_net,
use_cuda=False,
parallel=True)
def test_conv_gpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=convolution_net,
use_cuda=True,
save_dirname="understand_sentiment_conv.inference.model")
def test_conv_gpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=convolution_net,
use_cuda=True,
parallel=True)
@unittest.skip(reason="make CI faster")
def test_stacked_lstm_gpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=stacked_lstm_net,
use_cuda=True,
save_dirname="understand_sentiment_stacked_lstm.inference.model")
def test_stacked_lstm_gpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=stacked_lstm_net,
use_cuda=True,
parallel=True)
@unittest.skip(reason='make CI faster')
def test_dynrnn_lstm_gpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=False)
def test_dynrnn_lstm_gpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=True)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -3,545,684,741,165,340,000 | 35.291444 | 81 | 0.568924 | false |
demonchild2112/travis-test
|
grr/server/grr_response_server/databases/mem_paths.py
|
1
|
14025
|
#!/usr/bin/env python
"""The in memory database methods for path handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.builtins import filter
from future.utils import iteritems
from future.utils import iterkeys
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import collection
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class _PathRecord(object):
"""A class representing all known information about particular path.
Attributes:
path_type: A path type of the path that this record corresponds to.
components: A path components of the path that this record corresponds to.
"""
def __init__(self, path_type, components):
self._path_type = path_type
self._components = components
self._path_infos = {}
self._children = set()
@property
def _stat_entries(self):
return {
ts: pi.stat_entry
for ts, pi in iteritems(self._path_infos)
if pi.stat_entry
}
@property
def _hash_entries(self):
return {
ts: pi.hash_entry
for ts, pi in iteritems(self._path_infos)
if pi.hash_entry
}
def AddStatEntry(self, stat_entry, timestamp):
"""Registers stat entry at a given timestamp."""
if timestamp in self._stat_entries:
message = ("Duplicated stat entry write for path '%s' of type '%s' at "
"timestamp '%s'. Old: %s. New: %s.")
message %= ("/".join(self._components), self._path_type, timestamp,
self._stat_entries[timestamp], stat_entry)
raise db.Error(message)
if timestamp not in self._path_infos:
path_info = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
timestamp=timestamp,
stat_entry=stat_entry)
self.AddPathInfo(path_info)
else:
self._path_infos[timestamp].stat_entry = stat_entry
def GetStatEntries(self):
return self._stat_entries.items()
def AddHashEntry(self, hash_entry, timestamp):
"""Registers hash entry at a given timestamp."""
if timestamp in self._hash_entries:
message = ("Duplicated hash entry write for path '%s' of type '%s' at "
"timestamp '%s'. Old: %s. New: %s.")
message %= ("/".join(self._components), self._path_type, timestamp,
self._hash_entries[timestamp], hash_entry)
raise db.Error(message)
if timestamp not in self._path_infos:
path_info = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
timestamp=timestamp,
hash_entry=hash_entry)
self.AddPathInfo(path_info)
else:
self._path_infos[timestamp].hash_entry = hash_entry
def GetHashEntries(self):
return self._hash_entries.items()
def ClearHistory(self):
self._path_infos = {}
def AddPathInfo(self, path_info):
"""Updates existing path information of the path record."""
if self._path_type != path_info.path_type:
message = "Incompatible path types: `%s` and `%s`"
raise ValueError(message % (self._path_type, path_info.path_type))
if self._components != path_info.components:
message = "Incompatible path components: `%s` and `%s`"
raise ValueError(message % (self._components, path_info.components))
if path_info.timestamp in self._path_infos:
raise ValueError("PathInfo with timestamp %r was added before." %
path_info.timestamp)
new_path_info = path_info.Copy()
if new_path_info.timestamp is None:
new_path_info.timestamp = rdfvalue.RDFDatetime.Now()
self._path_infos[new_path_info.timestamp] = new_path_info
def AddChild(self, path_info):
"""Makes the path aware of some child."""
if self._path_type != path_info.path_type:
message = "Incompatible path types: `%s` and `%s`"
raise ValueError(message % (self._path_type, path_info.path_type))
if self._components != path_info.components[:-1]:
message = "Incompatible path components, expected `%s` but got `%s`"
raise ValueError(message % (self._components, path_info.components[:-1]))
self._children.add(path_info.GetPathID())
def GetPathInfo(self, timestamp=None):
"""Generates a summary about the path record.
Args:
timestamp: A point in time from which the data should be retrieved.
Returns:
A `rdf_objects.PathInfo` instance.
"""
path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
try:
result = self._path_infos[path_info_timestamp].Copy()
except KeyError:
result = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
directory=True)
stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries,
timestamp)
result.last_stat_entry_timestamp = stat_entry_timestamp
result.stat_entry = self._stat_entries.get(stat_entry_timestamp)
hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries,
timestamp)
result.last_hash_entry_timestamp = hash_entry_timestamp
result.hash_entry = self._hash_entries.get(hash_entry_timestamp)
return result
def GetChildren(self):
return set(self._children)
@staticmethod
def _LastEntryTimestamp(dct, upper_bound_timestamp):
"""Searches for greatest timestamp lower than the specified one.
Args:
dct: A dictionary from timestamps to some items.
upper_bound_timestamp: An upper bound for timestamp to be returned.
Returns:
Greatest timestamp that is lower than the specified one. If no such value
exists, `None` is returned.
"""
if upper_bound_timestamp is None:
upper_bound = lambda _: True
else:
upper_bound = lambda key: key <= upper_bound_timestamp
try:
return max(filter(upper_bound, iterkeys(dct)))
except ValueError: # Thrown if `max` input (result of filtering) is empty.
return None
class InMemoryDBPathMixin(object):
"""InMemoryDB mixin for path related functions."""
@utils.Synchronized
def ReadPathInfo(self, client_id, path_type, components, timestamp=None):
"""Retrieves a path info record for a given path."""
try:
path_record = self.path_records[(client_id, path_type, components)]
return path_record.GetPathInfo(timestamp=timestamp)
except KeyError:
raise db.UnknownPathError(
client_id=client_id, path_type=path_type, components=components)
@utils.Synchronized
def ReadPathInfos(self, client_id, path_type, components_list):
"""Retrieves path info records for given paths."""
result = {}
for components in components_list:
try:
path_record = self.path_records[(client_id, path_type, components)]
result[components] = path_record.GetPathInfo()
except KeyError:
result[components] = None
return result
@utils.Synchronized
def ListDescendantPathInfos(self,
client_id,
path_type,
components,
timestamp=None,
max_depth=None):
"""Lists path info records that correspond to children of given path."""
result = []
root_dir_exists = False
for path_idx, path_record in iteritems(self.path_records):
other_client_id, other_path_type, other_components = path_idx
path_info = path_record.GetPathInfo(timestamp=timestamp)
if client_id != other_client_id or path_type != other_path_type:
continue
if other_components == tuple(components):
root_dir_exists = True
if not path_info.directory:
raise db.NotDirectoryPathError(client_id, path_type, components)
if len(other_components) == len(components):
continue
if not collection.StartsWith(other_components, components):
continue
if (max_depth is not None and
len(other_components) - len(components) > max_depth):
continue
result.append(path_info)
if not root_dir_exists and components:
raise db.UnknownPathError(client_id, path_type, components)
if timestamp is None:
return sorted(result, key=lambda _: tuple(_.components))
# We need to filter implicit path infos if specific timestamp is given.
# TODO(hanuszczak): If we were to switch to use path trie instead of storing
# records by path id, everything would be much easier.
class TrieNode(object):
"""A trie of path components with path infos as values."""
def __init__(self):
self.path_info = None
self.children = {}
self.explicit = False
def Add(self, path_info, idx=0):
"""Adds given path info to the trie (or one of its subtrees)."""
components = path_info.components
if idx == len(components):
self.path_info = path_info
self.explicit |= (
path_info.HasField("stat_entry") or
path_info.HasField("hash_entry"))
else:
child = self.children.setdefault(components[idx], TrieNode())
child.Add(path_info, idx=idx + 1)
self.explicit |= child.explicit
def Collect(self, path_infos):
if self.path_info is not None and self.explicit:
path_infos.append(self.path_info)
for component in sorted(iterkeys(self.children)):
self.children[component].Collect(path_infos)
trie = TrieNode()
for path_info in result:
trie.Add(path_info)
explicit_path_infos = []
trie.Collect(explicit_path_infos)
return explicit_path_infos
def _GetPathRecord(self, client_id, path_info, set_default=True):
components = tuple(path_info.components)
path_idx = (client_id, path_info.path_type, components)
if set_default:
default = _PathRecord(
path_type=path_info.path_type, components=components)
return self.path_records.setdefault(path_idx, default)
else:
return self.path_records.get(path_idx, None)
def _WritePathInfo(self, client_id, path_info):
"""Writes a single path info record for given client."""
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
path_record = self._GetPathRecord(client_id, path_info)
path_record.AddPathInfo(path_info)
parent_path_info = path_info.GetParent()
if parent_path_info is not None:
parent_path_record = self._GetPathRecord(client_id, parent_path_info)
parent_path_record.AddChild(path_info)
@utils.Synchronized
def MultiWritePathInfos(self, path_infos):
for client_id, client_path_infos in iteritems(path_infos):
self.WritePathInfos(client_id, client_path_infos)
@utils.Synchronized
def WritePathInfos(self, client_id, path_infos):
for path_info in path_infos:
self._WritePathInfo(client_id, path_info)
for ancestor_path_info in path_info.GetAncestors():
self._WritePathInfo(client_id, ancestor_path_info)
@utils.Synchronized
def ReadPathInfosHistories(
self,
client_id,
path_type,
components_list,
cutoff = None
):
"""Reads a collection of hash and stat entries for given paths."""
results = {}
for components in components_list:
try:
path_record = self.path_records[(client_id, path_type, components)]
except KeyError:
results[components] = []
continue
entries_by_ts = {}
for ts, stat_entry in path_record.GetStatEntries():
pi = rdf_objects.PathInfo(
path_type=path_type,
components=components,
timestamp=ts,
stat_entry=stat_entry)
entries_by_ts[ts] = pi
for ts, hash_entry in path_record.GetHashEntries():
try:
pi = entries_by_ts[ts]
except KeyError:
pi = rdf_objects.PathInfo(
path_type=path_type, components=components, timestamp=ts)
entries_by_ts[ts] = pi
pi.hash_entry = hash_entry
results[components] = []
for timestamp in sorted(iterkeys(entries_by_ts)):
if cutoff is not None and timestamp > cutoff:
continue
results[components].append(entries_by_ts[timestamp])
return results
@utils.Synchronized
def ReadLatestPathInfosWithHashBlobReferences(self,
client_paths,
max_timestamp=None):
"""Returns PathInfos that have corresponding HashBlobReferences."""
results = {}
for cp in client_paths:
results[cp] = None
try:
path_record = self.path_records[(cp.client_id, cp.path_type,
cp.components)]
except KeyError:
continue
stat_entries_by_ts = {
ts: stat_entry for ts, stat_entry in path_record.GetStatEntries()
}
for ts, hash_entry in sorted(
path_record.GetHashEntries(), key=lambda e: e[0], reverse=True):
if max_timestamp is not None and ts > max_timestamp:
continue
hash_id = rdf_objects.SHA256HashID.FromBytes(
hash_entry.sha256.AsBytes())
if hash_id not in self.blob_refs_by_hashes:
continue
pi = rdf_objects.PathInfo(
path_type=cp.path_type,
components=cp.components,
timestamp=ts,
hash_entry=hash_entry)
try:
pi.stat_entry = stat_entries_by_ts[ts]
except KeyError:
pass
results[cp] = pi
break
return results
|
apache-2.0
| -6,259,611,104,042,401,000 | 32.313539 | 80 | 0.636791 | false |
s40523220/2016fallcp_hw
|
pelicanconf.py
|
1
|
2015
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'KMOL'
SITENAME = '2016Fall 課程網誌 (虎尾科大MDE)'
# Do not use the folder an article is in as its category
USE_FOLDER_AS_CATEGORY = False
#PATH = 'content'
#OUTPUT_PATH = 'output'
TIMEZONE = 'Asia/Taipei'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('pelican-bootstrap3', 'https://github.com/DandyDev/pelican-bootstrap3/'),
('pelican-plugins', 'https://github.com/getpelican/pelican-plugins'),
('Tipue search', 'https://github.com/Tipue/Tipue-Search'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Must be an absolute path, or a path relative to the directory of this settings file
PLUGIN_PATHS = ['plugin']
PLUGINS = ['liquid_tags.notebook', 'summary', 'tipue_search', 'sitemap']
# for sitemap plugin
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.5,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
}
}
# search is for Tipue search
DIRECT_TEMPLATES = (('index', 'tags', 'categories', 'authors', 'archives', 'search'))
# for pelican-bootstrap3 theme settings
#TAG_CLOUD_MAX_ITEMS = 50
DISPLAY_CATEGORIES_ON_SIDEBAR = True
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
TAGS_URL = "tags.html"
CATEGORIES_URL = "categories.html"
#SHOW_ARTICLE_AUTHOR = True
#MENUITEMS = [('Home', '/'), ('Archives', '/archives.html'), ('Search', '/search.html')]
|
agpl-3.0
| -3,259,234,573,316,339,700 | 25.28169 | 88 | 0.63397 | false |
aspose-pdf/Aspose.Pdf-for-Java
|
Plugins/Aspose-Pdf-Java-for-Jython/asposepdf/WorkingWithDocumentObject/AddJavascript.py
|
1
|
1156
|
from asposepdf import Settings
from com.aspose.pdf import Document
from com.aspose.pdf import JavascriptAction
class AddJavascript:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithDocumentObject/AddJavascript/'
# Open a pdf document.
doc = Document(dataDir + "input1.pdf")
# Adding JavaScript at Document Level
        # Instantiate JavascriptAction with the desired JavaScript statement
javaScript = JavascriptAction("this.print({bUI:true,bSilent:false,bShrinkToFit:true})")
# Assign JavascriptAction object to desired action of Document
doc.setOpenAction(javaScript)
# Adding JavaScript at Page Level
doc.getPages().get_Item(2).getActions().setOnOpen(JavascriptAction("app.alert('page 2 is opened')"))
doc.getPages().get_Item(2).getActions().setOnClose(JavascriptAction("app.alert('page 2 is closed')"))
# Save PDF Document
doc.save(dataDir + "JavaScript-Added.pdf")
print "Added JavaScript Successfully, please check the output file."
if __name__ == '__main__':
AddJavascript()
|
mit
| 1,258,187,660,439,364,600 | 36.6 | 109 | 0.66436 | false |
vishu-guntupalli/spring-batch-dashboard
|
spring_batch_dashboard/spring_batch_dashboard/settings.py
|
1
|
3308
|
"""
Django settings for spring_batch_dashboard project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o6jj4d3z#p=%(8d2u_a=rrh)@6b@mrnbk*%xsy)d0e_66(jib0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
LOGIN_REDIRECT_URL = '/dashboard/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dashboard',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spring_batch_dashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['dashboard/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spring_batch_dashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sys',
'USER': 'root',
'PASSWORD': 'spring-batch',
'HOST': 'localhost',
'PORT': '3306',
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -5,535,245,436,612,907,000 | 24.84375 | 91 | 0.678053 | false |
countrymarmot/mccdaq
|
setup.py
|
1
|
2031
|
#!/usr/bin/env python
"""Build script for mccdaq.
unzip mccdaq-x.x.zip
>>>> setup.py install
mccdaq-x.x.win32-py2.x.exe
>>>> setup.py bdist_wininst --bitmap mccdaq.bmp
"""
from distutils.core import setup, Extension
import os, re, sys, string
VERSION_MAJOR = 0
VERSION_MINOR = 2
mccdaq_version = str(VERSION_MAJOR) + "." + str(VERSION_MINOR)
ext_modules = Extension('_mccdaq',
sources = ['src/win32/_mccdaq.c'],
libraries = ['cbw32'],
#libraries = ['src/win32/cbw32.lib'],
library_dirs = ['src/win32'],
)
setup(
name = "mccdaq",
version = mccdaq_version,
description = "USB-DAQ interface MCCDAQ .",
author = "Philippe Dalet",
author_email = "philippe.dalet@ac-toulouse.fr",
url = "http://gpib82357a.sourceforge.net/mccdaq.htm",
download_url = "http://sourceforge.net/projects/gpib82357a/" ,
long_description =
'''MCCDAQ is a USB-DAQ interface running only on Windows (XP Pro, W2K, Vista)
- Connect MCCDAQ (USB-1208LS) on USB port
- Install driver (see CDROM for MCCDAQ) 8.6
- Install this python package mccdaq.
''',
packages = ["mccdaq"],
license="Python",
platforms="Windows",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Python Software Foundation License',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Topic :: Software Development :: Libraries :: Python Modules',
],
    data_files=[
        ('doc/mccdaq', ['doc/mccdaq.bmp',
                        'doc/USB-1208LS_block.pdf',
                        'doc/USB-1208LS_pins.pdf'])
    ],
ext_modules = [ext_modules]
)
|
bsd-3-clause
| 5,043,339,858,986,041,000 | 30.246154 | 84 | 0.583949 | false |
linuxwhatelse/plugin.audio.linuxwhatelse.gmusic
|
addon/routes/actions.py
|
1
|
21782
|
import os
import json
import shutil
import urlparse
import re
import uuid
import codecs
import xbmc
import xbmcgui
import xbmcplugin
import mapper
import gmusicapi
from addon.gmusic_wrapper import GMusic
from addon import utils
from addon import listing
from addon import thumbs
from addon import ADDON
from addon import URL
from addon import ADDON_HANDLE
MPR = mapper.Mapper.get()
GMUSIC = GMusic.get(debug_logging=False)
_CACHE_DIR = utils.get_cache_dir()
@MPR.s_url('/setup/', type_cast={'force': bool})
def setup(force=False):
is_setup = True if ADDON.getSetting('is_setup') == 'true' else False
if is_setup and not force:
return True
dialog = xbmcgui.Dialog()
username = dialog.input(utils.translate(30075),
type=xbmcgui.INPUT_ALPHANUM)
if not username:
return False
# If 2-Factor Authentication is used
is_two_factor = dialog.yesno(
utils.translate(30071), utils.translate(30072))
if is_two_factor:
if not dialog.ok(utils.translate(30071),
utils.translate(30073), utils.translate(30074)):
return False
password = dialog.input(utils.translate(30076),
type=xbmcgui.INPUT_ALPHANUM,
option=xbmcgui.ALPHANUM_HIDE_INPUT)
if not password:
return False
device_id = None
if is_two_factor:
# If Android Device available
if dialog.yesno(utils.translate(30077), utils.translate(30078)):
if not dialog.ok(utils.translate(30079), utils.translate(30081)):
return False
device_id = dialog.input(utils.translate(30084),
type=xbmcgui.INPUT_ALPHANUM)
if not device_id:
return False
else:
# If using MAC-Address
if dialog.yesno(utils.translate(30082), utils.translate(30083)):
device_id = gmusicapi.Mobileclient.FROM_MAC_ADDRESS
else:
return False
else:
web = gmusicapi.Webclient()
if not web.login(username, password):
        # Offer to re-run setup because the login failed
if dialog.yesno(utils.translate(30048), utils.translate(30085)):
return setup(force=True)
else:
return False
try:
devices = web.get_registered_devices()
if not devices:
raise
dev_list = []
for dev in devices:
# Not an Android Device so we skip as streaming would not work
if dev['deviceType'] != 2:
continue
if 'id' in dev and dev['id']:
dev_list.append('%s - %s' % (
dev.get('carrier', '').strip(' '),
dev.get('model', '').strip(' '),
))
dev_list = sorted(dev_list)
if len(dev_list) <= 0:
raise
elif len(dev_list) == 1:
device_id = devices[0]['id'].lstrip('0x')
else:
selection = dialog.select(utils.translate(30042), dev_list, 0)
if selection >= 0:
device_id = devices[selection]['id'].lstrip('0x')
else:
return False
except Exception:
        # Offer to fall back to the MAC address because no devices were found
if not dialog.yesno(utils.translate(30079), utils.translate(30097)):
return False
device_id = gmusicapi.Mobileclient.FROM_MAC_ADDRESS
# Test login
mobile = gmusicapi.Mobileclient()
if mobile.login(username, password, device_id):
# Test if this is an all-access account
if not mobile.get_all_stations():
dialog.ok(utils.translate(30091), utils.translate(30092))
return False
ADDON.setSetting('username', username)
ADDON.setSetting('password', password)
ADDON.setSetting('authtoken', mobile.session._authtoken)
if device_id == gmusicapi.Mobileclient.FROM_MAC_ADDRESS:
mac_address = ''.join(re.findall('..', '%012x' % uuid.getnode()))
ADDON.setSetting('device_id', mac_address)
else:
ADDON.setSetting('device_id', device_id)
ADDON.setSetting('is_setup', 'true')
utils.notify(utils.translate(30086), utils.translate(30087))
return True
else:
# If re-run setup
if dialog.yesno(utils.translate(30048), utils.translate(30085)):
return setup(force=True)
return False
##############
## PLAYBACK ##
##############
def _get_track_details(track_id, store_id=None):
cache = os.path.join(utils.get_cache_dir(['tracks']), track_id)
if os.path.exists(cache):
with open(cache, 'r') as f:
track = json.loads(f.read())
elif store_id:
try:
track = GMUSIC.get_track_info(store_track_id=track_id)
except Exception:
            track = None  # avoid UnboundLocalError when the lookup fails
else:
track = None
return track
@MPR.s_url('/play/track/<track_id>/')
def play_track(track_id, store_id=None, track_title='', station_id=None):
track = _get_track_details(track_id, store_id)
item = None
if track:
item = listing.build_song_listitems([track])[0][1]
else:
item = xbmcgui.ListItem(track_title)
item.setArt({
'thumb': thumbs.IMG_ALBUM,
'poster': thumbs.IMG_ALBUM
})
item.setPath(
GMUSIC.get_stream_url(
song_id=track_id,
quality=ADDON.getSetting('stream_quality')
)
)
xbmcplugin.setResolvedUrl(ADDON_HANDLE, True, item)
GMUSIC.increment_song_playcount(track_id)
# If the current track is from a station and within the last five (5)
# playlist tracks, we get a new set of tracks for this station and
# add it to the playlist.
if station_id:
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
if playlist.getposition() >= (len(playlist) - 5):
queue_station(station_id)
@MPR.s_url('/play/album/<album_id>/')
def play_album(album_id):
# _play uses the previous url and matches it to one of `files.py`
_play(path=['album', album_id])
@MPR.s_url('/play/playlist/')
def play_playlist(playlist_id=None, playlist_token=None):
# _play uses the previous url and matches it to one of `files.py`
_play(['playlist'])
@MPR.s_url('/play/station/')
def play_station(station_id=None, station_name=None, artist_id=None,
album_id=None, genre_id=None, track_id=None,
curated_station_id=None, playlist_token=None):
    # Shuffle and Repeat make no sense whatsoever when starting a station
utils.execute_jsonrpc('Player.SetShuffle',
{'playerid': xbmc.PLAYLIST_MUSIC, 'shuffle': False})
utils.execute_jsonrpc('Player.SetRepeat',
{'playerid': xbmc.PLAYLIST_MUSIC, 'repeat': 'off'})
# _play uses the previous url and matches it to one of `files.py`
_play(['station'])
def _play(path):
utils.execute_jsonrpc(method='Playlist.Clear',
params={'playlistid': xbmc.PLAYLIST_MUSIC})
utils.execute_jsonrpc(
method='Playlist.Add',
params={
'playlistid': xbmc.PLAYLIST_MUSIC,
'item': {
'directory': utils.build_url(url=URL, paths=path, r_path=True)
}
}
)
utils.execute_jsonrpc(
method='Player.Open',
params={'item': {'playlistid': xbmc.PLAYLIST_MUSIC, 'position': 0}}
)
if ADDON.getSetting('auto_fullscreen') == 'true':
utils.execute_jsonrpc('GUI.SetFullscreen', {'fullscreen': True})
#############
## QUEUING ##
#############
@MPR.s_url('/queue/track/<track_id>/')
def queue_track(track_id, track_title='', play_next=False):
track = _get_track_details(track_id)
path = None
item = None
if track:
listitem = listing.build_song_listitems([track])[0]
path = listitem[0]
item = listitem[1]
else:
item = xbmcgui.ListItem(track_title)
item.setArt({
'thumb': thumbs.IMG_ALBUM,
'poster': thumbs.IMG_ALBUM
})
path = utils.build_url(
url=URL,
paths=['play', 'track', track_id],
queries={'track_title': track_title},
r_path=True,
r_query=True
)
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
position = len(playlist) + 1
if play_next:
position = playlist.getposition() + 1
    playlist.add(url=path, listitem=item, index=position)
@MPR.s_url('/queue/album/<album_id>/')
def queue_album(album_id, play_next=False):
_queue(['album', album_id], play_next)
@MPR.s_url('/queue/playlist/<playlist_id>/')
def queue_playlist(playlist_id, play_next=False):
_queue(['playlist', playlist_id], play_next)
@MPR.url('^/queue/station/$')
def queue_station(station_id=None, station_name=None, artist_id=None,
album_id=None, genre_id=None, track_id=None,
curated_station_id=None, play_next=False):
_queue(['station'])
def _queue(path, play_next=False):
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
position = len(playlist) + 1
if play_next:
position = playlist.getposition() + 1
query = dict(urlparse.parse_qsl(urlparse.urlparse(URL).query))
if 'play_next' in query:
del query['play_next']
position = playlist.getposition() + 1
utils.execute_jsonrpc(
method='Playlist.Insert',
params={
'playlistid': xbmc.PLAYLIST_MUSIC,
'position': position,
'item': {
'directory': utils.build_url(
url=URL,
paths=path,
queries=query,
r_path=True,
r_query=True
)
}
}
)
############
## SEARCH ##
############
@MPR.s_url('/search/history/')
def search_history():
history = _get_search_history()
# Add "New Search" to the list
item = xbmcgui.ListItem(utils.translate(30053))
item.setArt({
'thumb': thumbs.IMG_SEARCH,
'poster': thumbs.IMG_SEARCH
})
items = [(
utils.build_url(
url=URL,
paths=['search', 'new'],
r_path=True,
r_query=True
),
item,
True
)]
for hist in history:
item = xbmcgui.ListItem(hist)
item.setArt({
'thumb': thumbs.IMG_SEARCH,
'poster': thumbs.IMG_SEARCH
})
items.append((
utils.build_url(
url=URL,
paths=['search', hist],
r_path=True,
r_query=True
),
item,
True
))
listing.list_items(items)
@MPR.s_url('/search/new/')
def search_new():
keyboard = xbmc.Keyboard()
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
query = keyboard.getText()
else:
        # User canceled or used an empty search string
return
search(query)
@MPR.s_url('/search/<query>/')
def search(query):
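    # Stores the query in the local search history (capped at 20 entries) and
    # lists one folder per result category together with its number of hits.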
history_file = os.path.join(_CACHE_DIR, 'search_history.json')
history = _get_search_history()
# It was a new search so we add it to the history
if query.decode('utf-8').lower() not in (hist.lower() for hist in history):
history.insert(0, query)
with codecs.open(history_file, 'w+', encoding='utf-8') as f:
f.write(json.dumps(history[:20], indent=2))
result = GMUSIC.search(query)
if not result:
listing.list_items([])
return
items = []
if 'artist_hits' in result and len(result['artist_hits']) > 0:
item = xbmcgui.ListItem('%s (%s)' % (utils.translate(30022),
len(result['artist_hits'])))
item.setArt({
'thumb': thumbs.IMG_ARTIST,
'poster': thumbs.IMG_ARTIST
})
items.append((
utils.build_url(
url=URL,
paths=['search', 'artists', query],
r_path=True,
r_query=True
),
item,
True
))
if 'album_hits' in result and len(result['album_hits']) > 0:
item = xbmcgui.ListItem('%s (%s)' % (utils.translate(30023),
len(result['album_hits'])))
item.setArt({
'thumb': thumbs.IMG_ALBUM,
'poster': thumbs.IMG_ALBUM
})
items.append((
utils.build_url(
url=URL,
paths=['search', 'albums', query],
r_path=True,
r_query=True
),
item,
True
))
if 'playlist_hits' in result and len(result['playlist_hits']) > 0:
item = xbmcgui.ListItem('%s (%s)' % (utils.translate(30020),
len(result['playlist_hits'])))
item.setArt({
'thumb': thumbs.IMG_PLAYLIST,
'poster': thumbs.IMG_PLAYLIST
})
items.append((
utils.build_url(
url=URL,
paths=['search', 'playlists', query],
r_path=True,
r_query=True
),
item,
True
))
if 'station_hits' in result and len(result['station_hits']) > 0:
item = xbmcgui.ListItem('%s (%s)' % (utils.translate(30021),
len(result['station_hits'])))
item.setArt({
'thumb': thumbs.IMG_STATION,
'poster': thumbs.IMG_STATION
})
items.append((
utils.build_url(
url=URL,
paths=['search', 'stations', query],
r_path=True,
r_query=True
),
item,
True
))
if 'song_hits' in result and len(result['song_hits']) > 0:
item = xbmcgui.ListItem('%s (%s)' % (utils.translate(30024),
len(result['song_hits'])))
item.setArt({
'thumb': thumbs.IMG_TRACK,
'poster': thumbs.IMG_TRACK
})
items.append((
utils.build_url(
url=URL,
paths=['search', 'songs', query],
r_path=True,
r_query=True
),
item,
True
))
listing.list_items(items)
@MPR.s_url('/search/artists/<query>/')
def search_artists(query):
if query:
result = GMUSIC.search(query)
else:
result = GMUSIC.search(cached=True)
if result:
items = listing.build_artist_listitems(result['artist_hits'])
listing.list_artists(items)
@MPR.s_url('/search/albums/<query>/')
def search_albums(query):
if query:
result = GMUSIC.search(query)
else:
result = GMUSIC.search(cached=True)
if result:
items = listing.build_album_listitems(result['album_hits'])
listing.list_albums(items)
@MPR.s_url('/search/playlists/<query>/')
def search_playlists(query):
if query:
result = GMUSIC.search(query)
else:
result = GMUSIC.search(cached=True)
if result:
items = listing.build_playlist_listitems(result['playlist_hits'])
listing.list_playlists(items)
@MPR.s_url('/search/stations/<query>/')
def search_stations(query):
if query:
result = GMUSIC.search(query)
else:
result = GMUSIC.search(cached=True)
if result:
items = listing.build_station_listitems(result['station_hits'])
listing.list_stations(items)
@MPR.s_url('/search/songs/<query>/')
def search_songs(query):
if query:
result = GMUSIC.search(query)
else:
result = GMUSIC.search(cached=True)
if result:
items = listing.build_song_listitems(result['song_hits'])
listing.list_songs(items)
def _get_search_history():
history_file = os.path.join(_CACHE_DIR, 'search_history.json')
history = []
if os.path.exists(history_file):
with codecs.open(history_file, 'r', encoding='utf-8') as f:
try:
history = json.loads(f.read())
except ValueError:
pass
return history
###################
## MISCELLANEOUS ##
###################
@MPR.s_url('/my-library/update/')
def my_library_update():
utils.notify(utils.translate(30030), utils.translate(30043))
GMUSIC.get_my_library_songs(from_cache=False)
GMUSIC.get_my_library_artists(from_cache=False)
GMUSIC.get_my_library_albums(from_cache=False)
utils.notify(utils.translate(30030), utils.translate(30044))
xbmc.executebuiltin('Container.Refresh')
@MPR.s_url('/my-library/add/track/<track_id>/')
@MPR.s_url('/my-library/add/album/<album_id>/')
def my_library_add(album_id=None, track_id=None):
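    # Adds a single store track or every track of an album to the library and
    # then offers to refresh the cached library data.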
if track_id:
GMUSIC.add_store_track(track_id)
elif album_id:
album = GMUSIC.get_album_info(album_id=album_id, include_tracks=True)
for track in album['tracks']:
if 'storeId' in track:
GMUSIC.add_store_track(track['storeId'])
if xbmcgui.Dialog().yesno(heading=utils.translate(30030),
line1=utils.translate(30065)):
my_library_update()
@MPR.s_url('/my-library/remove/track/<track_id>/')
@MPR.s_url('/my-library/remove/album/<album_id>/')
def my_library_remove(album_id=None, track_id=None):
if not album_id and not track_id:
return
if not xbmcgui.Dialog().yesno(heading=utils.translate(30061),
line1=utils.translate(30063)):
return
if album_id:
GMUSIC.delete_album(album_id)
elif track_id:
GMUSIC.delete_songs([track_id])
if xbmcgui.Dialog().yesno(heading=utils.translate(30030),
line1=utils.translate(30065)):
my_library_update()
@MPR.s_url('/my-library/playlist/add/')
def my_library_playlist_add(playlist_id=None, album_id=None, track_id=None):
    # In case no playlist_id is specified we guide the user through
    # the process of selecting one. They will also have the option
    # to create a new one.
if not playlist_id:
action_dialog = xbmcgui.Dialog()
playlists = GMUSIC.get_user_playlists()
playlist_names = []
playlist_ids = []
for playlist in playlists:
if playlist['type'] != 'USER_GENERATED':
continue
playlist_names.append(playlist['name'])
playlist_ids.append(playlist['id'])
playlist_names.insert(0, utils.translate(30052))
selection = action_dialog.select(utils.translate(30020),
playlist_names, 0)
if selection == -1:
return
if selection == 0:
keyboard = xbmc.Keyboard()
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
playlist_id = GMUSIC.create_playlist(name=keyboard.getText())
else:
playlist_id = playlist_ids[selection-1]
if playlist_id:
if track_id:
GMUSIC.add_songs_to_playlist(playlist_id=playlist_id,
song_ids=track_id)
elif album_id:
album = GMUSIC.get_album_info(
album_id=album_id, include_tracks=True)
track_ids = []
for track in album['tracks']:
if 'storeId' in track:
track_ids.append(track['storeId'])
GMUSIC.add_songs_to_playlist(playlist_id=playlist_id,
song_ids=track_ids)
@MPR.s_url('/my-library/playlist/remove/')
def my_library_playlist_remove(entry_id):
if xbmcgui.Dialog().yesno(heading=utils.translate(30062),
line1=utils.translate(30064)):
GMUSIC.remove_entries_from_playlist([entry_id])
xbmc.executebuiltin('Container.Refresh')
@MPR.s_url('/my-library/playlist/delete/')
def my_library_playlist_delete(playlist_id):
if xbmcgui.Dialog().yesno(heading=utils.translate(30068),
line1=utils.translate(30069)):
GMUSIC.delete_playlist(playlist_id)
xbmc.executebuiltin('Container.Refresh')
@MPR.s_url('/rate/')
def rate(track_id):
rating = [
utils.translate(30027), # Thumbs up
utils.translate(30028), # No Thumbs
utils.translate(30029), # Thumbs down
]
dialog = xbmcgui.Dialog()
selection = dialog.select(utils.translate(30041), rating, 0)
if selection == -1:
return
song = GMUSIC.get_track_info(track_id)
if not song:
return
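    # Rating values used by Google Music: 5 = thumbs up, 1 = thumbs down,
    # 0 = no rating.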
if selection == 0:
GMUSIC.rate_songs(song, 5)
elif selection == 1:
GMUSIC.rate_songs(song, 0)
elif selection == 2:
GMUSIC.rate_songs(song, 1)
utils.notify(utils.translate(30099), "")
@MPR.s_url('/clear/cache/')
def clear_cache():
if os.path.exists(_CACHE_DIR):
shutil.rmtree(_CACHE_DIR)
utils.notify(utils.translate(30094), '', display_time=1000)
@MPR.s_url('/clear/search-history/')
def clear_search_history():
history_file = os.path.join(_CACHE_DIR, 'search_history.json')
if os.path.exists(history_file):
os.remove(history_file)
utils.notify(utils.translate(30095), '', display_time=1000)
|
gpl-3.0
| 2,312,730,938,915,822,000 | 27.325098 | 80 | 0.553485 | false |
hacklabr/timtec
|
accounts/forms.py
|
1
|
4697
|
# -*- coding: utf-8 -*-
from __builtin__ import super
from django.contrib.auth import get_user_model
from django import forms
from localflavor.br.forms import BRCPFField
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from timtec.settings import ACCOUNT_REQUIRED_FIELDS as fields
User = get_user_model()
class BaseProfileEditForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(BaseProfileEditForm, self).__init__(*args, **kwargs)
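        # Force every field listed in ACCOUNT_REQUIRED_FIELDS to be required;
        # fields that do not exist on this form are simply skipped.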
for field in fields:
try:
self.fields[field].required = True
self.fields[field].blank = False
except KeyError:
pass
class ProfileEditForm(BaseProfileEditForm):
email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
cpf = BRCPFField()
password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)
class Meta:
model = get_user_model()
fields = ('username', 'email', 'first_name', 'last_name', 'picture',
'occupation', 'city', 'site', 'biography', 'cpf')
def clean(self):
cleaned_data = super(ProfileEditForm, self).clean()
if not self.is_valid():
self.cleaned_data['picture'] = self.instance.picture
return cleaned_data
    # FIXME: username should actually be cleaned
def clean_username(self):
return self.instance.username
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError(_("The two password fields didn't match."))
return password2
def save(self, commit=True):
if self.cleaned_data['password1']:
self.instance.set_password(self.cleaned_data['password1'])
return super(ProfileEditForm, self).save(commit=commit)
class ProfileEditAdminForm(BaseProfileEditForm):
email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
cpf = BRCPFField()
password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)
class Meta:
model = get_user_model()
widgets = {
'groups' : forms.SelectMultiple(attrs={'class' : 'form-control',
'size' : 10 })
}
fields = ('username', 'email', 'first_name', 'last_name', 'picture',
'groups', 'occupation', 'city', 'site', 'biography', 'cpf')
def clean(self):
cleaned_data = super(ProfileEditAdminForm, self).clean()
if not self.is_valid():
self.cleaned_data['picture'] = self.instance.picture
return cleaned_data
    # FIXME: username should actually be cleaned
def clean_username(self):
return self.instance.username
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError(_("The two password fields didn't match."))
return password2
def save(self, commit=True):
if self.cleaned_data['password1']:
self.instance.set_password(self.cleaned_data['password1'])
return super(ProfileEditAdminForm, self).save(commit=commit)
class AcceptTermsForm(forms.Form):
accept_terms = forms.BooleanField(label=_('Accept '), initial=False, required=False)
def clean_accept_terms(self):
data = self.cleaned_data['accept_terms']
if settings.TERMS_ACCEPTANCE_REQUIRED and not data:
raise forms.ValidationError(_('You must agree to the Terms of Use to use %(site_name)s.'),
params={'site_name': settings.SITE_NAME},)
return self.cleaned_data['accept_terms']
class SignupForm(ProfileEditForm, AcceptTermsForm):
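    # Registration form: combines the profile fields with the terms-of-use
    # checkbox; signup() copies the extra fields onto the newly created user.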
occupation = forms.CharField()
institution = forms.CharField()
class Meta:
model = get_user_model()
fields = ('username', 'email', 'occupation', 'cpf', 'institution',)
def signup(self, request, user):
user.accepted_terms = self.cleaned_data['accept_terms']
user.institution = self.cleaned_data['institution']
user.occupation = self.cleaned_data['occupation']
user.cpf = self.cleaned_data['cpf']
user.save()
|
agpl-3.0
| 5,552,133,178,344,531,000 | 35.410853 | 106 | 0.631254 | false |
rfk/git-remote-hg
|
git_remote_hg/test.py
|
1
|
1101
|
"""
git_remote_hg.test: testcases for git_remote_hg
================================================
Actually there are no "tests" as such just yet. This is simply here out of
habit, since I use it to sync the main docstring with README.rst.
"""
import os
import unittest
import git_remote_hg
class TestDocstring(unittest.TestCase):
def test_readme_matches_docstring(self):
"""Ensure that the README is in sync with the docstring.
This test should always pass; if the README is out of sync it just
updates it with the contents of git_remote_hg.__doc__.
"""
dirname = os.path.dirname
readme = os.path.join(dirname(dirname(__file__)),"README.rst")
if not os.path.isfile(readme):
f = open(readme,"wb")
f.write(git_remote_hg.__doc__.encode())
f.close()
else:
f = open(readme,"rb")
if f.read() != git_remote_hg.__doc__:
f.close()
f = open(readme,"wb")
f.write(git_remote_hg.__doc__.encode())
f.close()
|
mit
| 5,540,166,440,208,211,000 | 27.973684 | 75 | 0.551317 | false |
GrAndSE/lighty-template
|
setup.py
|
1
|
2990
|
#!/usr/bin/env python
"""
Lighty-template
~~~~~~~~~~~~~~~
Lighty-template is a very simple template engine for python (python.org).
The template syntax looks like django-template or jinja2 templates. But the
template engine code is simpler and gives you a way to write all the tags you
need without any hacks.
It does not yet include all the features django-template or jinja2 support,
but I'll try to add them as soon as possible.
Features:
---------
- Stupid simple syntax almost compatible with django-template.
- Pure python.
- Supports both Python 2 (checked with 2.7.2) and Python 3 (checked with 3.2.2)
- Fast. From 3 to 10 times faster than django-template and even faster than
  jinja2 on some benchmarks (though 2 times slower on one of them).
- Simple and compact code.
- Template filters with multiple arguments.
- Basic template filters included (now just 14 template filters).
- Basic template tags included.
- Simple but powerful tag declaration - it's easy to create your own block
  tags by writing a single function.
- Custom template tags can modify the template on the fly.
Example:
--------
Here a small template example:
<!DOCTYPE html>
<html>
<head>
<title>{{ title }}</title>
{% block style %}{% endblock %}
{% block script %}{% endblock %}
</head>
<body>
{% block content %}
<h1>Hello, {{ name }}!</h1>
<p>Some text here</p>
{% endblock %}
{% include "includes/footer.html" %}
</body>
</html>
TODO:
-----
- More default tags (there are no load tags yet, and the if and for tags were
  simplified and require additional work).
- More default filters (strings saving, etc.)
- Some additional execution optimizations.
- More tests (in progress).
- Documentation.
- Thinking about unicode and escaping.
"""
from distutils.core import setup
setup(
name='lighty-template',
version='0.3.4',
description='Simple template engine for python',
long_description=__doc__,
keywords='Template HTML XML',
author='Andrey Grygoryev',
author_email='undeadgrandse@gmail.com',
license='BSD',
url='https://github.com/GrAndSE/lighty-template',
packages=['lighty', 'lighty.templates'],
platforms="any",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
],
)
|
bsd-3-clause
| 1,835,447,419,314,017,500 | 31.857143 | 79 | 0.631438 | false |
waynecoulson/TV-Show-Downloader
|
sickbeard/__init__.py
|
1
|
54063
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import cherrypy
import webbrowser
import sqlite3
import datetime
import socket
import os, sys, subprocess, re
import urllib
from threading import Lock
# apparently py2exe won't build these unless they're imported somewhere
from sickbeard import providers, metadata
from providers import kickass, torrentz, dtt
from providers import ezrss, tvtorrents, btn,thepiratebay, nzbmatrix, nzbsrus, newznab, womble, newzbin, nzbs_org_old
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, ConfigMigrator
from sickbeard import searchCurrent, searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, traktWatchListChecker
from sickbeard import helpers, db, exceptions, show_queue, search_queue, scheduler
from sickbeard import logger
from sickbeard import naming
from common import SD, SKIPPED, NAMING_REPEAT
from sickbeard.databases import mainDB, cache_db
from lib.configobj import ConfigObj
invoked_command = None
SOCKET_TIMEOUT = 30
PID = None
CFG = None
CONFIG_FILE = None
# this is the version of the config we EXPECT to find
CONFIG_VERSION = 1
PROG_DIR = '.'
MY_FULLNAME = None
MY_NAME = None
MY_ARGS = []
SYS_ENCODING = ''
DATA_DIR = ''
CREATEPID = False
PIDFILE = ''
DAEMON = None
backlogSearchScheduler = None
currentSearchScheduler = None
showUpdateScheduler = None
versionCheckScheduler = None
showQueueScheduler = None
searchQueueScheduler = None
properFinderScheduler = None
autoPostProcesserScheduler = None
traktWatchListCheckerSchedular = None
showList = None
loadingShowList = None
providerList = []
newznabProviderList = []
metadata_provider_dict = {}
NEWEST_VERSION = None
NEWEST_VERSION_STRING = None
VERSION_NOTIFY = None
INIT_LOCK = Lock()
__INITIALIZED__ = False
started = False
LOG_DIR = None
WEB_PORT = None
WEB_LOG = None
WEB_ROOT = None
WEB_USERNAME = None
WEB_PASSWORD = None
WEB_HOST = None
WEB_IPV6 = None
USE_API = False
API_KEY = None
ENABLE_HTTPS = False
HTTPS_CERT = None
HTTPS_KEY = None
LAUNCH_BROWSER = None
CACHE_DIR = None
ACTUAL_CACHE_DIR = None
ROOT_DIRS = None
USE_BANNER = None
USE_LISTVIEW = None
METADATA_XBMC = None
METADATA_MEDIABROWSER = None
METADATA_PS3 = None
METADATA_WDTV = None
METADATA_TIVO = None
METADATA_SYNOLOGY = None
QUALITY_DEFAULT = None
STATUS_DEFAULT = None
FLATTEN_FOLDERS_DEFAULT = None
PROVIDER_ORDER = []
NAMING_MULTI_EP = None
NAMING_PATTERN = None
NAMING_ABD_PATTERN = None
NAMING_CUSTOM_ABD = None
NAMING_FORCE_FOLDERS = False
TVDB_API_KEY = '9DAF49C96CBF8DAC'
TVDB_BASE_URL = None
TVDB_API_PARMS = {}
USE_NZBS = None
USE_TORRENTS = None
NZB_METHOD = None
NZB_DIR = None
USENET_RETENTION = None
DOWNLOAD_PROPERS = None
SEARCH_FREQUENCY = None
BACKLOG_SEARCH_FREQUENCY = 21
MIN_SEARCH_FREQUENCY = 10
DEFAULT_SEARCH_FREQUENCY = 60
EZRSS = False
DTT = False
DTT_NORAR = False
DTT_SINGLE = False
THEPIRATEBAY = False
THEPIRATEBAY_TRUSTED = False
THEPIRATEBAY_PROXY = False
THEPIRATEBAY_PROXY_URL = None
TVTORRENTS = False
TVTORRENTS_DIGEST = None
TVTORRENTS_HASH = None
KICKASS = False
TORRENTZ = False
TORRENTZ_VERIFIED = False
BTN = False
BTN_API_KEY = None
TORRENT_DIR = None
ADD_SHOWS_WO_DIR = None
CREATE_MISSING_SHOW_DIRS = None
RENAME_EPISODES = False
PROCESS_AUTOMATICALLY = False
KEEP_PROCESSED_DIR = False
MOVE_ASSOCIATED_FILES = False
TV_DOWNLOAD_DIR = None
NZBS = False
NZBS_UID = None
NZBS_HASH = None
WOMBLE = False
NZBSRUS = False
NZBSRUS_UID = None
NZBSRUS_HASH = None
NZBMATRIX = False
NZBMATRIX_USERNAME = None
NZBMATRIX_APIKEY = None
NEWZBIN = False
NEWZBIN_USERNAME = None
NEWZBIN_PASSWORD = None
SAB_USERNAME = None
SAB_PASSWORD = None
SAB_APIKEY = None
SAB_CATEGORY = None
SAB_HOST = ''
NZBGET_PASSWORD = None
NZBGET_CATEGORY = None
NZBGET_HOST = None
TORRENT_USERNAME = None
TORRENT_PASSWORD = None
TORRENT_HOST = ''
TORRENT_PATH = ''
TORRENT_RATIO = ''
TORRENT_PAUSED = False
USE_XBMC = False
XBMC_NOTIFY_ONSNATCH = False
XBMC_NOTIFY_ONDOWNLOAD = False
XBMC_UPDATE_LIBRARY = False
XBMC_UPDATE_FULL = False
XBMC_HOST = ''
XBMC_USERNAME = None
XBMC_PASSWORD = None
USE_PLEX = False
PLEX_NOTIFY_ONSNATCH = False
PLEX_NOTIFY_ONDOWNLOAD = False
PLEX_UPDATE_LIBRARY = False
PLEX_SERVER_HOST = None
PLEX_HOST = None
PLEX_USERNAME = None
PLEX_PASSWORD = None
USE_GROWL = False
GROWL_NOTIFY_ONSNATCH = False
GROWL_NOTIFY_ONDOWNLOAD = False
GROWL_HOST = ''
GROWL_PASSWORD = None
USE_PROWL = False
PROWL_NOTIFY_ONSNATCH = False
PROWL_NOTIFY_ONDOWNLOAD = False
PROWL_API = None
PROWL_PRIORITY = 0
USE_TWITTER = False
TWITTER_NOTIFY_ONSNATCH = False
TWITTER_NOTIFY_ONDOWNLOAD = False
TWITTER_USERNAME = None
TWITTER_PASSWORD = None
TWITTER_PREFIX = None
USE_NOTIFO = False
NOTIFO_NOTIFY_ONSNATCH = False
NOTIFO_NOTIFY_ONDOWNLOAD = False
NOTIFO_USERNAME = None
NOTIFO_APISECRET = None
NOTIFO_PREFIX = None
USE_BOXCAR = False
BOXCAR_NOTIFY_ONSNATCH = False
BOXCAR_NOTIFY_ONDOWNLOAD = False
BOXCAR_USERNAME = None
BOXCAR_PASSWORD = None
BOXCAR_PREFIX = None
USE_PUSHOVER = False
PUSHOVER_NOTIFY_ONSNATCH = False
PUSHOVER_NOTIFY_ONDOWNLOAD = False
PUSHOVER_USERKEY = None
USE_LIBNOTIFY = False
LIBNOTIFY_NOTIFY_ONSNATCH = False
LIBNOTIFY_NOTIFY_ONDOWNLOAD = False
USE_NMJ = False
NMJ_HOST = None
NMJ_DATABASE = None
NMJ_MOUNT = None
USE_SYNOINDEX = False
USE_TRAKT = False
TRAKT_USERNAME = None
TRAKT_PASSWORD = None
TRAKT_API = ''
TRAKT_REMOVE_WATCHLIST = False
TRAKT_USE_WATCHLIST = False
TRAKT_METHOD_ADD = 0
TRAKT_START_PAUSED = False
USE_PYTIVO = False
PYTIVO_NOTIFY_ONSNATCH = False
PYTIVO_NOTIFY_ONDOWNLOAD = False
PYTIVO_UPDATE_LIBRARY = False
PYTIVO_HOST = ''
PYTIVO_SHARE_NAME = ''
PYTIVO_TIVO_NAME = ''
USE_NMA = False
NMA_NOTIFY_ONSNATCH = False
NMA_NOTIFY_ONDOWNLOAD = False
NMA_API = None
NMA_PRIORITY = 0
COMING_EPS_LAYOUT = None
COMING_EPS_DISPLAY_PAUSED = None
COMING_EPS_SORT = None
EXTRA_SCRIPTS = []
GIT_PATH = None
IGNORE_WORDS = "german,french,core2hd,dutch,swedish"
__INITIALIZED__ = False
def get_backlog_cycle_time():
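    # The backlog cycle is twice the search frequency plus 7 minutes, but
    # never shorter than 720 minutes (12 hours).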
cycletime = SEARCH_FREQUENCY*2+7
return max([cycletime, 720])
def initialize(consoleLogging=True):
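    # Reads all settings from the config file into module-level globals, sets
    # up logging, the databases, the metadata providers and the background
    # scheduler threads. Returns False if already initialized, True otherwise.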
with INIT_LOCK:
global LOG_DIR, WEB_PORT, WEB_LOG, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR,TORRENT_METHOD, DOWNLOAD_PROPERS, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_HOST, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_RATIO, TORRENT_PAUSED, \
NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, currentSearchScheduler, backlogSearchScheduler, \
USE_XBMC, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_UPDATE_FULL, \
XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, \
USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_API, TRAKT_REMOVE_WATCHLIST, TRAKT_USE_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktWatchListCheckerSchedular, \
USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_UPDATE_LIBRARY, \
PLEX_SERVER_HOST, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, \
showUpdateScheduler, __INITIALIZED__, LAUNCH_BROWSER, showList, loadingShowList, \
KICKASS, TORRENTZ, TORRENTZ_VERIFIED, \
NZBS, NZBS_UID, NZBS_HASH, EZRSS, TVTORRENTS, TVTORRENTS_DIGEST, TVTORRENTS_HASH, BTN, BTN_API_KEY, TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, \
SEARCH_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_SEARCH_FREQUENCY, \
QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, STATUS_DEFAULT, \
GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, \
USE_GROWL, GROWL_HOST, GROWL_PASSWORD, USE_PROWL, PROWL_NOTIFY_ONSNATCH, PROWL_NOTIFY_ONDOWNLOAD, PROWL_API, PROWL_PRIORITY, PROG_DIR, NZBMATRIX, NZBMATRIX_USERNAME, \
USE_PYTIVO, PYTIVO_NOTIFY_ONSNATCH, PYTIVO_NOTIFY_ONDOWNLOAD, PYTIVO_UPDATE_LIBRARY, PYTIVO_HOST, PYTIVO_SHARE_NAME, PYTIVO_TIVO_NAME, \
USE_NMA, NMA_NOTIFY_ONSNATCH, NMA_NOTIFY_ONDOWNLOAD, NMA_API, NMA_PRIORITY, \
NZBMATRIX_APIKEY, versionCheckScheduler, VERSION_NOTIFY, PROCESS_AUTOMATICALLY, \
KEEP_PROCESSED_DIR, TV_DOWNLOAD_DIR, TVDB_BASE_URL, MIN_SEARCH_FREQUENCY, \
showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, TVDB_API_PARMS, \
NAMING_PATTERN, NAMING_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, \
RENAME_EPISODES, properFinderScheduler, PROVIDER_ORDER, autoPostProcesserScheduler, \
NZBSRUS, NZBSRUS_UID, NZBSRUS_HASH, WOMBLE, providerList, newznabProviderList, \
EXTRA_SCRIPTS, USE_TWITTER, TWITTER_USERNAME, TWITTER_PASSWORD, TWITTER_PREFIX, \
USE_NOTIFO, NOTIFO_USERNAME, NOTIFO_APISECRET, NOTIFO_NOTIFY_ONDOWNLOAD, NOTIFO_NOTIFY_ONSNATCH, \
USE_BOXCAR, BOXCAR_USERNAME, BOXCAR_PASSWORD, BOXCAR_NOTIFY_ONDOWNLOAD, BOXCAR_NOTIFY_ONSNATCH, \
USE_PUSHOVER, PUSHOVER_USERKEY, PUSHOVER_NOTIFY_ONDOWNLOAD, PUSHOVER_NOTIFY_ONSNATCH, \
USE_LIBNOTIFY, LIBNOTIFY_NOTIFY_ONSNATCH, LIBNOTIFY_NOTIFY_ONDOWNLOAD, USE_NMJ, NMJ_HOST, NMJ_DATABASE, NMJ_MOUNT, USE_SYNOINDEX, \
USE_BANNER, USE_LISTVIEW, METADATA_XBMC, METADATA_MEDIABROWSER, METADATA_PS3, METADATA_SYNOLOGY, metadata_provider_dict, \
NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, \
COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, METADATA_WDTV, METADATA_TIVO, IGNORE_WORDS, CREATE_MISSING_SHOW_DIRS, \
ADD_SHOWS_WO_DIR
if __INITIALIZED__:
return False
socket.setdefaulttimeout(SOCKET_TIMEOUT)
CheckSection(CFG, 'General')
CheckSection(CFG, 'Blackhole')
CheckSection(CFG, 'Newzbin')
CheckSection(CFG, 'SABnzbd')
CheckSection(CFG, 'NZBget')
CheckSection(CFG, 'XBMC')
CheckSection(CFG, 'PLEX')
CheckSection(CFG, 'Growl')
CheckSection(CFG, 'Prowl')
CheckSection(CFG, 'Twitter')
CheckSection(CFG, 'NMJ')
CheckSection(CFG, 'Synology')
CheckSection(CFG, 'pyTivo')
CheckSection(CFG, 'NMA')
LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
if not helpers.makeDir(LOG_DIR):
logger.log(u"!!! No log folder, logging to screen only!", logger.ERROR)
try:
WEB_PORT = check_setting_int(CFG, 'General', 'web_port', 8081)
except:
WEB_PORT = 8081
if WEB_PORT < 21 or WEB_PORT > 65535:
WEB_PORT = 8081
WEB_HOST = check_setting_str(CFG, 'General', 'web_host', '0.0.0.0')
WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0))
WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/")
WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0))
WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '')
WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '')
LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
USE_API = bool(check_setting_int(CFG, 'General', 'use_api', 0))
API_KEY = check_setting_str(CFG, 'General', 'api_key', '')
ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt')
HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', 'server.key')
ACTUAL_CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', 'cache')
# fix bad configs due to buggy code
if ACTUAL_CACHE_DIR == 'None':
ACTUAL_CACHE_DIR = 'cache'
# unless they specify, put the cache dir inside the data dir
if not os.path.isabs(ACTUAL_CACHE_DIR):
CACHE_DIR = os.path.join(DATA_DIR, ACTUAL_CACHE_DIR)
else:
CACHE_DIR = ACTUAL_CACHE_DIR
if not helpers.makeDir(CACHE_DIR):
logger.log(u"!!! Creating local cache dir failed, using system default", logger.ERROR)
CACHE_DIR = None
ROOT_DIRS = check_setting_str(CFG, 'General', 'root_dirs', '')
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
ROOT_DIRS = ''
proxies = urllib.getproxies()
proxy_url = None
if 'http' in proxies:
proxy_url = proxies['http']
elif 'ftp' in proxies:
proxy_url = proxies['ftp']
# Set our common tvdb_api options here
TVDB_API_PARMS = {'apikey': TVDB_API_KEY,
'language': 'en',
'useZip': True}
if CACHE_DIR:
TVDB_API_PARMS['cache'] = os.path.join(CACHE_DIR, 'tvdb')
QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
VERSION_NOTIFY = check_setting_int(CFG, 'General', 'version_notify', 1)
FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0))
PROVIDER_ORDER = check_setting_str(CFG, 'General', 'provider_order', '').split()
NAMING_PATTERN = check_setting_str(CFG, 'General', 'naming_pattern', '')
NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '')
NAMING_CUSTOM_ABD = check_setting_int(CFG, 'General', 'naming_custom_abd', 0)
NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1)
NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
TVDB_BASE_URL = 'http://www.thetvdb.com/api/' + TVDB_API_KEY
USE_NZBS = bool(check_setting_int(CFG, 'General', 'use_nzbs', 1))
USE_TORRENTS = bool(check_setting_int(CFG, 'General', 'use_torrents', 0))
NZB_METHOD = check_setting_str(CFG, 'General', 'nzb_method', 'blackhole')
if NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
NZB_METHOD = 'blackhole'
TORRENT_METHOD = check_setting_str(CFG, 'General', 'torrent_method', 'blackhole')
if TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission'):
TORRENT_METHOD = 'blackhole'
DOWNLOAD_PROPERS = bool(check_setting_int(CFG, 'General', 'download_propers', 1))
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500)
SEARCH_FREQUENCY = check_setting_int(CFG, 'General', 'search_frequency', DEFAULT_SEARCH_FREQUENCY)
if SEARCH_FREQUENCY < MIN_SEARCH_FREQUENCY:
SEARCH_FREQUENCY = MIN_SEARCH_FREQUENCY
NZB_DIR = check_setting_str(CFG, 'Blackhole', 'nzb_dir', '')
TORRENT_DIR = check_setting_str(CFG, 'Blackhole', 'torrent_dir', '')
TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '')
PROCESS_AUTOMATICALLY = check_setting_int(CFG, 'General', 'process_automatically', 0)
RENAME_EPISODES = check_setting_int(CFG, 'General', 'rename_episodes', 1)
KEEP_PROCESSED_DIR = check_setting_int(CFG, 'General', 'keep_processed_dir', 1)
MOVE_ASSOCIATED_FILES = check_setting_int(CFG, 'General', 'move_associated_files', 0)
CREATE_MISSING_SHOW_DIRS = check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0)
ADD_SHOWS_WO_DIR = check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0)
EZRSS = bool(check_setting_int(CFG, 'General', 'use_torrent', 0))
if not EZRSS:
EZRSS = bool(check_setting_int(CFG, 'EZRSS', 'ezrss', 0))
DTT = bool(check_setting_int(CFG, 'DTT', 'dtt', 0))
DTT_NORAR = bool(check_setting_int(CFG, 'DTT', 'dtt_norar', 0))
DTT_SINGLE = bool(check_setting_int(CFG, 'DTT', 'dtt_single', 0))
TVTORRENTS = bool(check_setting_int(CFG, 'TVTORRENTS', 'tvtorrents', 0))
TVTORRENTS_DIGEST = check_setting_str(CFG, 'TVTORRENTS', 'tvtorrents_digest', '')
TVTORRENTS_HASH = check_setting_str(CFG, 'TVTORRENTS', 'tvtorrents_hash', '')
THEPIRATEBAY = bool(check_setting_int(CFG, 'THEPIRATEBAY', 'thepiratebay', 0))
THEPIRATEBAY_TRUSTED = bool(check_setting_int(CFG, 'THEPIRATEBAY', 'thepiratebay_trusted', 0))
THEPIRATEBAY_PROXY = bool(check_setting_int(CFG, 'THEPIRATEBAY', 'thepiratebay_proxy', 0))
THEPIRATEBAY_PROXY_URL = check_setting_str(CFG, 'THEPIRATEBAY', 'thepiratebay_proxy_url', '')
BTN = bool(check_setting_int(CFG, 'BTN', 'btn', 0))
BTN_API_KEY = check_setting_str(CFG, 'BTN', 'btn_api_key', '')
KICKASS = bool(check_setting_int(CFG, 'KICKASS', 'kickass', 0))
TORRENTZ = bool(check_setting_int(CFG, 'TORRENTZ', 'torrentz', 0))
TORRENTZ_VERIFIED = bool(check_setting_int(CFG, 'TORRENTZ', 'torrentz_verified', 0))
NZBS = bool(check_setting_int(CFG, 'NZBs', 'nzbs', 0))
NZBS_UID = check_setting_str(CFG, 'NZBs', 'nzbs_uid', '')
NZBS_HASH = check_setting_str(CFG, 'NZBs', 'nzbs_hash', '')
NZBSRUS = bool(check_setting_int(CFG, 'NZBsRUS', 'nzbsrus', 0))
NZBSRUS_UID = check_setting_str(CFG, 'NZBsRUS', 'nzbsrus_uid', '')
NZBSRUS_HASH = check_setting_str(CFG, 'NZBsRUS', 'nzbsrus_hash', '')
NZBMATRIX = bool(check_setting_int(CFG, 'NZBMatrix', 'nzbmatrix', 0))
NZBMATRIX_USERNAME = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_username', '')
NZBMATRIX_APIKEY = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_apikey', '')
NEWZBIN = bool(check_setting_int(CFG, 'Newzbin', 'newzbin', 0))
NEWZBIN_USERNAME = check_setting_str(CFG, 'Newzbin', 'newzbin_username', '')
NEWZBIN_PASSWORD = check_setting_str(CFG, 'Newzbin', 'newzbin_password', '')
WOMBLE = bool(check_setting_int(CFG, 'Womble', 'womble', 1))
SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '')
SAB_PASSWORD = check_setting_str(CFG, 'SABnzbd', 'sab_password', '')
SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '')
SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', 'tv')
SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789')
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv')
NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '')
TORRENT_USERNAME = check_setting_str(CFG, 'TORRENT', 'torrent_username', '')
TORRENT_PASSWORD = check_setting_str(CFG, 'TORRENT', 'torrent_password', '')
TORRENT_HOST = check_setting_str(CFG, 'TORRENT', 'torrent_host', '')
TORRENT_PATH = check_setting_str(CFG, 'TORRENT', 'torrent_path', '')
TORRENT_RATIO = check_setting_str(CFG, 'TORRENT', 'torrent_ratio', '')
TORRENT_PAUSED = bool(check_setting_int(CFG, 'TORRENT', 'torrent_paused', 0))
USE_XBMC = bool(check_setting_int(CFG, 'XBMC', 'use_xbmc', 0))
XBMC_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_onsnatch', 0))
XBMC_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_ondownload', 0))
XBMC_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_library', 0))
XBMC_UPDATE_FULL = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_full', 0))
XBMC_HOST = check_setting_str(CFG, 'XBMC', 'xbmc_host', '')
XBMC_USERNAME = check_setting_str(CFG, 'XBMC', 'xbmc_username', '')
XBMC_PASSWORD = check_setting_str(CFG, 'XBMC', 'xbmc_password', '')
USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0))
PLEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsnatch', 0))
PLEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_ondownload', 0))
PLEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Plex', 'plex_update_library', 0))
PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '')
PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '')
PLEX_USERNAME = check_setting_str(CFG, 'Plex', 'plex_username', '')
PLEX_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_password', '')
USE_GROWL = bool(check_setting_int(CFG, 'Growl', 'use_growl', 0))
GROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsnatch', 0))
GROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_ondownload', 0))
GROWL_HOST = check_setting_str(CFG, 'Growl', 'growl_host', '')
GROWL_PASSWORD = check_setting_str(CFG, 'Growl', 'growl_password', '')
USE_PROWL = bool(check_setting_int(CFG, 'Prowl', 'use_prowl', 0))
PROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsnatch', 0))
PROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_ondownload', 0))
PROWL_API = check_setting_str(CFG, 'Prowl', 'prowl_api', '')
PROWL_PRIORITY = check_setting_str(CFG, 'Prowl', 'prowl_priority', "0")
USE_TWITTER = bool(check_setting_int(CFG, 'Twitter', 'use_twitter', 0))
TWITTER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_onsnatch', 0))
TWITTER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_ondownload', 0))
TWITTER_USERNAME = check_setting_str(CFG, 'Twitter', 'twitter_username', '')
TWITTER_PASSWORD = check_setting_str(CFG, 'Twitter', 'twitter_password', '')
TWITTER_PREFIX = check_setting_str(CFG, 'Twitter', 'twitter_prefix', 'Sick Beard')
USE_NOTIFO = bool(check_setting_int(CFG, 'Notifo', 'use_notifo', 0))
NOTIFO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Notifo', 'notifo_notify_onsnatch', 0))
NOTIFO_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Notifo', 'notifo_notify_ondownload', 0))
NOTIFO_USERNAME = check_setting_str(CFG, 'Notifo', 'notifo_username', '')
NOTIFO_APISECRET = check_setting_str(CFG, 'Notifo', 'notifo_apisecret', '')
USE_BOXCAR = bool(check_setting_int(CFG, 'Boxcar', 'use_boxcar', 0))
BOXCAR_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar', 'boxcar_notify_onsnatch', 0))
BOXCAR_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar', 'boxcar_notify_ondownload', 0))
BOXCAR_USERNAME = check_setting_str(CFG, 'Boxcar', 'boxcar_username', '')
USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0))
PUSHOVER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsnatch', 0))
PUSHOVER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_ondownload', 0))
PUSHOVER_USERKEY = check_setting_str(CFG, 'Pushover', 'pushover_userkey', '')
USE_LIBNOTIFY = bool(check_setting_int(CFG, 'Libnotify', 'use_libnotify', 0))
LIBNOTIFY_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsnatch', 0))
LIBNOTIFY_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_ondownload', 0))
USE_NMJ = bool(check_setting_int(CFG, 'NMJ', 'use_nmj', 0))
NMJ_HOST = check_setting_str(CFG, 'NMJ', 'nmj_host', '')
NMJ_DATABASE = check_setting_str(CFG, 'NMJ', 'nmj_database', '')
NMJ_MOUNT = check_setting_str(CFG, 'NMJ', 'nmj_mount', '')
USE_SYNOINDEX = bool(check_setting_int(CFG, 'Synology', 'use_synoindex', 0))
USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0))
TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '')
TRAKT_PASSWORD = check_setting_str(CFG, 'Trakt', 'trakt_password', '')
TRAKT_API = check_setting_str(CFG, 'Trakt', 'trakt_api', '')
TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
TRAKT_USE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_watchlist', 0))
TRAKT_METHOD_ADD = check_setting_str(CFG, 'Trakt', 'trakt_method_add', "0")
TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
PYTIVO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsnatch', 0))
PYTIVO_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_ondownload', 0))
PYTIVO_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'pyTivo', 'pyTivo_update_library', 0))
PYTIVO_HOST = check_setting_str(CFG, 'pyTivo', 'pytivo_host', '')
PYTIVO_SHARE_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_share_name', '')
PYTIVO_TIVO_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_tivo_name', '')
USE_NMA = bool(check_setting_int(CFG, 'NMA', 'use_nma', 0))
NMA_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsnatch', 0))
NMA_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_ondownload', 0))
NMA_API = check_setting_str(CFG, 'NMA', 'nma_api', '')
NMA_PRIORITY = check_setting_str(CFG, 'NMA', 'nma_priority', "0")
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
IGNORE_WORDS = check_setting_str(CFG, 'General', 'ignore_words', IGNORE_WORDS)
EXTRA_SCRIPTS = [x for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if x]
USE_BANNER = bool(check_setting_int(CFG, 'General', 'use_banner', 0))
USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0))
METADATA_TYPE = check_setting_str(CFG, 'General', 'metadata_type', '')
metadata_provider_dict = metadata.get_metadata_generator_dict()
# if this exists it's legacy, use the info to upgrade metadata to the new settings
if METADATA_TYPE:
old_metadata_class = None
if METADATA_TYPE == 'xbmc':
old_metadata_class = metadata.xbmc.metadata_class
elif METADATA_TYPE == 'mediabrowser':
old_metadata_class = metadata.mediabrowser.metadata_class
elif METADATA_TYPE == 'ps3':
old_metadata_class = metadata.ps3.metadata_class
if old_metadata_class:
METADATA_SHOW = bool(check_setting_int(CFG, 'General', 'metadata_show', 1))
METADATA_EPISODE = bool(check_setting_int(CFG, 'General', 'metadata_episode', 1))
ART_POSTER = bool(check_setting_int(CFG, 'General', 'art_poster', 1))
ART_FANART = bool(check_setting_int(CFG, 'General', 'art_fanart', 1))
ART_THUMBNAILS = bool(check_setting_int(CFG, 'General', 'art_thumbnails', 1))
ART_SEASON_THUMBNAILS = bool(check_setting_int(CFG, 'General', 'art_season_thumbnails', 1))
new_metadata_class = old_metadata_class(METADATA_SHOW,
METADATA_EPISODE,
ART_POSTER,
ART_FANART,
ART_THUMBNAILS,
ART_SEASON_THUMBNAILS)
metadata_provider_dict[new_metadata_class.name] = new_metadata_class
# this is the normal codepath for metadata config
else:
METADATA_XBMC = check_setting_str(CFG, 'General', 'metadata_xbmc', '0|0|0|0|0|0')
METADATA_MEDIABROWSER = check_setting_str(CFG, 'General', 'metadata_mediabrowser', '0|0|0|0|0|0')
METADATA_PS3 = check_setting_str(CFG, 'General', 'metadata_ps3', '0|0|0|0|0|0')
METADATA_WDTV = check_setting_str(CFG, 'General', 'metadata_wdtv', '0|0|0|0|0|0')
METADATA_TIVO = check_setting_str(CFG, 'General', 'metadata_tivo', '0|0|0|0|0|0')
METADATA_SYNOLOGY = check_setting_str(CFG, 'General', 'metadata_synology', '0|0|0|0|0|0')
for cur_metadata_tuple in [(METADATA_XBMC, metadata.xbmc),
(METADATA_MEDIABROWSER, metadata.mediabrowser),
(METADATA_PS3, metadata.ps3),
(METADATA_WDTV, metadata.wdtv),
(METADATA_TIVO, metadata.tivo),
(METADATA_SYNOLOGY, metadata.synology),
]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
metadata_provider_dict[tmp_provider.name] = tmp_provider
COMING_EPS_LAYOUT = check_setting_str(CFG, 'GUI', 'coming_eps_layout', 'banner')
COMING_EPS_DISPLAY_PAUSED = bool(check_setting_int(CFG, 'GUI', 'coming_eps_display_paused', 0))
COMING_EPS_SORT = check_setting_str(CFG, 'GUI', 'coming_eps_sort', 'date')
newznabData = check_setting_str(CFG, 'Newznab', 'newznab_data', '')
newznabProviderList = providers.getNewznabProviderList(newznabData)
providerList = providers.makeProviderList()
        # set up logging
logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)
# initialize the main SB database
db.upgradeDatabase(db.DBConnection(), mainDB.InitialSchema)
# initialize the cache database
db.upgradeDatabase(db.DBConnection("cache.db"), cache_db.InitialSchema)
# fix up any db problems
db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck)
# migrate the config if it needs it
migrator = ConfigMigrator(CFG)
migrator.migrate_config()
currentSearchScheduler = scheduler.Scheduler(searchCurrent.CurrentSearcher(),
cycleTime=datetime.timedelta(minutes=SEARCH_FREQUENCY),
threadName="SEARCH",
runImmediately=True)
# the interval for this is stored inside the ShowUpdater class
showUpdaterInstance = showUpdater.ShowUpdater()
showUpdateScheduler = scheduler.Scheduler(showUpdaterInstance,
cycleTime=showUpdaterInstance.updateInterval,
threadName="SHOWUPDATER",
runImmediately=False)
versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
cycleTime=datetime.timedelta(hours=12),
threadName="CHECKVERSION",
runImmediately=True)
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE",
silent=True)
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SEARCHQUEUE",
silent=True)
properFinderInstance = properFinder.ProperFinder()
properFinderScheduler = scheduler.Scheduler(properFinderInstance,
cycleTime=properFinderInstance.updateInterval,
threadName="FINDPROPERS",
runImmediately=False)
autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(),
cycleTime=datetime.timedelta(minutes=10),
threadName="POSTPROCESSER",
runImmediately=True)
traktWatchListCheckerSchedular = scheduler.Scheduler(traktWatchListChecker.TraktChecker(),
cycleTime=datetime.timedelta(minutes=10),
threadName="TRAKTWATCHLIST",
runImmediately=True)
backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(),
cycleTime=datetime.timedelta(minutes=get_backlog_cycle_time()),
threadName="BACKLOG",
runImmediately=True)
backlogSearchScheduler.action.cycleTime = BACKLOG_SEARCH_FREQUENCY
showList = []
loadingShowList = {}
__INITIALIZED__ = True
return True
def start():
global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, \
showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \
properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
traktWatchListCheckerSchedular, started
with INIT_LOCK:
if __INITIALIZED__:
# start the search scheduler
currentSearchScheduler.thread.start()
# start the backlog scheduler
backlogSearchScheduler.thread.start()
# start the show updater
showUpdateScheduler.thread.start()
# start the version checker
versionCheckScheduler.thread.start()
# start the queue checker
showQueueScheduler.thread.start()
# start the search queue checker
searchQueueScheduler.thread.start()
            # start the proper finder
            properFinderScheduler.thread.start()
            # start the post processor
            autoPostProcesserScheduler.thread.start()
# start the trakt watchlist
traktWatchListCheckerSchedular.thread.start()
started = True
def halt():
global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, showUpdateScheduler, \
showQueueScheduler, properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
traktWatchListCheckerSchedular, started
with INIT_LOCK:
if __INITIALIZED__:
logger.log(u"Aborting all threads")
# abort all the threads
currentSearchScheduler.abort = True
logger.log(u"Waiting for the SEARCH thread to exit")
try:
currentSearchScheduler.thread.join(10)
except:
pass
backlogSearchScheduler.abort = True
logger.log(u"Waiting for the BACKLOG thread to exit")
try:
backlogSearchScheduler.thread.join(10)
except:
pass
showUpdateScheduler.abort = True
logger.log(u"Waiting for the SHOWUPDATER thread to exit")
try:
showUpdateScheduler.thread.join(10)
except:
pass
versionCheckScheduler.abort = True
logger.log(u"Waiting for the VERSIONCHECKER thread to exit")
try:
versionCheckScheduler.thread.join(10)
except:
pass
showQueueScheduler.abort = True
logger.log(u"Waiting for the SHOWQUEUE thread to exit")
try:
showQueueScheduler.thread.join(10)
except:
pass
searchQueueScheduler.abort = True
logger.log(u"Waiting for the SEARCHQUEUE thread to exit")
try:
searchQueueScheduler.thread.join(10)
except:
pass
autoPostProcesserScheduler.abort = True
logger.log(u"Waiting for the POSTPROCESSER thread to exit")
try:
autoPostProcesserScheduler.thread.join(10)
except:
pass
traktWatchListCheckerSchedular.abort = True
logger.log(u"Waiting for the TRAKTWATCHLIST thread to exit")
try:
traktWatchListCheckerSchedular.thread.join(10)
except:
pass
properFinderScheduler.abort = True
logger.log(u"Waiting for the PROPERFINDER thread to exit")
try:
properFinderScheduler.thread.join(10)
except:
pass
__INITIALIZED__ = False
def sig_handler(signum=None, frame=None):
    if signum is not None:
logger.log(u"Signal %i caught, saving and exiting..." % int(signum))
saveAndShutdown()
def saveAll():
global showList
# write all shows
logger.log(u"Saving all shows to the database")
for show in showList:
show.saveToDB()
# save config
logger.log(u"Saving config file to disk")
save_config()
def saveAndShutdown(restart=False):
halt()
saveAll()
logger.log(u"Killing cherrypy")
cherrypy.engine.exit()
if CREATEPID:
logger.log(u"Removing pidfile " + str(PIDFILE))
os.remove(PIDFILE)
if restart:
install_type = versionCheckScheduler.action.install_type
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, MY_FULLNAME]
elif install_type == 'win':
if hasattr(sys, 'frozen'):
# c:\dir\to\updater.exe 12345 c:\dir\to\sickbeard.exe
popen_list = [os.path.join(PROG_DIR, 'updater.exe'), str(PID), sys.executable]
else:
logger.log(u"Unknown SB launch method, please file a bug report about this", logger.ERROR)
popen_list = [sys.executable, os.path.join(PROG_DIR, 'updater.py'), str(PID), sys.executable, MY_FULLNAME ]
if popen_list:
popen_list += MY_ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.log(u"Restarting Sick Beard with " + str(popen_list))
subprocess.Popen(popen_list, cwd=os.getcwd())
os._exit(0)
def invoke_command(to_call, *args, **kwargs):
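    # Wraps the call in a closure and stores it in `invoked_command` so that
    # it can be executed later from the main thread (e.g. a restart or
    # shutdown requested through the web interface).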
global invoked_command
def delegate():
to_call(*args, **kwargs)
invoked_command = delegate
logger.log(u"Placed invoked command: "+repr(invoked_command)+" for "+repr(to_call)+" with "+repr(args)+" and "+repr(kwargs), logger.DEBUG)
def invoke_restart(soft=True):
invoke_command(restart, soft=soft)
def invoke_shutdown():
invoke_command(saveAndShutdown)
def restart(soft=True):
if soft:
halt()
saveAll()
#logger.log(u"Restarting cherrypy")
#cherrypy.engine.restart()
logger.log(u"Re-initializing all data")
initialize()
else:
saveAndShutdown(restart=True)
def save_config():
new_config = ConfigObj()
new_config.filename = CONFIG_FILE
new_config['General'] = {}
new_config['General']['log_dir'] = LOG_DIR
new_config['General']['web_port'] = WEB_PORT
new_config['General']['web_host'] = WEB_HOST
new_config['General']['web_ipv6'] = int(WEB_IPV6)
new_config['General']['web_log'] = int(WEB_LOG)
new_config['General']['web_root'] = WEB_ROOT
new_config['General']['web_username'] = WEB_USERNAME
new_config['General']['web_password'] = WEB_PASSWORD
new_config['General']['use_api'] = int(USE_API)
new_config['General']['api_key'] = API_KEY
new_config['General']['enable_https'] = int(ENABLE_HTTPS)
new_config['General']['https_cert'] = HTTPS_CERT
new_config['General']['https_key'] = HTTPS_KEY
new_config['General']['use_nzbs'] = int(USE_NZBS)
new_config['General']['use_torrents'] = int(USE_TORRENTS)
new_config['General']['nzb_method'] = NZB_METHOD
new_config['General']['torrent_method'] = TORRENT_METHOD
new_config['General']['usenet_retention'] = int(USENET_RETENTION)
new_config['General']['search_frequency'] = int(SEARCH_FREQUENCY)
new_config['General']['download_propers'] = int(DOWNLOAD_PROPERS)
new_config['General']['quality_default'] = int(QUALITY_DEFAULT)
new_config['General']['status_default'] = int(STATUS_DEFAULT)
new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT)
new_config['General']['provider_order'] = ' '.join([x.getID() for x in providers.sortedProviderList()])
new_config['General']['version_notify'] = int(VERSION_NOTIFY)
new_config['General']['naming_pattern'] = NAMING_PATTERN
new_config['General']['naming_custom_abd'] = int(NAMING_CUSTOM_ABD)
new_config['General']['naming_abd_pattern'] = NAMING_ABD_PATTERN
new_config['General']['naming_multi_ep'] = int(NAMING_MULTI_EP)
new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
new_config['General']['use_banner'] = int(USE_BANNER)
new_config['General']['use_listview'] = int(USE_LISTVIEW)
new_config['General']['metadata_xbmc'] = metadata_provider_dict['XBMC'].get_config()
new_config['General']['metadata_mediabrowser'] = metadata_provider_dict['MediaBrowser'].get_config()
new_config['General']['metadata_ps3'] = metadata_provider_dict['Sony PS3'].get_config()
new_config['General']['metadata_wdtv'] = metadata_provider_dict['WDTV'].get_config()
new_config['General']['metadata_tivo'] = metadata_provider_dict['TIVO'].get_config()
new_config['General']['metadata_synology'] = metadata_provider_dict['Synology'].get_config()
new_config['General']['cache_dir'] = ACTUAL_CACHE_DIR if ACTUAL_CACHE_DIR else 'cache'
new_config['General']['root_dirs'] = ROOT_DIRS if ROOT_DIRS else ''
new_config['General']['tv_download_dir'] = TV_DOWNLOAD_DIR
new_config['General']['keep_processed_dir'] = int(KEEP_PROCESSED_DIR)
new_config['General']['move_associated_files'] = int(MOVE_ASSOCIATED_FILES)
new_config['General']['process_automatically'] = int(PROCESS_AUTOMATICALLY)
new_config['General']['rename_episodes'] = int(RENAME_EPISODES)
new_config['General']['create_missing_show_dirs'] = CREATE_MISSING_SHOW_DIRS
new_config['General']['add_shows_wo_dir'] = ADD_SHOWS_WO_DIR
new_config['General']['extra_scripts'] = '|'.join(EXTRA_SCRIPTS)
new_config['General']['git_path'] = GIT_PATH
new_config['General']['ignore_words'] = IGNORE_WORDS
new_config['Blackhole'] = {}
new_config['Blackhole']['nzb_dir'] = NZB_DIR
new_config['Blackhole']['torrent_dir'] = TORRENT_DIR
new_config['EZRSS'] = {}
new_config['EZRSS']['ezrss'] = int(EZRSS)
new_config['TVTORRENTS'] = {}
new_config['TVTORRENTS']['tvtorrents'] = int(TVTORRENTS)
new_config['TVTORRENTS']['tvtorrents_digest'] = TVTORRENTS_DIGEST
new_config['TVTORRENTS']['tvtorrents_hash'] = TVTORRENTS_HASH
new_config['KICKASS'] = {}
new_config['KICKASS']['kickass'] = int(KICKASS)
new_config['TORRENTZ'] = {}
new_config['TORRENTZ']['torrentz'] = int(TORRENTZ)
new_config['TORRENTZ']['torrentz_verified'] = int(TORRENTZ_VERIFIED)
new_config['THEPIRATEBAY'] = {}
new_config['THEPIRATEBAY']['thepiratebay'] = int(THEPIRATEBAY)
new_config['THEPIRATEBAY']['thepiratebay_trusted'] = int(THEPIRATEBAY_TRUSTED)
new_config['THEPIRATEBAY']['thepiratebay_proxy'] = int(THEPIRATEBAY_PROXY)
new_config['THEPIRATEBAY']['thepiratebay_proxy_url'] = THEPIRATEBAY_PROXY_URL
new_config['DTT'] = {}
new_config['DTT']['dtt'] = int(DTT)
new_config['DTT']['dtt_norar'] = int(DTT_NORAR)
new_config['DTT']['dtt_single'] = int(DTT_SINGLE)
new_config['BTN'] = {}
new_config['BTN']['btn'] = int(BTN)
new_config['BTN']['btn_api_key'] = BTN_API_KEY
new_config['NZBs'] = {}
new_config['NZBs']['nzbs'] = int(NZBS)
new_config['NZBs']['nzbs_uid'] = NZBS_UID
new_config['NZBs']['nzbs_hash'] = NZBS_HASH
new_config['NZBsRUS'] = {}
new_config['NZBsRUS']['nzbsrus'] = int(NZBSRUS)
new_config['NZBsRUS']['nzbsrus_uid'] = NZBSRUS_UID
new_config['NZBsRUS']['nzbsrus_hash'] = NZBSRUS_HASH
new_config['NZBMatrix'] = {}
new_config['NZBMatrix']['nzbmatrix'] = int(NZBMATRIX)
new_config['NZBMatrix']['nzbmatrix_username'] = NZBMATRIX_USERNAME
new_config['NZBMatrix']['nzbmatrix_apikey'] = NZBMATRIX_APIKEY
new_config['Newzbin'] = {}
new_config['Newzbin']['newzbin'] = int(NEWZBIN)
new_config['Newzbin']['newzbin_username'] = NEWZBIN_USERNAME
new_config['Newzbin']['newzbin_password'] = NEWZBIN_PASSWORD
new_config['Womble'] = {}
new_config['Womble']['womble'] = int(WOMBLE)
new_config['SABnzbd'] = {}
new_config['SABnzbd']['sab_username'] = SAB_USERNAME
new_config['SABnzbd']['sab_password'] = SAB_PASSWORD
new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY
new_config['SABnzbd']['sab_category'] = SAB_CATEGORY
new_config['SABnzbd']['sab_host'] = SAB_HOST
new_config['NZBget'] = {}
new_config['NZBget']['nzbget_password'] = NZBGET_PASSWORD
new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY
new_config['NZBget']['nzbget_host'] = NZBGET_HOST
new_config['TORRENT'] = {}
new_config['TORRENT']['torrent_username'] = TORRENT_USERNAME
new_config['TORRENT']['torrent_password'] = TORRENT_PASSWORD
new_config['TORRENT']['torrent_host'] = TORRENT_HOST
new_config['TORRENT']['torrent_path'] = TORRENT_PATH
new_config['TORRENT']['torrent_ratio'] = TORRENT_RATIO
new_config['TORRENT']['torrent_paused'] = int(TORRENT_PAUSED)
new_config['XBMC'] = {}
new_config['XBMC']['use_xbmc'] = int(USE_XBMC)
new_config['XBMC']['xbmc_notify_onsnatch'] = int(XBMC_NOTIFY_ONSNATCH)
new_config['XBMC']['xbmc_notify_ondownload'] = int(XBMC_NOTIFY_ONDOWNLOAD)
new_config['XBMC']['xbmc_update_library'] = int(XBMC_UPDATE_LIBRARY)
new_config['XBMC']['xbmc_update_full'] = int(XBMC_UPDATE_FULL)
new_config['XBMC']['xbmc_host'] = XBMC_HOST
new_config['XBMC']['xbmc_username'] = XBMC_USERNAME
new_config['XBMC']['xbmc_password'] = XBMC_PASSWORD
new_config['Plex'] = {}
new_config['Plex']['use_plex'] = int(USE_PLEX)
new_config['Plex']['plex_notify_onsnatch'] = int(PLEX_NOTIFY_ONSNATCH)
new_config['Plex']['plex_notify_ondownload'] = int(PLEX_NOTIFY_ONDOWNLOAD)
new_config['Plex']['plex_update_library'] = int(PLEX_UPDATE_LIBRARY)
new_config['Plex']['plex_server_host'] = PLEX_SERVER_HOST
new_config['Plex']['plex_host'] = PLEX_HOST
new_config['Plex']['plex_username'] = PLEX_USERNAME
new_config['Plex']['plex_password'] = PLEX_PASSWORD
new_config['Growl'] = {}
new_config['Growl']['use_growl'] = int(USE_GROWL)
new_config['Growl']['growl_notify_onsnatch'] = int(GROWL_NOTIFY_ONSNATCH)
new_config['Growl']['growl_notify_ondownload'] = int(GROWL_NOTIFY_ONDOWNLOAD)
new_config['Growl']['growl_host'] = GROWL_HOST
new_config['Growl']['growl_password'] = GROWL_PASSWORD
new_config['Prowl'] = {}
new_config['Prowl']['use_prowl'] = int(USE_PROWL)
new_config['Prowl']['prowl_notify_onsnatch'] = int(PROWL_NOTIFY_ONSNATCH)
new_config['Prowl']['prowl_notify_ondownload'] = int(PROWL_NOTIFY_ONDOWNLOAD)
new_config['Prowl']['prowl_api'] = PROWL_API
new_config['Prowl']['prowl_priority'] = PROWL_PRIORITY
new_config['Twitter'] = {}
new_config['Twitter']['use_twitter'] = int(USE_TWITTER)
new_config['Twitter']['twitter_notify_onsnatch'] = int(TWITTER_NOTIFY_ONSNATCH)
new_config['Twitter']['twitter_notify_ondownload'] = int(TWITTER_NOTIFY_ONDOWNLOAD)
new_config['Twitter']['twitter_username'] = TWITTER_USERNAME
new_config['Twitter']['twitter_password'] = TWITTER_PASSWORD
new_config['Twitter']['twitter_prefix'] = TWITTER_PREFIX
new_config['Notifo'] = {}
new_config['Notifo']['use_notifo'] = int(USE_NOTIFO)
new_config['Notifo']['notifo_notify_onsnatch'] = int(NOTIFO_NOTIFY_ONSNATCH)
new_config['Notifo']['notifo_notify_ondownload'] = int(NOTIFO_NOTIFY_ONDOWNLOAD)
new_config['Notifo']['notifo_username'] = NOTIFO_USERNAME
new_config['Notifo']['notifo_apisecret'] = NOTIFO_APISECRET
new_config['Boxcar'] = {}
new_config['Boxcar']['use_boxcar'] = int(USE_BOXCAR)
new_config['Boxcar']['boxcar_notify_onsnatch'] = int(BOXCAR_NOTIFY_ONSNATCH)
new_config['Boxcar']['boxcar_notify_ondownload'] = int(BOXCAR_NOTIFY_ONDOWNLOAD)
new_config['Boxcar']['boxcar_username'] = BOXCAR_USERNAME
new_config['Pushover'] = {}
new_config['Pushover']['use_pushover'] = int(USE_PUSHOVER)
new_config['Pushover']['pushover_notify_onsnatch'] = int(PUSHOVER_NOTIFY_ONSNATCH)
new_config['Pushover']['pushover_notify_ondownload'] = int(PUSHOVER_NOTIFY_ONDOWNLOAD)
new_config['Pushover']['pushover_userkey'] = PUSHOVER_USERKEY
new_config['Libnotify'] = {}
new_config['Libnotify']['use_libnotify'] = int(USE_LIBNOTIFY)
new_config['Libnotify']['libnotify_notify_onsnatch'] = int(LIBNOTIFY_NOTIFY_ONSNATCH)
new_config['Libnotify']['libnotify_notify_ondownload'] = int(LIBNOTIFY_NOTIFY_ONDOWNLOAD)
new_config['NMJ'] = {}
new_config['NMJ']['use_nmj'] = int(USE_NMJ)
new_config['NMJ']['nmj_host'] = NMJ_HOST
new_config['NMJ']['nmj_database'] = NMJ_DATABASE
new_config['NMJ']['nmj_mount'] = NMJ_MOUNT
new_config['Synology'] = {}
new_config['Synology']['use_synoindex'] = int(USE_SYNOINDEX)
new_config['Trakt'] = {}
new_config['Trakt']['use_trakt'] = int(USE_TRAKT)
new_config['Trakt']['trakt_username'] = TRAKT_USERNAME
new_config['Trakt']['trakt_password'] = TRAKT_PASSWORD
new_config['Trakt']['trakt_api'] = TRAKT_API
new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
new_config['Trakt']['trakt_use_watchlist'] = int(TRAKT_USE_WATCHLIST)
new_config['Trakt']['trakt_method_add'] = TRAKT_METHOD_ADD
new_config['Trakt']['trakt_start_paused'] = int(TRAKT_START_PAUSED)
new_config['pyTivo'] = {}
new_config['pyTivo']['use_pytivo'] = int(USE_PYTIVO)
new_config['pyTivo']['pytivo_notify_onsnatch'] = int(PYTIVO_NOTIFY_ONSNATCH)
new_config['pyTivo']['pytivo_notify_ondownload'] = int(PYTIVO_NOTIFY_ONDOWNLOAD)
new_config['pyTivo']['pyTivo_update_library'] = int(PYTIVO_UPDATE_LIBRARY)
new_config['pyTivo']['pytivo_host'] = PYTIVO_HOST
new_config['pyTivo']['pytivo_share_name'] = PYTIVO_SHARE_NAME
new_config['pyTivo']['pytivo_tivo_name'] = PYTIVO_TIVO_NAME
new_config['NMA'] = {}
new_config['NMA']['use_nma'] = int(USE_NMA)
new_config['NMA']['nma_notify_onsnatch'] = int(NMA_NOTIFY_ONSNATCH)
new_config['NMA']['nma_notify_ondownload'] = int(NMA_NOTIFY_ONDOWNLOAD)
new_config['NMA']['nma_api'] = NMA_API
new_config['NMA']['nma_priority'] = NMA_PRIORITY
new_config['Newznab'] = {}
new_config['Newznab']['newznab_data'] = '!!!'.join([x.configStr() for x in newznabProviderList])
new_config['GUI'] = {}
new_config['GUI']['coming_eps_layout'] = COMING_EPS_LAYOUT
new_config['GUI']['coming_eps_display_paused'] = int(COMING_EPS_DISPLAY_PAUSED)
new_config['GUI']['coming_eps_sort'] = COMING_EPS_SORT
new_config['General']['config_version'] = CONFIG_VERSION
new_config.write()
def launchBrowser(startPort=None):
if not startPort:
startPort = WEB_PORT
if ENABLE_HTTPS:
browserURL = 'https://localhost:%d%s' % (startPort, WEB_ROOT)
else:
browserURL = 'http://localhost:%d%s' % (startPort, WEB_ROOT)
try:
webbrowser.open(browserURL, 2, 1)
    except Exception:
try:
webbrowser.open(browserURL, 1, 1)
        except Exception:
logger.log(u"Unable to launch a browser", logger.ERROR)
def getEpList(epIDs, showid=None):
    if epIDs is None or len(epIDs) == 0:
return []
query = "SELECT * FROM tv_episodes WHERE tvdbid in (%s)" % (",".join(['?']*len(epIDs)),)
    params = list(epIDs)  # copy so appending showid below does not mutate the caller's list
    if showid is not None:
query += " AND showid = ?"
params.append(showid)
myDB = db.DBConnection()
sqlResults = myDB.select(query, params)
epList = []
for curEp in sqlResults:
curShowObj = helpers.findCertainShow(showList, int(curEp["showid"]))
curEpObj = curShowObj.getEpisode(int(curEp["season"]), int(curEp["episode"]))
epList.append(curEpObj)
return epList
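# --- Illustrative note (not part of the original module) ---
# For epIDs = [101, 102] and showid = 7, the query built above expands to
#     SELECT * FROM tv_episodes WHERE tvdbid in (?,?) AND showid = ?
# with params [101, 102, 7], i.e. one '?' placeholder per episode id plus the show id.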
|
gpl-3.0
| -6,555,618,988,849,008,000 | 41.072374 | 186 | 0.628064 | false |
plotly/plotly.py
|
packages/python/plotly/plotly/validators/layout/ternary/_caxis.py
|
1
|
11077
|
import _plotly_utils.basevalidators
class CaxisValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="caxis", parent_name="layout.ternary", **kwargs):
super(CaxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Caxis"),
data_docs=kwargs.pop(
"data_docs",
"""
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background
Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The
maximum is determined by the sum minus the
minimum values of the other two axes. The full
view corresponds to all the minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.
ternary.caxis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.ternary.caxis.tickformatstopdefaults), sets
the default property values to use for elements
of layout.ternary.caxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.cax
is.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
layout.ternary.caxis.title.font instead. Sets
this axis' title font. Note that the title's
font used to be customized by the now
deprecated `titlefont` attribute.
uirevision
Controls persistence of user-driven changes in
axis `min`, and `title` if in `editable: true`
configuration. Defaults to
`ternary<N>.uirevision`.
""",
),
**kwargs
)
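# --- Illustrative usage sketch (not part of the original validator module) ---
# The validator above backs the `layout.ternary.caxis` attribute. Assuming a recent
# plotly version, end-user code would typically exercise these properties like this:
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scatterternary(a=[0.2, 0.5, 0.3], b=[0.3, 0.2, 0.5], c=[0.5, 0.3, 0.2]))
#     fig.update_layout(ternary=dict(caxis=dict(min=0.01, linecolor="gray", ticks="outside")))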
|
mit
| -2,687,160,254,442,917,000 | 48.013274 | 84 | 0.528482 | false |
ajinabraham/Mobile-Security-Framework-MobSF
|
scripts/mass_static_analysis.py
|
1
|
3070
|
#!/usr/bin/env python
# Mass Static Analysis
import os
import urllib.request
import urllib.error
import urllib.parse
import argparse
import requests
import logging
logger = logging.getLogger(__name__)
def is_server_up(url):
try:
response = urllib.request.urlopen(url, timeout=5)
return True
except urllib.error.URLError:
pass
return False
def start_scan(directory, server_url, apikey, rescan='0'):
print("\nLooking for Android/iOS/Windows binaries or source code in : " + directory)
logger.info("Uploading to MobSF Server")
uploaded = []
MIME = {
".apk": 'application/octet-stream',
".ipa": 'application/octet-stream',
".appx": 'application/octet-stream',
".zip": 'application/zip'
}
for filename in os.listdir(directory):
fpath = os.path.join(directory, filename)
_, ext = os.path.splitext(fpath)
if ext in MIME:
files = {'file': (filename, open(fpath, 'rb'),
MIME[ext], {'Expires': '0'})}
response = requests.post(
server_url + '/api/v1/upload', files=files, headers={'AUTHORIZATION': apikey})
if response.status_code == 200 and "hash" in response.json():
logger.info("[OK] Upload OK: " + filename)
uploaded.append(response.json())
else:
logger.error("Performing Upload: " + filename)
logger.info("Running Static Analysis")
for upl in uploaded:
logger.info("Started Static Analysis on: %s", upl["file_name"])
if rescan == '1':
upl["re_scan"] = 1
response = requests.post(
server_url + "/api/v1/scan", data=upl, headers={'AUTHORIZATION': apikey})
if response.status_code == 200:
logger.info("[OK] Static Analysis Complete: " + upl["file_name"])
else:
logger.error("Performing Static Analysis: " + upl["file_name"])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory",
help="Path to the directory that contains mobile app binary/zipped source code")
parser.add_argument(
"-s", "--ipport", help="IP address and Port number of a running MobSF Server. (ex: 127.0.0.1:8000)")
parser.add_argument(
"-k", "--apikey", help="MobSF REST API Key")
parser.add_argument(
"-r", "--rescan", help="Run a fresh scan. Value can be 1 or 0 (Default: 0)")
args = parser.parse_args()
if args.directory and args.ipport and args.apikey:
server = args.ipport
directory = args.directory
server_url = "http://" + server
apikey = args.apikey
rescan = args.rescan
        if not is_server_up(server_url):
print("MobSF REST API Server is not running at " + server_url)
print("Exiting!")
exit(0)
# MobSF is running, start scan
start_scan(directory, server_url, apikey, rescan)
else:
parser.print_help()
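# --- Example invocation (illustrative; the path, host and API key are placeholders) ---
#   python mass_static_analysis.py -d /path/to/apps -s 127.0.0.1:8000 -k <REST_API_KEY> -r 0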
|
gpl-3.0
| 169,144,342,619,840,500 | 35.547619 | 108 | 0.587296 | false |
luzi82/HiSocial
|
core/user/UserGroup.py
|
1
|
2042
|
from sqlalchemy import Column, String
from sqlalchemy.schema import ForeignKey
from user import User
import Group
from base.DatabaseBase import DBB
class UserGroup(DBB):
__tablename__ = "hs_user_usergroup"
user_id = Column(String(User.USER_ID_LENGTH),ForeignKey("hs_user_user.user_id"), primary_key=True)
group_id = Column(String(Group.GROUP_ID_LENGTH),ForeignKey("hs_user_group.group_id"), primary_key=True)
# user_relationsip = relationship("User", backref="parent_assocs")
# group_relationsip = relationship("Group", backref="parent_assocs")
def __init__(self, user_id, group_id):
self.user_id = user_id
self.group_id = group_id
def join(session,user_id,group_id):
'''
Add user to group
:type session: sqlalchemy.orm.session.Session
:param session: sqlalchemy DB Session
:type group_id: str
:param group_id: The group id
:type user_id: str
:param user_id: The user id
'''
session.add(UserGroup(user_id=user_id,group_id=group_id))
session.flush()
def unjoin(session,user_id,group_id):
'''
Remove user from group
:type session: sqlalchemy.orm.session.Session
:param session: sqlalchemy DB Session
:type group_id: str
:param group_id: The group id
:type user_id: str
:param user_id: The user id
:rtype: boolean
:return: True iff success
'''
ret = session.query(UserGroup).filter(UserGroup.user_id==user_id).filter(UserGroup.group_id==group_id).delete() > 0
session.flush()
return ret
def get_group(session,user_id):
'''
    Get the ids of the groups that the user belongs to
    :type session: sqlalchemy.orm.session.Session
    :param session: sqlalchemy DB Session
    :type user_id: str
    :param user_id: The user id
    :rtype: list
    :return: list of group ids the user is a member of
'''
t = session.query(UserGroup.group_id).filter(UserGroup.user_id==user_id).all()
    if t is None: return []
return [ i[0] for i in t ]
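# --- Hedged usage sketch (not part of the original module) ---
# Assuming an open SQLAlchemy session and existing user/group rows, the module-level
# helpers above combine like this:
#
#     join(session, user_id='alice', group_id='admins')
#     assert 'admins' in get_group(session, user_id='alice')
#     unjoin(session, user_id='alice', group_id='admins')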
|
gpl-3.0
| -638,250,286,764,452,500 | 26.594595 | 119 | 0.653771 | false |
foxcarlos/trelar
|
pruebaCliente.py
|
1
|
4112
|
from trello import TrelloClient
import json
# api_key='your-key' - 05571f7ecf0d9177c020b9ab3495aaac
# api_secret='your-secret' - 03a65f30fb4946c03cba7998327f7e10025493f7e036408d6efbcdbd63fc66f5
# token='your-oauth-token-key' - 1316a12594687b611d0c631896b9b421436bd955d0d376162521c5ed267155d8
# token_secret='your-oauth-token-secret' - 5d0d9ac40b148703315c54e64b7998d2
client = TrelloClient(api_key='05571f7ecf0d9177c020b9ab3495aaac',
api_secret='03a65f30fb4946c03cba7998327f7e10025493f7e036408d6efbcdbd63fc66f5',
token='1316a12594687b611d0c631896b9b421436bd955d0d376162521c5ed267155d8',
token_secret='5d0d9ac40b148703315c54e64b7998d2'
)
def getTableros(filtro=""):
filtroPasado = filtro
tableros = client.list_boards(board_filter=filtroPasado)
lista = []
registros = [(tn.name, tn.id) for tn in tableros]
for f in registros:
campos = ['nombre_tablero', 'id_tablero']
convertir = dict(zip(campos, f))
lista.append(convertir)
tablerosDevolver = json.dumps(lista)
return tablerosDevolver
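# --- Illustrative output sketch (not part of the original script) ---
# getTableros("open") returns a JSON string such as:
#     '[{"nombre_tablero": "trelar", "id_tablero": "57581f7d6a945e2f6630a793"}]'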
# Get a board by its ID
tablero = client.get_board('57581f7d6a945e2f6630a793')
print(tablero)
# Get all the lists of a board
print( tablero.all_lists() )
# Get a list (column) from a board by its ID
lista = tablero.get_list('57582109bba4b95e66dbf4e1')
# Get the number of cards a list holds
lista.cardsCnt()
# Get all the cards it holds
lista.list_cards()
# List the open boards
print( client.list_boards(board_filter="open") )
# List all boards
print( client.list_boards() )
# List columns
abiertos = client.list_boards(board_filter="open")
for b in abiertos:
b.all_lists()
# List attributes
for b in abiertos:
for l in b.all_lists():
for c in l.list_cards():
print( c.fetch() )
# Get a card by its ID
def getTarjeta(id_tarjeta):
tarjeta = client.get_card(id_tarjeta)
# Get the date a card was moved
tarjeta.listCardMove_date()
# More about cards
    tarjeta.listCardMove_date() # Lists this card has belonged to
tarjeta.create_date
tarjeta.dateLastActivity
tarjeta.get_comments()
tarjeta.id
tarjeta.idLabels
    tarjeta.labels # List with all the label properties
    # Example:
for l in tarjeta.labels:
l.id, l.name, l.color
tarjeta.idList
    tarjeta.latestCardMove_date # Date the card was last moved
    tarjeta.list_labels # List of labels; a card can contain several labels
    # Example:
for l in tarjeta.list_labels:
l.id, l.name, l.color
tarjeta.member_ids
tarjeta.name
#
def getLista(id_lista):
tableroRecibido = {'id': '57581f7d6a945e2f6630a793'}
tableroId = tableroRecibido['id']
miTablero = client.get_board('57581f7d6a945e2f6630a793')
lista = miTablero.get_list(id_lista)
# Log out / close the session token
client.logout
# Other test commands run to get information about the cards
"""
>>> c.assign
>>> c.attach
>>> c.attachments
>>> c.attriExp
>>> c.board
<Board trelar>
>>> c.checklists
>>> c.client
<trello.trelloclient.TrelloClient object at 0xb41b4c4c>
>>> c.closed
False
>>> c.comment
<bound method Card.comment of <Card Testing a py-trello, obtener un listado de todos los tableros.>>
>>> c.create_date
datetime.datetime(2016, 6, 9, 4, 49, 2, 620000, tzinfo=tzutc())
>>> c.dateLastActivity
datetime.datetime(2016, 6, 9, 4, 49, 2, 610000, tzinfo=tzutc())
>>> c.date_last_activity
datetime.datetime(2016, 6, 9, 4, 49, 2, 610000, tzinfo=tzutc())
>>> c.desc
u''
>>> c.description
u''
>>> c.due
>>> c.due_date
''
>>> c.id
u'5758f53e74a534e5f6ef252b'
>>> c.idBoard
u'57581f7d6a945e2f6630a793'
>>> c.idLabels
[]
>>> c.idList
u'57582126be36c73d6688b24b'
>>> c.idMembers
[]
>>> c.idShort
16
>>> c.labels
[]
>>> c.label_ids
[]
>>> c.shortUrl
u'https://trello.com/c/9bDQNh0q'
>>> c.trello_list
<List En Desarrollo>
>>> c.url
u'https://trello.com/c/9bDQNh0q/16-testing-a-py-trello-obtener-un-listado-de-todos-los-tableros'
"""
|
mit
| 4,625,675,501,696,865,000 | 23.046784 | 100 | 0.71571 | false |
diofant/diofant
|
diofant/tests/logic/test_dimacs.py
|
1
|
1069
|
"""Various tests on satisfiability using dimacs cnf file syntax
You can find lots of cnf files in
ftp://dimacs.rutgers.edu/pub/challenge/satisfiability/benchmarks/cnf/
"""
import os
from diofant.logic.algorithms.dpll import dpll_satisfiable
from diofant.logic.algorithms.dpll2 import \
dpll_satisfiable as dpll2_satisfiable
from diofant.logic.utilities.dimacs import load
__all__ = ()
def load_file(location):
"""Loads a boolean expression from a file."""
location = os.path.dirname(__file__) + '/' + location
with open(location) as f:
s = f.read()
return load(s)
def test_f1():
assert bool(dpll_satisfiable(load_file('simple_v3_c2.cnf')))
def test_f2():
assert bool(dpll_satisfiable(load_file('quinn.cnf')))
def test_f3():
assert bool(dpll_satisfiable(load_file('f3.cnf')))
def test_f4():
assert not bool(dpll_satisfiable(load_file('hole6.cnf')))
def test_f5():
assert bool(dpll_satisfiable(load_file('f5.cnf')))
def test_f6():
assert not bool(dpll2_satisfiable(load_file('aim-50-2_0-no-2.cnf')))
|
bsd-3-clause
| 7,600,316,817,505,577,000 | 21.744681 | 72 | 0.692236 | false |
teonlamont/mne-python
|
mne/time_frequency/tests/test_tfr.py
|
3
|
25782
|
import numpy as np
import os.path as op
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import pytest
import mne
from mne import Epochs, read_events, pick_types, create_info, EpochsArray
from mne.io import read_raw_fif
from mne.utils import _TempDir, run_tests_if_main, requires_h5py, grand_average
from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,
tfr_multitaper, AverageTFR, read_tfrs,
write_tfrs, combine_tfr, cwt, _compute_tfr,
EpochsTFR)
from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet
from mne.viz.utils import _fake_click
from itertools import product
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
event_fname = op.join(data_path, 'test-eve.fif')
raw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif')
def test_tfr_ctf():
"""Test that TFRs can be calculated on CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = mne.make_fixed_length_events(raw, duration=0.5)
epochs = mne.Epochs(raw, events)
for method in (tfr_multitaper, tfr_morlet):
method(epochs, [10], 1) # smoke test
def test_morlet():
"""Test morlet with and without zero mean."""
Wz = morlet(1000, [10], 2., zero_mean=True)
W = morlet(1000, [10], 2., zero_mean=False)
assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
assert (np.abs(np.mean(np.real(W[0]))) > 1e-3)
def test_time_frequency():
"""Test time-frequency transform (PSD and ITC)."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
times = epochs.times
nave = len(data)
epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
# Test first with a single epoch
power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
# Now compute evoked
evoked = epochs.average()
power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
return_itc=False)
pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True, decim=slice(0, 2))
# Test picks argument and average parameter
pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=True, average=False)
power_picks, itc_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, picks=picks, average=True)
epochs_power_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=False, picks=picks, average=False)
power_picks_avg = epochs_power_picks.average()
# the actual data arrays here are equivalent, too...
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_picks_avg.data)
assert_array_almost_equal(itc.data, itc_picks.data)
assert_array_almost_equal(power.data, power_evoked.data)
# complex output
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
return_itc=False, average=True, output="complex")
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
output="complex", average=False, return_itc=True)
epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles,
output="complex", average=False,
return_itc=False)
epochs_power_2 = abs(epochs_power_complex)
epochs_power_3 = epochs_power_2.copy()
epochs_power_3.data[:] = np.inf # test that it's actually copied
assert_array_almost_equal(epochs_power_2.data, epochs_power_picks.data)
power_2 = epochs_power_2.average()
assert_array_almost_equal(power_2.data, power.data)
print(itc) # test repr
print(itc.ch_names) # test property
itc += power # test add
itc -= power # test sub
power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
assert 'meg' in power
assert 'grad' in power
assert 'mag' not in power
assert 'eeg' not in power
assert_equal(power.nave, nave)
assert_equal(itc.nave, nave)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (power_.data.shape == (len(picks), len(freqs), 2))
assert (power_.data.shape == itc_.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
# grand average
itc2 = itc.copy()
itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop
gave = grand_average([itc2, itc])
assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
itc2.data.shape[1],
itc2.data.shape[2]))
assert_equal(itc2.ch_names[1:], gave.ch_names)
assert_equal(gave.nave, 2)
itc2.drop_channels(itc2.info["bads"])
assert_array_almost_equal(gave.data, itc2.data)
itc2.data = np.ones(itc2.data.shape)
itc.data = np.zeros(itc.data.shape)
itc2.nave = 2
itc.nave = 1
itc.drop_channels([itc.ch_names[0]])
combined_itc = combine_tfr([itc2, itc])
assert_array_almost_equal(combined_itc.data,
np.ones(combined_itc.data.shape) * 2 / 3)
# more tests
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
return_itc=True)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
return_itc=False).data[0]
assert (tfr.shape == (len(picks), len(freqs), len(times)))
tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
decim=slice(0, 2), average=False,
return_itc=False).data[0]
assert (tfr2.shape == (len(picks), len(freqs), 2))
single_power = tfr_morlet(epochs, freqs, 2, average=False,
return_itc=False).data
single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
average=False, return_itc=False).data
single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
average=False, return_itc=False).data
single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
average=False, return_itc=False).data
assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
assert_array_almost_equal(np.mean(single_power2, axis=0),
power.data[:, :, :2])
assert_array_almost_equal(np.mean(single_power3, axis=0),
power.data[:, :, 1:3])
assert_array_almost_equal(np.mean(single_power4, axis=0),
power.data[:, :, 2:4])
power_pick = power.pick_channels(power.ch_names[:10:2])
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
power_drop = power.drop_channels(power.ch_names[1:10:2])
assert_equal(power_drop.ch_names, power_pick.ch_names)
assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
mne.equalize_channels([power_pick, power_drop])
assert_equal(power_pick.ch_names, power_drop.ch_names)
assert_equal(power_pick.data.shape, power_drop.data.shape)
# Test decimation:
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in [2, 3, 8, 9]:
for use_fft in [True, False]:
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
use_fft=use_fft, return_itc=True,
decim=decim)
assert_equal(power.data.shape[2],
np.ceil(float(len(times)) / decim))
freqs = list(range(50, 55))
decim = 2
_, n_chan, n_time = data.shape
tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
return_itc=False).data[0]
assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))
# Test cwt modes
Ws = morlet(512, [10, 20], n_cycles=2)
pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
for use_fft in [True, False]:
for mode in ['same', 'valid', 'full']:
cwt(data[0], Ws, use_fft=use_fft, mode=mode)
# Test decim parameter checks
pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, use_fft=True, return_itc=True,
decim='decim')
# When convolving in time, wavelets must not be longer than the data
pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws,
use_fft=False)
with pytest.warns(UserWarning, match='one of the wavelets is longer'):
cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)
# Check for off-by-one errors when using wavelets with an even number of
# samples
psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')
assert_equal(psd.shape, (2, 1, 420))
def test_dpsswavelet():
"""Test DPSS tapers."""
freqs = np.arange(5, 25, 3)
Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,
zero_mean=True)
assert (len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@pytest.mark.slowtest
def test_tfr_multitaper():
"""Test tfr_multitaper."""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(35, 70, 5, dtype=np.float)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, decim=slice(0, 2))
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_epochs = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False)
power_averaged = power_epochs.average()
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False).average()
print(power_evoked) # test repr for EpochsTFR
# Test channel picking
power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
assert_equal(power_epochs_picked.ch_names, ['SIM0001'])
pytest.raises(ValueError, tfr_multitaper, epochs,
freqs=freqs, n_cycles=freqs / 2.,
return_itc=True, average=False)
# test picks argument
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_averaged.data)
assert_array_almost_equal(power.times, power_epochs.times)
assert_array_almost_equal(power.times, power_averaged.times)
assert_equal(power.nave, power_averaged.nave)
assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
assert_array_almost_equal(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_array_almost_equal,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert (tmax > 0.3 and tmax < 0.7)
assert not np.any(itc.data < 0.)
assert (fmax > 40 and fmax < 60)
assert (power2.data.shape == (len(picks), len(freqs), 2))
assert (power2.data.shape == itc2.data.shape)
# Test decim parameter checks and compatibility between wavelets length
# and instance length in the time dimension.
pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=1000, time_bandwidth=4.0)
def test_crop():
"""Test TFR cropping."""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(0.2, 0.3)
assert_array_equal(tfr.times, [0.2, 0.3])
assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
"""Test TFR IO capacities."""
tempdir = _TempDir()
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
pytest.raises(IOError, tfr.save, fname)
tfr.comment = None
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert (isinstance(tfr.info, mne.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
pytest.raises(ValueError, read_tfrs, fname, condition='nonono')
# Test save of EpochsTFR.
data = np.zeros((5, 3, 2, 3))
tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,
comment='test', method='crazy-tfr')
tfr.save(fname, True)
read_tfr = read_tfrs(fname)[0]
assert_array_equal(tfr.data, read_tfr.data)
def test_plot():
"""Test TFR plotting."""
import matplotlib.pyplot as plt
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.plot([1, 2], title='title', colorbar=False,
mask=np.ones(tfr.data.shape[1:], bool))
plt.close('all')
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (1, 1))
ax3 = plt.subplot2grid((2, 2), (0, 1))
tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
plt.close('all')
tfr.plot([1, 2], title='title', colorbar=False, exclude='bads')
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
def test_plot_joint():
"""Test TFR joint plotting."""
import matplotlib.pyplot as plt
raw = read_raw_fif(raw_fname)
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
topomap_args = {'res': 8, 'contours': 0, 'sensors': False}
for combine in ('mean', 'rms', None):
tfr.plot_joint(title='auto', colorbar=True,
combine=combine, topomap_args=topomap_args)
plt.close('all')
# check various timefreqs
for timefreqs in (
{(tfr.times[0], tfr.freqs[1]): (0.1, 0.5),
(tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)},
[(tfr.times[1], tfr.freqs[1])]):
tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args)
plt.close('all')
# test bad timefreqs
timefreqs = ([(-100, 1)], tfr.times[1], [1],
[(tfr.times[1], tfr.freqs[1], tfr.freqs[1])])
for these_timefreqs in timefreqs:
pytest.raises(ValueError, tfr.plot_joint, these_timefreqs)
# test that the object is not internally modified
tfr_orig = tfr.copy()
tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]],
topomap_args=topomap_args)
plt.close('all')
assert_array_equal(tfr.data, tfr_orig.data)
assert (set(tfr.ch_names) == set(tfr_orig.ch_names))
assert (set(tfr.times) == set(tfr_orig.times))
def test_add_channels():
"""Test tfr splitting / re-appending channel types."""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
tfr_meg = tfr.copy().pick_types(meg=True)
tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
assert all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
assert all(ch in tfr_new.ch_names
for ch in tfr.ch_names if ch != 'STIM 001')
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])
pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
"""Test _compute_tfr function."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=[], exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
sfreq = epochs.info['sfreq']
freqs = np.arange(10, 20, 3).astype(float)
# Check all combination of options
for func, use_fft, zero_mean, output in product(
(tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')):
# Check exception
if (func == tfr_array_multitaper) and (output == 'phase'):
pytest.raises(NotImplementedError, func, data, sfreq=sfreq,
freqs=freqs, output=output)
continue
# Check runs
out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,
zero_mean=zero_mean, n_cycles=2., output=output)
# Check shapes
shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
if ('avg' in output) or ('itc' in output):
assert_array_equal(shape[1:], out.shape)
else:
assert_array_equal(shape, out.shape)
# Check types
if output in ('complex', 'avg_power_itc'):
assert_equal(np.complex, out.dtype)
else:
assert_equal(np.float, out.dtype)
assert (np.all(np.isfinite(out)))
# Check errors params
for _data in (None, 'foo', data[0]):
pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)
for _freqs in (None, 'foo', [[0]]):
pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)
for _sfreq in (None, 'foo'):
pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)
for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
for value in (None, 'foo'):
kwargs = {key: value} # FIXME pep8
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
**kwargs)
# No time_bandwidth param in morlet
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
method='morlet', time_bandwidth=1)
# No phase in multitaper XXX Check ?
pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
method='multitaper', output='phase')
# Inter-trial coherence tests
out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
assert (np.sum(out >= 1) == 0)
assert (np.sum(out <= 0) == 0)
# Check decim shapes
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
_decim = slice(None, None, decim) if isinstance(decim, int) else decim
n_time = len(np.arange(data.shape[2])[_decim])
shape = np.r_[data.shape[:2], len(freqs), n_time]
for method in ('multitaper', 'morlet'):
# Single trials
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2.)
assert_array_equal(shape, out.shape)
# Averages
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
output='avg_power', n_cycles=2.)
assert_array_equal(shape[1:], out.shape)
run_tests_if_main()
|
bsd-3-clause
| -3,954,698,318,471,807,000 | 39.096423 | 79 | 0.59111 | false |
ctk3b/InterMol
|
intermol/forces/nonlinear_bond_type.py
|
1
|
1553
|
import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class NonlinearBondType(AbstractBondType):
__slots__ = ['epsilon', 'r0', 'lamda', 'order', 'c']
@accepts_compatible_units(None, None,
epsilon=units.kilojoules_per_mole,
r0=units.nanometers,
lamda=units.nanometers,
order=None,
c=None)
def __init__(self, bondingtype1, bondingtype2,
epsilon=0.0 * units.kilojoules_per_mole,
r0=0.0 * units.nanometers,
lamda=0.0 * units.nanometers,
order=1, c=False):
AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
self.epsilon = epsilon
self.r0 = r0
self.lamda = lamda
class NonlinearBond(NonlinearBondType):
"""
http://lammps.sandia.gov/doc/bond_nonlinear.html
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
epsilon=0.0 * units.kilojoules_per_mole,
r0=0.0 * units.nanometers,
lamda=0.0 * units.nanometers,
order=1, c=False):
self.atom1 = atom1
self.atom2 = atom2
NonlinearBondType.__init__(self, bondingtype1, bondingtype2,
epsilon=epsilon,
r0=r0,
lamda=lamda,
order=order, c=c)
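# --- Illustrative construction sketch (not part of the original module) ---
# Assuming two intermol Atom objects `a1` and `a2`, a nonlinear bond could be built
# with explicit simtk units like this:
#
#     bond = NonlinearBond(a1, a2,
#                          epsilon=10.0 * units.kilojoules_per_mole,
#                          r0=0.15 * units.nanometers,
#                          lamda=0.05 * units.nanometers)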
|
mit
| 6,348,931,378,617,034,000 | 36 | 77 | 0.542176 | false |
immo/pyTOM
|
df/df_volume_control.py
|
1
|
1322
|
# coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from df_global import *
def get_instrument_volumes():
g = DfGlobal()
ivol = {}
for i in g["used.instruments"]:
ivol[i] = g["instruments"][i].volume
return ivol
def set_instrument_volumes(ivol):
g = DfGlobal()
for i in ivol:
g["instruments"][i].volume = ivol[i]
def save_volume_preset(name):
g = DfGlobal()
g["volume.presets"][name] = get_instrument_volumes()
def load_volume_preset(name):
g = DfGlobal()
set_instrument_volumes(g["volume.presets"][name])
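# --- Hedged usage sketch (not part of the original module) ---
# Assuming DfGlobal() already holds "used.instruments", "instruments" and
# "volume.presets", the helpers above combine like this:
#
#     save_volume_preset('soft')    # snapshot the current per-instrument volumes
#     # ... change volumes interactively ...
#     load_volume_preset('soft')    # restore the snapshot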
|
gpl-3.0
| -985,648,683,511,073,000 | 30.47619 | 80 | 0.689864 | false |
ZeitOnline/zeit.content.article
|
src/zeit/content/article/browser/form.py
|
1
|
4236
|
from zeit.cms.checkout.interfaces import ILocalContent
from zeit.content.article.i18n import MessageFactory as _
import gocept.form.grouped
import uuid
import zeit.cms.browser.form
import zeit.cms.content.browser.form
import zeit.cms.interfaces
import zeit.cms.settings.interfaces
import zeit.content.article.interfaces
import zeit.edit.interfaces
import zeit.wysiwyg.interfaces
import zope.browser.interfaces
import zope.event
import zope.formlib.form
import zope.lifecycleevent
base = zeit.cms.content.browser.form.CommonMetadataFormBase
class ArticleFormBase(object):
form_fields = zope.formlib.form.FormFields(
zeit.content.article.interfaces.IArticleMetadata,
zeit.cms.interfaces.ICMSContent).omit('textLength',
'has_recensions')
field_groups = (
base.navigation_fields,
base.head_fields,
base.text_fields,
gocept.form.grouped.RemainingFields(
_('misc.'),
css_class='column-right'),
gocept.form.grouped.Fields(
_("Options"),
base.option_fields.fields + (
'has_recensions', 'artbox_thema'),
css_class='column-right checkboxes'))
class AddAndCheckout(zeit.cms.browser.view.Base):
def __call__(self):
article = self.get_article()
name = '{0}.tmp'.format(uuid.uuid4())
zeit.cms.repository.interfaces.IAutomaticallyRenameable(
article).renameable = True
self.context[name] = article
self.redirect(self.url(self.context[name], '@@checkout'))
def get_article(self):
article = zeit.content.article.article.Article()
settings = zeit.cms.settings.interfaces.IGlobalSettings(
self.context)
article.year = settings.default_year
article.volume = settings.default_volume
article.ressort = self._get_source_value(article, 'ressort')
article.sub_ressort = self._get_source_value(article, 'sub_ressort')
if article.ressort:
article.channels = ((article.ressort, article.sub_ressort),)
article.body.create_item('image')
zope.event.notify(zope.lifecycleevent.ObjectCreatedEvent(article))
return article
def _get_source_value(self, article, fieldname):
token = self.request.form.get('form.%s' % fieldname)
source = zeit.content.article.interfaces.IArticle[fieldname].source
source = source(article)
if not token:
return
terms = zope.component.getMultiAdapter(
(source, self.request), zope.browser.interfaces.ITerms)
return terms.getValue(token)
class EditForm(ArticleFormBase,
zeit.cms.content.browser.form.CommonMetadataEditForm):
title = _('Edit article')
class DisplayForm(ArticleFormBase,
zeit.cms.content.browser.form.CommonMetadataDisplayForm):
title = _('View article metadata')
class WYSIWYGEdit(zeit.cms.browser.form.EditForm):
"""Edit article content using wysiwyg editor."""
form_fields = (
zope.formlib.form.FormFields(
zeit.content.article.interfaces.IArticleMetadata).select(
'supertitle', 'title', 'byline', 'subtitle') +
zope.formlib.form.FormFields(
zeit.wysiwyg.interfaces.IHTMLContent))
field_groups = (
gocept.form.grouped.RemainingFields(
_('Content'),
css_class='full-width wide-widgets'),)
class DispatchToViewOrEdit(zeit.cms.browser.view.Base):
def __call__(self):
in_repository = not ILocalContent.providedBy(self.context)
existing_checkout = self._find_checked_out()
if in_repository and existing_checkout:
self.redirect(self.url(existing_checkout))
else:
view = zope.component.getMultiAdapter(
(self.context, self.request), name='edit.html')
return view()
def _find_checked_out(self):
for item in zeit.cms.checkout.interfaces.IWorkingcopy(None).values():
if not zeit.cms.interfaces.ICMSContent.providedBy(item):
continue
if item.uniqueId == self.context.uniqueId:
return item
|
bsd-3-clause
| 3,718,190,842,757,117,400 | 33.721311 | 77 | 0.652266 | false |
TaliesinSkye/evennia
|
wintersoasis-master/objects/examples/player.py
|
1
|
3539
|
"""
Template module for Players
Copy this module up one level and name it as you like, then
use it as a template to create your own Player class.
To make the default account login default to using a Player
of your new type, change settings.BASE_PLAYER_TYPECLASS to point to
your new class, e.g.
settings.BASE_PLAYER_TYPECLASS = "game.gamesrc.objects.myplayer.MyPlayer"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Player
class ExamplePlayer(Player):
"""
This class describes the actual OOC player (i.e. the user connecting
to the MUD). It does NOT have visual appearance in the game world (that
is handled by the character which is connected to this). Comm channels
are attended/joined using this object.
It can be useful e.g. for storing configuration options for your game, but
should generally not hold any character-related info (that's best handled
on the character level).
Can be set using BASE_PLAYER_TYPECLASS.
* available properties
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
dbobj (Player, read-only) - link to database model. dbobj.typeclass points back to this class
     typeclass (Player, read-only) - this links back to this class as an identifier only. Use self.swap_typeclass() to switch.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, data=None)
swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None, use_nicks=False, location=None, ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hook methods (when re-implementation, remember methods need to have self as first arg)
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_cmdset_get()
at_first_login()
at_post_login()
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
pass
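# --- Hedged example (not part of the original template) ---
# A game-specific player would copy this module, subclass Player and override hooks,
# e.g. (module path and greeting text are made up for illustration):
#
#     class MyPlayer(Player):
#         def at_post_login(self):
#             self.msg("Welcome back!")
#
# and then point settings.BASE_PLAYER_TYPECLASS at "game.gamesrc.objects.myplayer.MyPlayer".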
|
bsd-3-clause
| 298,927,947,473,682,300 | 38.322222 | 129 | 0.716869 | false |
songjun54cm/ml_idiot
|
nn/theano_nn/models/BasicModel.py
|
1
|
3684
|
__author__ = 'SongJun-Dell'
import logging
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
class BasicModel(object):
def __init__(self, state):
self.state = state
self.floatX = theano.config.floatX
self.layers = list()
self.params = list()
self.regularize_params = list()
def save(self, filename):
"""
Save the model to file `filename`
"""
vals = dict([(x.name, x.get_value()) for x in self.params])
np.savez(filename, **vals)
def load(self, filename):
"""
Load the model.
"""
vals = np.load(filename)
for p in self.params:
if p.name in vals:
logging.debug('Loading {} of {}'.format(p.name, p.get_value(borrow=True).shape))
if p.get_value().shape != vals[p.name].shape:
raise Exception('Shape mismatch: {} != {} for {}'.format(p.get_value().shape, vals[p.name].shape, p.name))
p.set_value(vals[p.name])
else:
logging.error('No parameter {} given: default initialization used'.format(p.name))
unknown = set(vals.keys()) - {p.name for p in self.params}
if len(unknown):
logging.error('Unknown parameters {} given'.format(unknown))
def add_layer(self, layer):
self.layers.append(layer)
self.params += layer.params
self.regularize_params += layer.regularize_params
return layer
def check(self):
        num_params = 0
        num_regularize_params = 0
        for layer in self.layers:
            num_params += len(layer.params)
            num_regularize_params += len(layer.regularize_params)
        # assert on the comparison itself, not on a (condition, message) tuple,
        # which would always be truthy and silently disable the check
        assert num_params == len(self.params), 'params number not equal!'
        # assert num_grad_params == len(self.grad_params), 'grad params number not equal!'
        assert num_regularize_params == len(self.regularize_params), 'regularize params number not equal!'
def get_regularization(self):
reg_value = 0
for p in self.regularize_params:
reg_value += p.norm(L=2)
return reg_value
def create_updates(self, loss, params, method='rmsprop', updates=None, gradients=None):
lr = theano.shared(np.float64(self.state['learning_rate']).astype(theano.config.floatX))
grad_clip = theano.shared(np.float64(self.state['grad_clip']).astype(theano.config.floatX))
ngrad_clip = theano.shared(np.float64(-self.state['grad_clip']).astype(theano.config.floatX))
momentum = theano.shared(np.float64(self.state['momentum']).astype(theano.config.floatX))
decay_rate = theano.shared(np.float64(self.state['decay_rate']).astype(theano.config.floatX))
smooth_eps = theano.shared(np.float64(self.state['smooth_eps']).astype(theano.config.floatX))
gcaches = [theano.shared(np.zeros_like(param.get_value(borrow=True))) for param in params]
gparams = T.grad(loss, params) if gradients is None else gradients
if updates is None:
updates = OrderedDict()
if method == 'rmsprop':
for gparam, param, gcache in zip(gparams, params, gcaches):
gparam = T.switch(T.ge(gparam, grad_clip), grad_clip, gparam)
gparam = T.switch(T.le(gparam, ngrad_clip), ngrad_clip, gparam)
updates[gcache] = gcache * decay_rate + (1.0 - decay_rate) * gparam ** 2
gparam = gparam / T.sqrt(updates[gcache] + smooth_eps)
updates[param] = param - gparam * lr
return updates, gcaches, grad_clip, lr, ngrad_clip
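    # Usage sketch (illustrative addition, not part of the original file). Assuming a
    # subclass has declared symbolic inputs `x`, `y` and built a scalar theano
    # expression `loss` over self.params, the updates returned above would typically
    # be compiled into a training step like this (variable names are hypothetical):
    #
    #   updates, gcaches, grad_clip, lr, ngrad_clip = model.create_updates(loss, model.params)
    #   train_fn = theano.function(inputs=[x, y], outputs=loss, updates=updates)
    #   batch_cost = train_fn(x_batch, y_batch)  # one RMSprop step with gradient clipping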
|
gpl-3.0
| -3,854,536,841,698,917,000 | 42.857143 | 126 | 0.602334 | false |
lief-project/LIEF
|
tests/vdex/vdex_test.py
|
1
|
3828
|
#!/usr/bin/env python
import json
import logging
import os
import pprint
import unittest
from unittest import TestCase
import lief
from utils import get_sample
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
lief.logging.set_level(lief.logging.LOGGING_LEVEL.DEBUG)
class TestVDEX(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
def test_vdex06(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
        # 1 Dex File registered
self.assertEqual(len(telecom.dex_files), 1)
dex_file = telecom.dex_files[0]
dex2dex_json_info_lhs = json.loads(dex_file.dex2dex_json_info)
json_test_path = os.path.join(CURRENT_DIR, "VDEX_06_AArch64_Telecom_quickinfo.json")
dex2dex_json_info_rhs = None
#self.maxDiff = None
with open(json_test_path, 'r') as f:
dex2dex_json_info_rhs = json.load(f)
self.assertEqual(dex2dex_json_info_lhs, dex2dex_json_info_rhs)
def test_vdex10(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
        # 1 Dex File registered
self.assertEqual(len(telecom.dex_files), 1)
dex_file = telecom.dex_files[0]
dex2dex_json_info_lhs = json.loads(dex_file.dex2dex_json_info)
json_test_path = os.path.join(CURRENT_DIR, "VDEX_10_AArch64_Telecom_quickinfo.json")
dex2dex_json_info_rhs = None
self.maxDiff = None
with open(json_test_path, 'r') as f:
dex2dex_json_info_rhs = json.load(f)
self.assertEqual(dex2dex_json_info_lhs, dex2dex_json_info_rhs)
class TestVDEX06(TestCase):
def test_header(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
header = telecom.header
self.assertEqual(header.magic, [118, 100, 101, 120])
self.assertEqual(header.version, 6)
self.assertEqual(header.nb_dex_files, 1)
self.assertEqual(header.dex_size, 940500)
self.assertEqual(header.quickening_info_size, 18104)
self.assertEqual(header.verifier_deps_size, 11580)
def test_dex_files(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
h = hash(telecom.dex_files[0])
h_file = lief.hash(telecom.dex_files[0].raw(False))
h_file_dopt = lief.hash(telecom.dex_files[0].raw(True))
#self.assertEqual(h, 8527372568967457956)
#self.assertEqual(h_file, 18446744072392183797)
#self.assertEqual(h_file_dopt, 18446744073629421797)
class TestVDEX10(TestCase):
def test_header(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
header = telecom.header
self.assertEqual(header.magic, [118, 100, 101, 120])
self.assertEqual(header.version, 10)
self.assertEqual(header.nb_dex_files, 1)
self.assertEqual(header.dex_size, 1421904)
self.assertEqual(header.quickening_info_size, 584)
self.assertEqual(header.verifier_deps_size, 18988)
def test_dex_files(self):
telecom = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
h = hash(telecom.dex_files[0])
h_file = lief.hash(telecom.dex_files[0].raw(False))
h_file_dopt = lief.hash(telecom.dex_files[0].raw(True))
#self.assertEqual(h, 4434625889427456908)
#self.assertEqual(h_file, 18446744071715884987)
#self.assertEqual(h_file_dopt, 18446744072171126186)
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
|
apache-2.0
| 1,116,827,325,749,575,000 | 31.440678 | 92 | 0.651515 | false |
rschenck/Capsid_IDP_Classifier
|
development/tuning_and_validating.py
|
1
|
9852
|
#!/usr/bin/env python
import sys
import operator
import pandas as pd
import numpy as np
from sklearn import cross_validation
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from scipy import interp
from dataset import load_data
# obtains the classifications from the final curated dataset
def get_targets():
with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_TABLE.csv','r') as table:
typed = {}
for line in table:
line = line.split(',')
acc = line[1].rstrip(' ')
typed.update({acc:line[2]})
return typed
# obtain FINAL_DATASET for model (all data)
def get_data():
with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_SCORES.csv', 'r') as scores:
scores = scores.readlines()
formatted = []
for item in scores:
item = item.rstrip('\n')
item = item.split(',')
sample = [item[0]]
for i in range(1, len(item)):
ind = float(item[i])
sample.append(ind)
formatted.append(sample)
scores = None
return formatted
# get arrays after fetching the proper classification and getting that classifications set of scores
def get_arrays(types, scores):
order_types = []
out_scores = []
for item in scores:
acc = item[0]
ctype = types[acc]
order_types.append(ctype)
del item[0]
out_scores.append(item)
# the arrays needed for cross validation
type_array = np.asarray(order_types)
scores = np.asarray(out_scores)
# cleanup
item = None
	order_types = None
out_scores = None
return scores, type_array
# ExtraTreesClassifier model
def extratrees_model(x, y):
clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
clf = clf.fit(x, y)
return clf
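# Illustrative usage (added sketch, not from the original script): the fitted
# classifier follows the standard scikit-learn API, so with hypothetical inputs
#   clf = extratrees_model(data, targets)
#   predicted_labels = clf.predict(data[:5])         # e.g. array(['Type A', ...])
#   class_probabilities = clf.predict_proba(data[:5])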
# Voting model
def results_vote(x, y):
pass
# Section for running loops on different parameters
def tune_model_parameters(data, targets):
# cross validate and tuning of the ExtraTreesClassifier parameters
my_range = range(1,20)
n_scores = []
for n in my_range:
		clf = ExtraTreesClassifier(n_estimators=n, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
scores = cross_validation.cross_val_score(clf, data, targets, cv=10, scoring='accuracy')
n_scores.append(scores.mean())
plt.plot(my_range,n_scores)
plt.xlabel('Number of Trees in the Forest')
plt.ylabel('Cross-Validated Accuracy (10-fold Mean)')
plt.show()
#plt.savefig('/Users/ryan/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/max_features_10_126.png', bbox_inches = 'tight')
# get the parameter with the maximum mean output
m = max(n_scores)
mi = min(n_scores)
print 'Max Accuracy: ' + repr(m)
index = [i for i, j in enumerate(n_scores) if j == m]
for i in index:
print 'Parameter value max: ' + repr(my_range[i])
indexmi = [i for i, j in enumerate(n_scores) if j == mi]
print 'Min Accuracy: ' + repr(mi)
for i in indexmi:
print 'Parameter value min: ' + repr(my_range[i])
# get ROC curves for the predictions
def get_roc(data, targets):
# binarize the classifactions
bi_targets = label_binarize(targets, classes=['Type A', 'Type B', 'Neither'])
#print bi_targets
#print targets
n_classes = bi_targets.shape[1]
#print n_classes
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, bi_targets, train_size=.8)
# convert array to array of strings instead of arrays of arrays for the classifier (for the weights)
string_test = []
for i in range(0, len(y_train)):
string_test.append(str(y_train[i]))
string_test = np.asarray(string_test)
	clf = ExtraTreesClassifier(n_estimators=25, class_weight={"[1 0 0]":0.4,"[0 1 0]":0.5,"[0 0 1]":0.1}, bootstrap=False, max_features=125, criterion='gini', n_jobs = -1)
model = clf.fit(X_train, string_test)
y_score = model.predict(X_test)
# get output of scores from string list into a np array
array_scores = []
for item in y_score:
ind = item.split(' ')
ind0 = ind[0].lstrip('[')
ind1 = ind[1]
ind2 = ind[2].rstrip(']')
ind = [int(ind0),int(ind1), int(ind2)]
array_scores.append(ind)
array_scores = np.asarray(array_scores)
print array_scores
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], array_scores[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), array_scores.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
'''
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristics')
plt.legend(loc="lower right")
plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/ROC_curves.eps', bbox_inches = 'tight')
# plot confusion matrices
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Greens):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def cm_model_p1(X_train, y_train):
clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
model = clf.fit(X_train, y_train)
return model
def cm_model_p2(model, X_test):
# generate 100 predictions and vote for the majority for final prediction
hundred_pred = []
for i in range(0,100):
y_pred = model.predict(X_test)
hundred_pred.append(y_pred)
final_pred = []
for i in range(0, len(hundred_pred[0])):
types = []
for k,t in enumerate(hundred_pred):
types.append(hundred_pred[k][i])
counts = [types.count('Type A'),types.count('Type B'),types.count('Neither')]
index, value = max(enumerate(counts), key=operator.itemgetter(1))
if index == 0:
final_pred.append('Type A')
elif index == 1:
final_pred.append('Type B')
elif index == 2:
final_pred.append('Neither')
else:
pass
y_pred = np.asarray(final_pred)
return y_pred
# Generate confusion matrix
def get_conf_matrix(data, targets):
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, targets, train_size=.8)
# gets the model for predictions
model = cm_model_p1(X_train, y_train)
# generate 100 confusion matrices, get mean value for each
out_cm = np.zeros((3,3))
for i in range(0,100):
y_pred = cm_model_p2(model, X_test)
# Compute confusion matrix
labels = ['Type A', 'Type B', 'Neither']
cm = confusion_matrix(y_test, y_pred, labels=labels)
np.set_printoptions(precision=2)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
out_cm += cm_normalized
print out_cm
cm_normalized = np.divide(out_cm, 100.0)
print('Normalized confusion matrix (Mean of 100 predictions)')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, labels, title='Normalized confusion matrix')
# plt.show()
plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/confusion_matrix_RYANFINAL_100mean.eps', bbox_inches = 'tight')
def main():
'''
# Use these three to get the data loaded, targets loaded, and the accessions stripped (Otherwise use dataset.py load_data())
# get classifications
type_dict = get_targets()
# load data
scores = get_data()
# get arrays of scores and targets
data, targets = get_arrays(type_dict, scores)
'''
data, targets = load_data()
# tune model parameters
#tune_model_parameters(data,targets)
# get ROC curves
#get_roc(data, targets)
# get confusion matrix
get_conf_matrix(data, targets)
'''I WANT TO RE-RUN the ROC curves and the Confusion matrix data using predictions from a cross-validation rather than train/test_split'''
if __name__ == "__main__":
main()
|
gpl-2.0
| 384,085,018,262,306,300 | 31.411184 | 168 | 0.685546 | false |
cwaldbieser/txcas
|
txcas/couchdb_ticket_store.py
|
1
|
29177
|
# Standard library
from __future__ import print_function
import datetime
from functools import partial
import json
import random
import string
import sys
from textwrap import dedent
import uuid
from xml.sax.saxutils import escape as xml_escape
# Application modules
from txcas.ca_trust import createCustomPolicyFactoryFromPEMs
from txcas.exceptions import (
CASError, InvalidTicket, InvalidService,
NotSSOService, InvalidTicketSpec)
from txcas.http import (
createNonVerifyingHTTPClient, createVerifyingHTTPClient)
from txcas.interface import (
ITicketStore, ITicketStoreFactory,
IServiceManagerAcceptor)
from txcas.settings import get_bool, export_settings_to_dict, load_settings
from txcas.urls import are_urls_equal
from txcas.utils import (
filter_args, get_missing_args, http_status_filter, unwrap_failures)
# External modules
from dateutil.parser import parse as parse_date
import treq
from twisted.internet import defer
from twisted.internet.task import LoopingCall
from twisted.plugin import IPlugin
from twisted.python import log
from twisted.web.http_headers import Headers
from zope.interface import implements
class CouchDBError(Exception):
pass
class CouchDBTicketStoreFactory(object):
implements(IPlugin, ITicketStoreFactory)
tag = "couchdb_ticket_store"
opt_help = dedent('''\
    A ticket store that manages all CAS tickets in an external
CouchDB database.
Any tickets in the store when the CAS process is stopped
are retained when it is restarted.
Valid options include:
- couch_host
    - couch_port
- couch_db
- couch_user
- couch_passwd
- use_https
- verify_cert
- ca_cert
- lt_lifespan
- st_lifespan
- pt_lifespan
- tgt_lifespan
- pgt_lifespan
- ticket_size
''')
opt_usage = '''A colon-separated key=value list.'''
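    # Example argstring (illustrative, not from the original source) in the
    # colon-separated key=value format parsed by generateTicketStore below:
    #   "couch_host=127.0.0.1:couch_port=5984:couch_db=cas:couch_user=admin:couch_passwd=secret:use_https=0"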
def generateTicketStore(self, argstring=""):
scp = load_settings('cas', syspath='/etc/cas')
settings = export_settings_to_dict(scp)
ts_props = settings.get('CAS', {})
ts_settings = settings.get('CouchDB', {})
settings_xlate = {
'host': 'couch_host',
'port': 'couch_port',
'db': 'couch_db',
'user': 'couch_user',
'passwd': 'couch_passwd',
'https': 'use_https',
'debug': '_debug',
}
temp = {}
for k, v in ts_settings.iteritems():
k = settings_xlate.get(k, k)
temp[k] = v
ts_settings = temp
del temp
if argstring.strip() != "":
argdict = dict((x.split('=') for x in argstring.split(':')))
ts_settings.update(argdict)
missing = get_missing_args(
CouchDBTicketStore.__init__, ts_settings, ['self'])
if len(missing) > 0:
sys.stderr.write(
"[ERROR][CouchDBTicketStore] "
"Missing the following settings: %s" % ', '.join(missing))
sys.stderr.write('\n')
sys.exit(1)
props = (
'lt_lifespan', 'st_lifespan', 'pt_lifespan',
'tgt_lifespan', 'pgt_lifespan', 'ticket_size', '_debug')
ts_props = dict((prop, int(ts_props[prop])) for prop in props if prop in ts_props)
filter_args(CouchDBTicketStore.__init__, ts_settings, ['self'])
if 'couch_port' in ts_settings:
ts_settings['couch_port'] = int(ts_settings['couch_port'])
if 'use_https' in ts_settings:
ts_settings['use_https'] = get_bool(ts_settings['use_https'])
if 'verify_cert' in ts_settings:
ts_settings['verify_cert'] = get_bool(ts_settings['verify_cert'])
if '_debug' in ts_settings:
ts_settings['_debug'] = get_bool(ts_settings['_debug'])
obj = CouchDBTicketStore(**ts_settings)
for prop, value in ts_props.iteritems():
setattr(obj, prop, value)
buf = ["[CONFIG][CouchDBTicketStore] Settings:"]
d = dict(ts_settings)
d.update(ts_props)
for k in sorted(d.keys()):
v = d[k]
if k == 'couch_passwd':
v = '*******'
buf.append(" - %s: %s" % (k, v))
sys.stderr.write('\n'.join(buf))
sys.stderr.write('\n')
return obj
class CouchDBTicketStore(object):
"""
A ticket store that uses an external CouchDB.
"""
implements(IPlugin, ITicketStore, IServiceManagerAcceptor)
lt_lifespan = 60*5
st_lifespan = 10
pt_lifespan = 10
tgt_lifespan = 60 * 60 * 24 * 2
pgt_lifespan = 60 * 60 * 2
charset = string.ascii_letters + string.digits + '-'
ticket_size = 256
_check_expired_interval = 60 * 1
service_manager = None
_expired_margin = 60*2
_expirationLoop = None
def __init__(self, couch_host, couch_port, couch_db,
couch_user, couch_passwd, use_https=True,
reactor=None, _debug=False, verify_cert=True,
ca_cert=None):
if reactor is None:
from twisted.internet import reactor
self.reactor = reactor
self._debug = _debug
self._expire_callback = (lambda ticket, data, explicit: None)
self._couch_host = couch_host
self._couch_port = couch_port
self._couch_db = couch_db
self._couch_user = couch_user
self._couch_passwd = couch_passwd
if verify_cert:
if ca_cert:
policy_factory = createCustomPolicyFactoryFromPEMs(ca_cert)
else:
policy_factory = None
self.httpClientFactory = partial(
createVerifyingHTTPClient, policy_factory=policy_factory)
else:
self.httpClientFactory = createNonVerifyingHTTPClient
if use_https:
self._scheme = 'https://'
else:
self._scheme = 'http://'
self.createExpirationChecker()
def createExpirationChecker(self):
if self._expirationLoop is not None:
self._expirationLoop.stop()
check_expired_interval = self.check_expired_interval
if check_expired_interval == 0:
self._expirationLoop = None
else:
expirationLoop = LoopingCall(self._clean_expired)
expirationLoop.clock = self.reactor
expirationLoop.start(self.check_expired_interval, now=False)
self._expirationLoop = expirationLoop
@property
def check_expired_interval(self):
return self._check_expired_interval
@check_expired_interval.setter
def check_expired_interval(self, value):
self._check_expired_interval = value
self.createExpirationChecker()
@defer.inlineCallbacks
def _clean_expired(self):
"""
Clean up any expired tickets.
"""
try:
url = '''%(scheme)s%(host)s:%(port)s/%(db)s/_design/views/_view/get_by_expires''' % {
'scheme': self._scheme,
'host': self._couch_host,
'port': self._couch_port,
'db': self._couch_db}
url = url.encode('utf-8')
earliest = datetime.datetime.today() - datetime.timedelta(seconds=self._expired_margin)
params = {
'descending': 'true',
'startkey': json.dumps(earliest.strftime("%Y-%m-%dT%H:%M:%S")),
}
self.debug("[DEBUG][CouchDB] _clean_expired(), url: %s" % url)
self.debug("[DEBUG][CouchDB] _clean_expired(), params: %s" % str(params))
httpClient = self.httpClientFactory(self.reactor)
response = yield httpClient.get(url,
params=params,
headers=Headers({'Accept': ['application/json']}),
auth=(self._couch_user, self._couch_passwd))
response = yield http_status_filter(response, [(200,200)], CouchDBError)
doc = yield treq.json_content(response)
rows = doc[u'rows']
if len(rows) > 0:
del_docs = []
for row in rows:
ticket_id = row[u'value']
try:
yield self._expireTicket(ticket_id)
except CouchDBError as ex:
log.msg("CouchDB error while attempting to delete expired tickets.")
log.err(ex)
except Exception as ex:
log.err(ex)
def _getServiceValidator(self):
service_mgr = self.service_manager
if service_mgr is None:
return (lambda x: True)
else:
return service_mgr.isValidService
def _getServiceSSOPredicate(self):
service_mgr = self.service_manager
if service_mgr is None:
return (lambda x: True)
else:
return service_mgr.isSSOService
def debug(self, msg):
if self._debug:
log.msg(msg)
def _validService(self, service):
def cb(result):
if not result:
return defer.fail(InvalidService(
"Service '%s' is not allowed by this CAS service." % service))
return defer.succeed(service)
return defer.maybeDeferred(self._getServiceValidator(), service).addCallback(cb)
def _isSSOService(self, service):
def cb(result):
if not result:
return defer.fail(NotSSOService(service))
return defer.maybeDeferred(self._getServiceSSOPredicate(), service).addCallback(cb)
def _generate(self, prefix):
r = prefix
size = self.ticket_size
while len(r) < size:
r += random.choice(self.charset)
return r
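    # For illustration (comment added, not in the original module): with the default
    # ticket_size of 256, self._generate('ST-') returns a 256-character string that
    # starts with 'ST-' and is padded with random characters from self.charset.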
def _mkTicket(self, prefix, data, timeout):
"""
Create a ticket prefixed with C{prefix}
The ticket will expire after my class' C{lifespan} seconds.
@param prefix: String prefix for the token.
@param data: Data associated with this ticket (which will be returned
when L{_useTicket} is called).
"""
ticket = self._generate(prefix)
data['ticket_id'] = ticket
expires = datetime.datetime.today() + datetime.timedelta(seconds=timeout)
data[u'expires'] = expires.strftime('%Y-%m-%dT%H:%M:%S')
if 'pgts' in data:
data[u'pgts'] = list(data['pgts'])
url = '''%(scheme)s%(host)s:%(port)s/%(db)s''' % {
'scheme': self._scheme,
'host': self._couch_host,
'port': self._couch_port,
'db': self._couch_db}
url = url.encode('utf-8')
doc = json.dumps(data)
self.debug("[DEBUG][CouchDB] _mkTicket(): url: %s" % url)
self.debug("[DEBUG][CouchDB] _mkTicket(): doc: %s" % doc)
def return_ticket(result, ticket):
self.debug("[DEBUG][CouchDB] _mkTicket(), ticket: %s" % ticket)
return ticket
httpClient = self.httpClientFactory(self.reactor)
d = httpClient.post(url, data=doc, auth=(self._couch_user, self._couch_passwd),
headers=Headers({
'Accept': ['application/json'],
'Content-Type': ['application/json']}))
d.addCallback(http_status_filter, [(201,201)], CouchDBError)
d.addCallback(treq.content)
d.addCallback(return_ticket, ticket)
return d
@defer.inlineCallbacks
def _fetch_ticket(self, ticket):
"""
Fetch a ticket representation from CouchDB.
"""
url = '''%(scheme)s%(host)s:%(port)s/%(db)s/_design/views/_view/get_ticket''' % {
'scheme': self._scheme,
'host': self._couch_host,
'port': self._couch_port,
'db': self._couch_db}
url = url.encode('utf-8')
params = {'key': json.dumps(ticket.encode('utf-8'))}
self.debug("[DEBUG][CouchDB] _fetch_ticket(), url: %s" % url)
self.debug("[DEBUG][CouchDB] _fetch_ticket(), params: %s" % str(params))
httpClient = self.httpClientFactory(self.reactor)
response = yield httpClient.get(url,
params=params,
headers=Headers({'Accept': ['application/json']}),
auth=(self._couch_user, self._couch_passwd))
response = yield http_status_filter(response, [(200,200)], CouchDBError)
doc = yield treq.json_content(response)
rows = doc[u'rows']
if len(rows) > 0:
entry = rows[0][u'value']
entry[u'expires'] = parse_date(entry[u'expires'])
if u'pgts' in entry:
entry[u'pgts'] = set(entry[u'pgts'])
defer.returnValue(entry)
defer.returnValue(None)
@defer.inlineCallbacks
def _update_ticket(self, _id, _rev, data):
"""
Update a ticket in CouchDB.
"""
data[u'expires'] = data[u'expires'].strftime('%Y-%m-%dT%H:%M:%S')
if u'pgts' in data:
data[u'pgts'] = list(data[u'pgts'])
url = '''%(scheme)s%(host)s:%(port)s/%(db)s/%(docid)s''' % {
'scheme': self._scheme,
'host': self._couch_host,
'port': self._couch_port,
'db': self._couch_db,
'docid': _id}
url = url.encode('utf-8')
data['_rev'] = _rev.encode('utf-8')
try:
doc = json.dumps(data)
except Exception as ex:
self.debug("[DEBUG][CouchDB] Failed to serialze doc:\n%s" % (str(data)))
raise
httpClient = self.httpClientFactory(self.reactor)
self.debug('''[DEBUG][CouchDB] request_method="PUT" url="{0}"'''.format(url))
self.debug('''[DEBUG][CouchDB] document => {0}'''.format(data))
response = yield httpClient.put(
url,
data=doc,
auth=(self._couch_user, self._couch_passwd),
headers=Headers({
'Accept': ['application/json'],
'Content-Type': ['application/json']}))
response = yield http_status_filter(response, [(201,201)], CouchDBError)
doc = yield treq.json_content(response)
defer.returnValue(None)
@defer.inlineCallbacks
def _delete_ticket(self, _id, _rev):
"""
Delete a ticket from CouchDB.
"""
url = '''%(scheme)s%(host)s:%(port)s/%(db)s/%(docid)s''' % {
'scheme': self._scheme,
'host': self._couch_host,
'port': self._couch_port,
'db': self._couch_db,
'docid': _id}
url = url.encode('utf-8')
params = {'rev': _rev}
self.debug('[DEBUG][CouchDB] _delete_ticket(), url: %s' % url)
self.debug('[DEBUG][CouchDB] _delete_ticket(), params: %s' % str(params))
httpClient = self.httpClientFactory(self.reactor)
response = yield httpClient.delete(
url,
params=params,
auth=(self._couch_user, self._couch_passwd),
headers=Headers({'Accept': ['application/json']}))
response = yield http_status_filter(response, [(200,200)], CouchDBError)
resp_text = yield treq.content(response)
defer.returnValue(None)
@defer.inlineCallbacks
def _expireTicket(self, ticket):
"""
This function should only be called when a ticket is expired via
a timeout or indirectly (e.g. TGT expires so derived PGTs are expired).
"""
entry = yield self._fetch_ticket(ticket)
if entry is not None:
_id = entry['_id']
_rev = entry['_rev']
del entry[u'_id']
del entry[u'_rev']
yield self._delete_ticket(_id, _rev)
yield self._expire_callback(ticket, entry, False)
defer.returnValue(None)
@defer.inlineCallbacks
def _useTicket(self, ticket, _consume=True):
"""
Consume a ticket, producing the data that was associated with the ticket
when it was created.
@raise InvalidTicket: If the ticket doesn't exist or is no longer valid.
"""
entry = yield self._fetch_ticket(ticket)
if entry is not None:
_id = entry[u'_id']
_rev = entry[u'_rev']
expires = entry[u'expires']
now = datetime.datetime.today()
if now >= expires:
raise InvalidTicket("Ticket has expired.")
del entry[u'_id']
del entry[u'_rev']
if _consume:
yield self._delete_ticket(_id, _rev)
yield self._expire_callback(ticket, entry, True)
else:
                if ticket.startswith(u'PT-'):
                    timeout = self.pt_lifespan
                elif ticket.startswith(u'ST-'):
                    timeout = self.st_lifespan
elif ticket.startswith(u'LT-'):
timeout = self.lt_lifespan
elif ticket.startswith(u'PGT-'):
timeout = self.pgt_lifespan
elif ticket.startswith(u'TGC-'):
timeout = self.tgt_lifespan
else:
timeout = 10
now = datetime.datetime.today()
expires = now + datetime.timedelta(seconds=timeout)
entry[u'expires'] = expires
yield self._update_ticket(_id, _rev, entry)
defer.returnValue(entry)
else:
raise InvalidTicket("Ticket '%s' does not exist." % ticket)
@defer.inlineCallbacks
def _informTGTOfService(self, st, service, tgt):
"""
Record in the TGT that a service has requested an ST.
"""
entry = yield self._fetch_ticket(tgt)
if entry is None:
raise InvalidTicket("Ticket '%s' does not exist." % tgt)
_id = entry[u'_id']
_rev = entry[u'_rev']
del entry[u'_id']
del entry[u'_rev']
services = entry.setdefault('services', {})
services[service] = st
yield self._update_ticket(_id, _rev, entry)
defer.returnValue(st)
@defer.inlineCallbacks
def _informTGTOfPGT(self, pgt, tgt):
"""
        Record in the TGT that a PGT was created from it.
"""
if not pgt.startswith("PGT-"):
raise InvalidTicket("PGT '%s' is not valid." % pgt)
if not tgt.startswith("TGC-"):
raise InvalidTicket("TGT '%s' is not valid." % tgt)
entry = yield self._fetch_ticket(tgt)
if entry is None:
raise InvalidTicket("Ticket '%s' does not exist." % tgt)
_id = entry[u'_id']
_rev = entry[u'_rev']
del entry[u'_id']
del entry[u'_rev']
pgts = entry.setdefault('pgts', set([]))
pgts.add(pgt)
yield self._update_ticket(_id, _rev, entry)
defer.returnValue(pgt)
def mkLoginTicket(self, service):
"""
Create a login ticket.
"""
d = self._validService(service)
def cb(_):
return self._mkTicket('LT-', {
'service': service,
}, timeout=self.lt_lifespan)
return d.addCallback(cb)
def useLoginTicket(self, ticket, service):
"""
Use a login ticket.
"""
if not ticket.startswith("LT-"):
return defer.fail(InvalidTicket())
def doit(_):
d = self._useTicket(ticket)
def cb(data):
recorded_service = data[u'service']
if not are_urls_equal(recorded_service, service):
return defer.fail(InvalidService(
"Issued service '%s' does not match presented service '%s'." % (
recorded_service, service)))
return d.addCallback(cb)
return self._validService(service).addCallback(doit)
@defer.inlineCallbacks
def mkServiceTicket(self, service, tgt_id, primaryCredentials):
"""
Create a service ticket
"""
if not tgt_id.startswith("TGC-"):
raise InvalidTicket()
entry = yield self._fetch_ticket(tgt_id)
if entry is None:
raise InvalidTicket("Invalid TGT '%s'." % tgt_id)
del entry[u'_id']
del entry[u'_rev']
tgt = entry
yield self._validService(service)
ticket = yield self._mkTicket('ST-', {
'avatar_id': tgt['avatar_id'],
'service': service,
'primary_credentials': primaryCredentials,
'tgt': tgt_id,
}, self.st_lifespan)
#NOTE: The TGT data has just been fetched, and we are going to fetch it
# *again* in the call to `_informTGTOfService`. Seems like we should be
# able to skip that 2nd fetch for efficiency.
yield self._informTGTOfService(ticket, service, tgt_id)
defer.returnValue(ticket)
def useServiceTicket(self, ticket, service, requirePrimaryCredentials=False):
"""
Get the data associated with a service ticket.
"""
if not ticket.startswith("ST-"):
return defer.fail(InvalidTicketSpec())
return self._useServiceOrProxyTicket(ticket, service, requirePrimaryCredentials)
@defer.inlineCallbacks
def mkProxyTicket(self, service, pgt):
"""
Create a proxy ticket
"""
if not pgt.startswith("PGT-"):
raise InvalidTicket()
pgt_info = yield self._fetch_ticket(pgt)
if pgt_info is None:
raise InvalidTicket("PGT '%s' is invalid." % pgt)
pgturl = pgt_info['pgturl']
try:
tgt = pgt_info[u'tgt']
except KeyError:
raise InvalidTicket("PGT '%s' is invalid." % pgt)
yield self._validService(service)
pt = yield self._mkTicket('PT-', {
'avatar_id': pgt_info[u'avatar_id'],
'service': service,
'primary_credentials': False,
'pgturl': pgturl,
'pgt': pgt,
'tgt': tgt,
'proxy_chain': pgt_info[u'proxy_chain'],
}, self.pt_lifespan)
yield self._informTGTOfService(pt, service, tgt)
defer.returnValue(pt)
def useServiceOrProxyTicket(self, ticket, service, requirePrimaryCredentials=False):
"""
Get the data associated with a service ticket.
"""
return self._useServiceOrProxyTicket(ticket, service, requirePrimaryCredentials, True)
def _useServiceOrProxyTicket(self, ticket, service, requirePrimaryCredentials=False, _allow_pt=False):
"""
Get the data associated with a service or proxy ticket.
"""
if not ticket.startswith("ST-"):
            if not (ticket.startswith("PT-") and _allow_pt):
return defer.fail(InvalidTicket())
def doit(_):
d = self._useTicket(ticket)
def cb(data):
if not are_urls_equal(data[u'service'], service):
log.msg("[WARNING] ST service '{0}' != /serviceValidate service '{1}'".format(
data[u'service'], service))
return defer.fail(InvalidService(
"Issued service does not match validation service."))
if requirePrimaryCredentials and data['primary_credentials'] == False:
return defer.fail(InvalidTicket("This ticket was not issued in response to primary credentials."))
return data
return d.addCallback(cb)
return self._validService(service).addCallback(doit)
@defer.inlineCallbacks
def mkProxyGrantingTicket(self, service, ticket, tgt, pgturl, proxy_chain=None):
"""
Create Proxy Granting Ticket
"""
if not (ticket.startswith("ST-") or ticket.startswith("PT-")):
raise InvalidTicket()
tgt_info = yield self._fetch_ticket(tgt)
if tgt_info is None:
raise InvalidTicket("TGT '%s' is invalid." % tgt)
del tgt_info[u'_id']
del tgt_info[u'_rev']
yield self._validService(service)
charset = self.charset
iou = self._generate('PGTIOU-')
data = {
'avatar_id': tgt_info['avatar_id'],
'service': service,
'st_or_pt': ticket,
'iou': iou,
'tgt': tgt,
'pgturl': pgturl,
}
if proxy_chain is not None:
new_proxy_chain = list(proxy_chain)
new_proxy_chain.append(pgturl)
else:
new_proxy_chain = [pgturl]
data[u'proxy_chain'] = new_proxy_chain
pgt = yield self._mkTicket('PGT-', data, timeout=self.pgt_lifespan)
# NOTE: We just fetched the TGC above and as soon as we call
# `_informTGTOfPGT`, we will immediately fetch the TGT again.
# Seems like there ought to be a way to use the just-fetched TGC.
yield self._informTGTOfPGT(pgt, tgt)
defer.returnValue({'iou': iou, 'pgt': pgt})
def mkTicketGrantingCookie(self, avatar_id):
"""
Create a ticket to be used in a cookie.
"""
return self._mkTicket('TGC-', {'avatar_id': avatar_id}, timeout=self.tgt_lifespan)
def useTicketGrantingCookie(self, ticket, service):
"""
Get the user associated with this ticket.
"""
def use_ticket_cb(_):
return self._useTicket(ticket, _consume=False)
if service != "":
return self._isSSOService(service).addCallback(use_ticket_cb)
else:
return use_ticket_cb(None)
def expireTGT(self, ticket):
"""
Expire the TGT identified by 'ticket'.
"""
if not ticket.startswith("TGC-"):
return defer.fail(InvalidTicket())
d = self._useTicket(ticket)
def cb(data):
"""
Expire associated PGTs.
Perform SLO.
"""
#SLO
services = data.get('services', {})
self.reactor.callLater(0.0, self._notifyServicesSLO, services)
#PGTs
pgts = data.get(u'pgts', {})
for pgt in pgts:
self._expireTicket(pgt)
return None
def eb(failure):
failure.trap(InvalidTicket)
return d.addCallback(cb).addErrback(eb)
_samlLogoutTemplate = dedent("""\
<samlp:LogoutRequest
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="%(identifier)s"
Version="2.0"
IssueInstant="%(issue_instant)s">
<saml:NameID>@NOT_USED@</saml:NameID>
<samlp:SessionIndex>%(service_ticket)s</samlp:SessionIndex>
</samlp:LogoutRequest>
""")
def _notifyServicesSLO(self, services):
template = self._samlLogoutTemplate
def logerr(err, service):
log.msg("Error sending SLO to service '%s'." % service)
log.err(err)
errs = unwrap_failures(err)
for error in errs:
log.err(error)
return err
dlist = []
for service, st in services.iteritems():
dt = datetime.datetime.utcnow()
issue_instant = dt.strftime("%Y-%m-%dT%H:%M:%S")
identifier = str(uuid.uuid4())
data = template % {
'identifier': xml_escape(identifier),
'issue_instant': xml_escape(issue_instant),
'service_ticket': xml_escape(st)
}
httpClient = self.httpClientFactory(self.reactor)
d = httpClient.post(
service.encode('utf-8'),
headers=Headers({'Content-Type': ['application/xml']}),
data=data.encode('utf-8'),
timeout=30).addCallback(
treq.content).addErrback(
logerr, service)
dlist.append(d)
return defer.DeferredList(dlist, consumeErrors=True)
def register_ticket_expiration_callback(self, callback):
"""
Register a function to be called when a ticket is expired.
The function should take 3 arguments, (ticket, data, explicit).
`ticket` is the ticket ID, `data` is a dict of the ticket data,
and `explicit` is a boolean that indicates whether the ticket
was explicitly expired (e.g. /logout, ST/PT validation) or
implicitly expired (e.g. timeout or parent ticket expired).
"""
self._expire_callback = callback
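    # Example callback (illustrative only, names are hypothetical):
    #
    #   def on_ticket_expired(ticket, data, explicit):
    #       log.msg("Ticket %s expired (explicit=%s)." % (ticket, explicit))
    #
    #   ticket_store.register_ticket_expiration_callback(on_ticket_expired)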
|
gpl-3.0
| -1,356,178,428,908,347,000 | 37.040417 | 118 | 0.546595 | false |
InsightSoftwareConsortium/ITKExamples
|
src/Core/Common/StreamAPipeline/Code.py
|
1
|
1786
|
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
from distutils.version import StrictVersion as VS
if VS(itk.Version.GetITKVersion()) < VS("4.10.0"):
print("ITK 4.10.0 is required.")
sys.exit(1)
if len(sys.argv) != 2:
print("Usage: " + sys.argv[0] + " <NumberOfSplits>")
sys.exit(1)
numberOfSplits = int(sys.argv[1])
Dimension = 2
PixelType = itk.UC
ImageType = itk.Image[PixelType, Dimension]
source = itk.RandomImageSource[ImageType].New()
size = itk.Size[Dimension]()
size.Fill(numberOfSplits)
source.SetSize(size)
monitorFilter = itk.PipelineMonitorImageFilter[ImageType].New()
monitorFilter.SetInput(source.GetOutput())
streamingFilter = itk.StreamingImageFilter[ImageType, ImageType].New()
streamingFilter.SetInput(monitorFilter.GetOutput())
streamingFilter.SetNumberOfStreamDivisions(numberOfSplits)
streamingFilter.Update()
print(
"The output LargestPossibleRegion is: "
+ str(streamingFilter.GetOutput().GetLargestPossibleRegion())
)
print("")
updatedRequestedRegions = monitorFilter.GetUpdatedRequestedRegions()
print("Updated ApplyAFilterOnlyToASpecifiedImageRegion's:")
for ii in range(len(updatedRequestedRegions)):
print(" " + str(updatedRequestedRegions[ii]))
|
apache-2.0
| -5,310,177,088,434,555,000 | 29.271186 | 74 | 0.759798 | false |
saltastro/timDIMM
|
analyze_cube.py
|
1
|
2145
|
#!/usr/bin/env python
import sys
import numpy as np
from find_boxes import rfits, daofind
pixel_scale = 1.22
dx = pixel_scale / 206265.0
rd = 0.13
d = 0.06
lamb = 0.65e-6
def moments(data):
    """Returns (height, strehl, x, y, width_x, width_y):
    the gaussian parameters of a 2D distribution plus its Strehl ratio,
    calculated from the image moments."""
total = data.sum()
X, Y = np.indices(data.shape)
x = (X * data).sum() / total
y = (Y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(
abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()
)
row = data[int(x), :]
width_y = np.sqrt(
abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()
)
height = data.max()
strehl = (height / total) * (4.0 / np.pi) * (lamb / (d * dx)) ** 2
return height, strehl, x, y, width_x, width_y
def seeing(v):
b = rd / d
v = v * (pixel_scale / 206265.0) ** 2.0
k = 0.364 * (1.0 - 0.532 * b ** (-1.0 / 3.0) - 0.024 * b ** (-7.0 / 3.0))
seeing = 206265.0 * 0.98 * ((d / lamb) ** 0.2) * ((v / k) ** 0.6)
return seeing
def old_seeing(v):
v = v * (pixel_scale / 206265.0) ** 2.0
r0 = (2.0 * lamb * lamb * (0.1790 * (d ** (-1.0 / 3.0)) -
0.0968 * (rd ** (-1.0 / 3.0))
) / v) ** 0.6
seeing = 206265.0 * 0.98 * lamb / r0
return seeing
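# Worked example (hypothetical value, added for illustration): for a differential
# image-motion variance of 0.5 px^2 between the two spots,
#   seeing(0.5)      # estimate using the k response factor defined above
#   old_seeing(0.5)  # older estimate via an explicit Fried parameter r0
# both return the seeing FWHM in arcseconds for d = 0.06 m sub-apertures separated
# by rd = 0.13 m at lambda = 650 nm.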
hdu = rfits(sys.argv[1])
image = hdu.data
hdr = hdu.header
x1 = []
y1 = []
c1 = []
x2 = []
y2 = []
c2 = []
for i in range(image.shape[0]):
n, test = daofind(image[i])
im = image[i]
v1 = np.var(im[0:50, 0:50])
v2 = np.var(im[270:319, 190:240])
v = (v1 + v2) / 2
if n == 2:
x1.append(test[0][0])
y1.append(test[0][1])
x2.append(test[1][0])
y2.append(test[1][1])
x1 = np.array(x1)
x2 = np.array(x2)
y1 = np.array(y1)
y2 = np.array(y2)
r = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
var = np.var(r)
print "Var = %f, Seeing = %f, Old Seeing = %f" % (float(var),
float(seeing(var)),
float(old_seeing(var)))
|
bsd-3-clause
| -1,732,175,487,684,662,800 | 23.94186 | 77 | 0.46993 | false |
nrz/ylikuutio
|
external/bullet3/examples/pybullet/examples/sleeping.py
|
2
|
1235
|
import pybullet as p
import time
useMaximalCoordinates = False
flags = p.URDF_ENABLE_SLEEPING
import pybullet_data
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
p.loadURDF("plane100.urdf", flags=flags, useMaximalCoordinates=useMaximalCoordinates)
#p.loadURDF("cube_small.urdf", [0,0,0.5], flags=flags)
r2d2 = -1
for k in range(5):
for i in range(5):
r2d2 = p.loadURDF("r2d2.urdf", [k * 2, i * 2, 1],
useMaximalCoordinates=useMaximalCoordinates,
flags=p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES + flags)
#enable sleeping: you can pass the flag during URDF loading, or do it afterwards
#p.changeDynamics(r2d2,-1,activationState=p.ACTIVATION_STATE_ENABLE_SLEEPING)
for j in range(p.getNumJoints(r2d2)):
p.setJointMotorControl2(r2d2, j, p.VELOCITY_CONTROL, targetVelocity=0)
print("r2d2=", r2d2)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
timestep = 1. / 240.
p.setTimeStep(timestep)
p.setGravity(0, 0, -10)
while p.isConnected():
p.stepSimulation()
time.sleep(timestep)
#force the object to wake up
p.changeDynamics(r2d2, -1, activationState=p.ACTIVATION_STATE_WAKE_UP)
|
agpl-3.0
| -7,437,638,780,242,373,000 | 32.378378 | 85 | 0.723887 | false |
mboman/vxcage-jobs
|
job-filemagic.py
|
1
|
2312
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import time
import tempfile
from pymongo import MongoClient
import gridfs
try:
import magic
except ImportError:
pass
from utils import get_file, clean_data, get_type, Config
JOBNAME = 'FILEMAGIC'
SLEEPTIME = 1
# create logger
logger = logging.getLogger(JOBNAME)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
logch = logging.StreamHandler()
logch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = \
logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(logch)
client = MongoClient(host=Config().database.dbhost, port=Config().database.dbport)
db = client.vxcage
fs = gridfs.GridFS(db)
while True:
try:
for (sampleno, sample) in \
enumerate(db.fs.files.find({'filetype': {'$exists': False}},
timeout=False)):
try:
logger.info('[%s] Processing sample %s' % (sampleno,
sample['sha256']))
samplekey = {'sha256': sample['sha256']}
# download sample file
logger.debug('[%s] Downloading data' % sampleno)
data = get_file(db, sha256=sample['sha256'])
# Do analysis
logger.debug('[%s] Analysing' % sampleno)
file_type = get_type(data)
# Store results
logger.debug('[%s] Storing results into MongoDB'
% sampleno)
if file_type:
db.fs.files.update(samplekey,
{'$set': {'filetype': file_type}},
upsert=True)
# delete sample file
logger.debug('[%s] Deleting temporary data' % sampleno)
del samplekey
del data
logger.info('[%s] Metadata updated' % sampleno)
except Exception, e:
logger.exception(e)
pass
except Exception, e:
logger.exception(e)
pass
logger.info('Sleeping %s minutes' % SLEEPTIME)
time.sleep(SLEEPTIME * 60)
|
bsd-3-clause
| 7,086,037,901,708,765,000 | 24.130435 | 82 | 0.553201 | false |
blueyed/coveragepy
|
coverage/parser.py
|
1
|
45600
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Code parsing for coverage.py."""
import ast
import collections
import os
import re
import token
import tokenize
from coverage import env
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints, string_class
from coverage.bytecode import CodeObjects
from coverage.debug import short_stack
from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
from coverage.misc import NoSource, NotPython, StopEverything
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser(object):
"""Parse code to find executable lines, excluded lines, etc.
This information is all based on static analysis: no code execution is
involved.
"""
@contract(text='unicode|None')
def __init__(self, text=None, filename=None, exclude=None):
"""
Source can be provided as `text`, the text itself, or `filename`, from
which the text will be read. Excluded lines are those that match
`exclude`, a regex.
"""
assert text or filename, "PythonParser needs either text or filename"
self.filename = filename or "<code>"
self.text = text
if not self.text:
from coverage.python import get_python_source
try:
self.text = get_python_source(self.filename)
except IOError as err:
raise NoSource(
"No source for code: '%s': %s" % (self.filename, err)
)
self.exclude = exclude
# The text lines of the parsed code.
self.lines = self.text.split('\n')
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
self.statements = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
self.excluded = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
self.raw_statements = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
self.raw_excluded = set()
# The line numbers of class and function definitions.
self.raw_classdefs = set()
# The line numbers of docstring lines.
self.raw_docstrings = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
self._multiline = {}
# Lazily-created ByteParser, arc data, and missing arc descriptions.
self._byte_parser = None
self._all_arcs = None
self._missing_arc_fragments = None
@property
def byte_parser(self):
"""Create a ByteParser on demand."""
if not self._byte_parser:
self._byte_parser = ByteParser(self.text, filename=self.filename)
return self._byte_parser
def lines_matching(self, *regexes):
"""Find the lines matching one of a list of regexes.
Returns a set of line numbers, the lines that contain a match for one
of the regexes in `regexes`. The entire line needn't match, just a
part of it.
"""
combined = join_regex(regexes)
if env.PY2:
combined = combined.decode("utf8")
regex_c = re.compile(combined)
matches = set()
for i, ltext in enumerate(self.lines, start=1):
if regex_c.search(ltext):
matches.add(i)
return matches
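    # Illustrative example (not part of the original source): for a parser whose
    # text is "a = 1\nb = 2  # pragma: no cover\nc = 3",
    # self.lines_matching(r"pragma: no cover") returns {2}, since enumeration of
    # self.lines starts at 1.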
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
A handful of attributes are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
self.raw_excluded = self.lines_matching(self.exclude)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
indent = 0
exclude_indent = 0
excluding = False
excluding_decorators = False
prev_toktype = token.INDENT
first_line = None
empty = True
first_on_line = True
tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
if self.show_tokens: # pragma: debugging
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
elif toktype == token.NAME:
if ttext == 'class':
# Class definitions look like branches in the bytecode, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
self.raw_classdefs.add(slineno)
elif toktype == token.OP:
if ttext == ':':
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
self.raw_excluded.add(elineno)
exclude_indent = indent
excluding = True
excluding_decorators = False
elif ttext == '@' and first_on_line:
# A decorator.
if elineno in self.raw_excluded:
excluding_decorators = True
if excluding_decorators:
self.raw_excluded.add(elineno)
elif toktype == token.STRING and prev_toktype == token.INDENT:
# Strings that are first on an indented line are docstrings.
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
for l in range(first_line, elineno+1):
self._multiline[l] = first_line
first_line = None
first_on_line = True
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-whitespace token.
empty = False
if first_line is None:
# The token is not whitespace, and is the first in a
# statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
self.raw_excluded.add(elineno)
first_on_line = False
prev_toktype = toktype
# Find the starts of the executable statements.
if not empty:
self.raw_statements.update(self.byte_parser._find_statements())
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
return self._multiline.get(line, line)
def first_lines(self, lines):
"""Map the line numbers in `lines` to the correct first line of the
statement.
Returns a set of the first lines.
"""
return set(self.first_line(l) for l in lines)
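    # Illustrative example (added, not from the original source): if the multiline
    # map records that line 4 belongs to a statement starting on line 3
    # (self._multiline == {4: 3}), then self.first_lines({3, 4, 7}) == {3, 7}.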
def translate_lines(self, lines):
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
def translate_arcs(self, arcs):
"""Implement `FileReporter.translate_arcs`."""
return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
Sets the .excluded and .statements attributes, normalized to the first
line of multi-line statements.
"""
try:
self._raw_parse()
except (tokenize.TokenError, IndentationError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
lineno = err.args[1][0] # TokenError
raise NotPython(
u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
self.filename, err.args[0], lineno
)
)
self.excluded = self.first_lines(self.raw_excluded)
ignore = self.excluded | self.raw_docstrings
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
def arcs(self):
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
to the first line of multi-line statements.
"""
if self._all_arcs is None:
self._analyze_ast()
return self._all_arcs
def _analyze_ast(self):
"""Run the AstArcAnalyzer and save its results.
`_all_arcs` is the set of arcs in the code.
"""
aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
aaa.analyze()
self._all_arcs = set()
for l1, l2 in aaa.arcs:
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
self._all_arcs.add((fl1, fl2))
self._missing_arc_fragments = aaa.missing_arc_fragments
    def exit_counts(self):
        """Get a count of exits from each line.
Excluded lines are excluded.
"""
exit_counts = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
continue
if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
if l2 in self.excluded:
# Arcs to excluded lines shouldn't count.
continue
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
for l in self.raw_classdefs:
# Ensure key is there: class definitions can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
return exit_counts
def missing_arc_description(self, start, end, executed_arcs=None):
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
self._analyze_ast()
actual_start = start
if (
executed_arcs and
end < 0 and end == -start and
(end, start) not in executed_arcs and
(end, start) in self._missing_arc_fragments
):
# It's a one-line callable, and we never even started it,
# and we have a message about not starting it.
start, end = end, start
fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
msgs = []
for fragment_pair in fragment_pairs:
smsg, emsg = fragment_pair
if emsg is None:
if end < 0:
# Hmm, maybe we have a one-line callable, let's check.
if (-end, end) in self._missing_arc_fragments:
return self.missing_arc_description(-end, end)
emsg = "didn't jump to the function exit"
else:
emsg = "didn't jump to line {lineno}"
emsg = emsg.format(lineno=end)
msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
if smsg is not None:
msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))
msgs.append(msg)
return " or ".join(msgs)
class ByteParser(object):
"""Parse bytecode to understand the structure of code."""
@contract(text='unicode')
def __init__(self, text, code=None, filename=None):
self.text = text
if code:
self.code = code
else:
try:
self.code = compile_unicode(text, filename, "exec")
except SyntaxError as synerr:
raise NotPython(
u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
filename, synerr.msg, synerr.lineno
)
)
# Alternative Python implementations don't always provide all the
# attributes on code objects that we need to do the analysis.
for attr in ['co_lnotab', 'co_firstlineno']:
if not hasattr(self.code, attr):
raise StopEverything( # pragma: only jython
"This implementation of Python doesn't support code analysis.\n"
"Run coverage.py under another Python for this command."
)
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
children = CodeObjects(self.code)
return (ByteParser(self.text, code=c) for c in children)
def _bytes_lines(self):
"""Map byte offsets to line numbers in `code`.
Uses co_lnotab described in Python/compile.c to map byte offsets to
line numbers. Produces a sequence: (b0, l0), (b1, l1), ...
Only byte offsets that correspond to line numbers are included in the
results.
"""
# Adapted from dis.py in the standard library.
byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
yield (byte_num, line_num)
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
yield (byte_num, line_num)
def _find_statements(self):
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
"""
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
yield l
#
# AST analysis
#
class LoopBlock(object):
"""A block on the block stack representing a `for` or `while` loop."""
@contract(start=int)
def __init__(self, start):
# The line number where the loop starts.
self.start = start
# A set of ArcStarts, the arcs from break statements exiting this loop.
self.break_exits = set()
class FunctionBlock(object):
"""A block on the block stack representing a function definition."""
@contract(start=int, name=str)
def __init__(self, start, name):
# The line number where the function starts.
self.start = start
# The name of the function.
self.name = name
class TryBlock(object):
"""A block on the block stack representing a `try` block."""
@contract(handler_start='int|None', final_start='int|None')
def __init__(self, handler_start, final_start):
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
self.final_start = final_start
# The ArcStarts for breaks/continues/returns/raises inside the "try:"
# that need to route through the "finally:" clause.
self.break_from = set()
self.continue_from = set()
self.return_from = set()
self.raise_from = set()
class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
"""The information needed to start an arc.
`lineno` is the line number the arc starts from.
`cause` is an English text fragment used as the `startmsg` for
AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
arc wasn't executed, so should fit well into a sentence of the form,
"Line 17 didn't run because {cause}." The fragment can include "{lineno}"
to have `lineno` interpolated into it.
"""
def __new__(cls, lineno, cause=None):
return super(ArcStart, cls).__new__(cls, lineno, cause)
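# Illustrative example (editor's note, not original code): an arc starting at an
# `if` test on line 17 might be built as
#     ArcStart(17, cause="the condition on line {lineno} was never true")
# and "{lineno}" is interpolated later when reporting why the arc didn't run.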
# Define contract words that PyContract doesn't have.
# ArcStarts is for a list or set of ArcStart's.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
# Turn on AST dumps with an environment variable.
AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
class NodeList(object):
"""A synthetic fictitious node, containing a sequence of nodes.
This is used when collapsing optimized if-statements, to represent the
unconditional execution of one of the clauses.
"""
def __init__(self, body):
self.body = body
self.lineno = body[0].lineno
class AstArcAnalyzer(object):
"""Analyze source text with an AST to find executable code paths."""
@contract(text='unicode', statements=set)
def __init__(self, text, statements, multiline):
self.root_node = ast.parse(neuter_encoding_declaration(text))
# TODO: I think this is happening in too many places.
self.statements = set(multiline.get(l, l) for l in statements)
self.multiline = multiline
if AST_DUMP: # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print("Statements: {0}".format(self.statements))
print("Multiline map: {0}".format(self.multiline))
ast_dump(self.root_node)
self.arcs = set()
# A map from arc pairs to a list of pairs of sentence fragments:
# { (start, end): [(startmsg, endmsg), ...], }
#
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
self.missing_arc_fragments = collections.defaultdict(list)
self.block_stack = []
self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
def analyze(self):
"""Examine the AST tree from `root_node` to determine possible arcs.
This sets the `arcs` attribute to be a set of (from, to) line number
pairs.
"""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, "_code_object__" + node_name, None)
if code_object_handler is not None:
code_object_handler(node)
def add_arc(self, start, end, smsg=None, emsg=None):
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
print(short_stack(limit=6))
self.arcs.add((start, end))
if smsg is not None or emsg is not None:
self.missing_arc_fragments[(start, end)].append((smsg, emsg))
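    # Sketch of the data this builds (added for clarity, not in the original):
    # add_arc(1, 2, smsg="the condition on line {lineno} was never true") puts
    # (1, 2) into self.arcs and appends the pair
    # ("the condition on line {lineno} was never true", None) to
    # self.missing_arc_fragments[(1, 2)].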
def nearest_blocks(self):
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
@contract(returns=int)
def line_for_node(self, node):
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_line__" + node_name, None)
if handler is not None:
return handler(node)
else:
return node.lineno
def _line__Assign(self, node):
return self.line_for_node(node.value)
def _line__Dict(self, node):
# Python 3.5 changed how dict literals are made.
if env.PYVERSION >= (3, 5) and node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
# Unpacked dict literals `{**{'a':1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
return node.lineno
def _line__List(self, node):
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
def _line__Module(self, node):
if node.body:
return self.line_for_node(node.body[0])
else:
# Empty modules have no line number, they always start at 1.
return 1
# The node types that just flow to the next node with no complications.
OK_TO_DEFAULT = set([
"Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
"Import", "ImportFrom", "Nonlocal", "Pass", "Print",
])
@contract(returns='ArcStarts')
def add_arcs(self, node):
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next. Because a
node represents an entire sub-tree (including its children), the exits
from a node can be arbitrarily complex::
if something(1):
if other(2):
doit(3)
else:
doit(5)
There are two exits from line 1: they start at line 3 and line 5.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_handle__" + node_name, None)
if handler is not None:
return handler(node)
else:
# No handler: either it's something that's ok to default (a simple
# statement), or it's something we overlooked. Change this 0 to 1
# to see if it's overlooked.
if 0:
if node_name not in self.OK_TO_DEFAULT:
print("*** Unhandled: {0}".format(node))
# Default for simple statements: one exit from this node.
return set([ArcStart(self.line_for_node(node))])
@one_of("from_start, prev_starts")
@contract(returns='ArcStarts')
def add_body_arcs(self, body, from_start=None, prev_starts=None):
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
be the previous line in flow before this body. `prev_starts` is a set
of ArcStarts that can be the previous line. Only one of them should be
given.
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
prev_starts = set([from_start])
for body_node in body:
lineno = self.line_for_node(body_node)
first_line = self.multiline.get(lineno, lineno)
if first_line not in self.statements:
body_node = self.find_non_missing_node(body_node)
if body_node is None:
continue
lineno = self.line_for_node(body_node)
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.add_arcs(body_node)
return prev_starts
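    # Illustrative note (editor's addition): for a two-statement body on lines 3
    # and 4 reached from line 2 (assuming both lines are recorded statements),
    # this adds arcs (2, 3) and (3, 4) and returns the exits of the last
    # statement, here {ArcStart(4)}.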
def find_non_missing_node(self, node):
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
to find a child node in self.statements.
Returns a node, or None if none of the node remains.
"""
# This repeats work just done in add_body_arcs, but this duplication
# means we can avoid a function call in the 99.9999% case of not
# optimizing away statements.
lineno = self.line_for_node(node)
first_line = self.multiline.get(lineno, lineno)
if first_line in self.statements:
return node
missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
if missing_fn:
node = missing_fn(node)
else:
node = None
return node
def _missing__If(self, node):
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
non_missing = self.find_non_missing_node(NodeList(node.body))
if non_missing:
return non_missing
if node.orelse:
return self.find_non_missing_node(NodeList(node.orelse))
return None
def _missing__NodeList(self, node):
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
for child in node.body:
child = self.find_non_missing_node(child)
if child is not None:
non_missing_children.append(child)
# Return the simplest representation of the present children.
if not non_missing_children:
return None
if len(non_missing_children) == 1:
return non_missing_children[0]
return NodeList(non_missing_children)
def is_constant_expr(self, node):
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["NameConstant", "Num"]:
return "Num"
elif node_name == "Name":
if node.id in ["True", "False", "None", "__debug__"]:
return "Name"
return None
# In the fullness of time, these might be good tests to write:
# while EXPR:
# while False:
# listcomps hidden deep in other expressions
# listcomps hidden in lists: x = [[i for i in range(10)]]
# nested function definitions
# Exit processing: process_*_exits
#
# These functions process the four kinds of jump exits: break, continue,
# raise, and return. To figure out where an exit goes, we have to look at
# the block stack context. For example, a break will jump to the nearest
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
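    # Illustrative example (editor's note, not original code): a `break` inside
    # a `try`/`finally` that is itself inside a loop finds the TryBlock first,
    # so its ArcStart is stored in `break_from`; _handle__Try later routes it
    # through the `finally:` body and re-processes it as a break exit of the
    # enclosing loop.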
@contract(exits='ArcStarts')
def process_break_exits(self, exits):
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
block.break_exits.update(exits)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.break_from.update(exits)
break
@contract(exits='ArcStarts')
def process_continue_exits(self, exits):
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
for xit in exits:
self.add_arc(xit.lineno, block.start, xit.cause)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.continue_from.update(exits)
break
@contract(exits='ArcStarts')
def process_raise_exits(self, exits):
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock):
if block.handler_start is not None:
for xit in exits:
self.add_arc(xit.lineno, block.handler_start, xit.cause)
break
elif block.final_start is not None:
block.raise_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't except from function '{0}'".format(block.name),
)
break
@contract(exits='ArcStarts')
def process_return_exits(self, exits):
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock) and block.final_start is not None:
block.return_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't return from function '{0}'".format(block.name),
)
break
# Handlers: _handle__*
#
# Each handler deals with a specific AST node type, dispatched from
# add_arcs. Each deals with a particular kind of node type, and returns
# the set of exits from that node. These functions mirror the Python
# semantics of each syntactic construct. See the docstring for add_arcs to
# understand the concept of exits from a node.
@contract(returns='ArcStarts')
def _handle__Break(self, node):
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits([break_start])
return set()
@contract(returns='ArcStarts')
def _handle_decorated(self, node):
"""Add arcs for things that can be decorated (classes and functions)."""
last = self.line_for_node(node)
if node.decorator_list:
for dec_node in node.decorator_list:
dec_start = self.line_for_node(dec_node)
if dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
if node.body:
body_start = self.line_for_node(node.body[0])
body_start = self.multiline.get(body_start, body_start)
for lineno in range(last+1, body_start):
if lineno in self.statements:
self.add_arc(last, lineno)
last = lineno
# The body is handled in collect_arcs.
return set([ArcStart(last)])
_handle__ClassDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__Continue(self, node):
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits([continue_start])
return set()
@contract(returns='ArcStarts')
def _handle__For(self, node):
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
exits = self.add_body_arcs(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No else clause: exit from the for line.
exits.add(from_start)
return exits
_handle__AsyncFor = _handle__For
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__If(self, node):
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
exits |= self.add_body_arcs(node.orelse, from_start=from_start)
return exits
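    # Worked example (a hedged editorial sketch): for
    #     1: if cond():
    #     2:     a()
    #     3: else:
    #     4:     b()
    # this handler records arcs (1, 2) and (1, 4) with the "never true" /
    # "never false" fragments and returns {ArcStart(2), ArcStart(4)}.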
@contract(returns='ArcStarts')
def _handle__NodeList(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
@contract(returns='ArcStarts')
def _handle__Raise(self, node):
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits([raise_start])
# `raise` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Return(self, node):
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits([return_start])
# `return` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Try(self, node):
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
handler_start = None
if node.finalbody:
final_start = self.line_for_node(node.finalbody[0])
else:
final_start = None
try_block = TryBlock(handler_start, final_start)
self.block_stack.append(try_block)
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
if node.handlers:
# If there are `except` clauses, then raises in the try body
# will already jump to them. Start this set over for raises in
# `except` and `else`.
try_block.raise_from = set([])
else:
self.block_stack.pop()
handler_exits = set()
if node.handlers:
last_handler_start = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
self.add_arc(last_handler_start, handler_start)
last_handler_start = handler_start
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
if node.orelse:
exits = self.add_body_arcs(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
final_from = ( # You can get to the `finally` clause from:
exits | # the exits of the body or `else` clause,
try_block.break_from | # or a `break`,
try_block.continue_from | # or a `continue`,
try_block.raise_from | # or a `raise`,
try_block.return_from # or a `return`.
)
final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
if try_block.break_from:
self.process_break_exits(
self._combine_finally_starts(try_block.break_from, final_exits)
)
if try_block.continue_from:
self.process_continue_exits(
self._combine_finally_starts(try_block.continue_from, final_exits)
)
if try_block.raise_from:
self.process_raise_exits(
self._combine_finally_starts(try_block.raise_from, final_exits)
)
if try_block.return_from:
self.process_return_exits(
self._combine_finally_starts(try_block.return_from, final_exits)
)
if exits:
# The finally clause's exits are only exits for the try block
# as a whole if the try block had some exits to begin with.
exits = final_exits
return exits
@contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
def _combine_finally_starts(self, starts, exits):
"""Helper for building the cause of `finally` branches.
"finally" clauses might not execute their exits, and the causes could
be due to a failure to execute any of the exits in the try block. So
we use the causes from `starts` as the causes for `exits`.
"""
causes = []
for start in sorted(starts):
if start.cause is not None:
causes.append(start.cause.format(lineno=start.lineno))
cause = " or ".join(causes)
exits = set(ArcStart(xit.lineno, cause) for xit in exits)
return exits
@contract(returns='ArcStarts')
def _handle__TryExcept(self, node):
# Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
# TryExcept, it means there was no finally, so fake it, and treat as
# a general Try node.
node.finalbody = []
return self._handle__Try(node)
@contract(returns='ArcStarts')
def _handle__TryFinally(self, node):
# Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
# TryFinally, see if there's a TryExcept nested inside. If so, merge
# them. Otherwise, fake fields to complete a Try node.
node.handlers = []
node.orelse = []
first = node.body[0]
if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
assert len(node.body) == 1
node.body = first.body
node.handlers = first.handlers
node.orelse = first.orelse
return self._handle__Try(node)
@contract(returns='ArcStarts')
def _handle__While(self, node):
constant_test = self.is_constant_expr(node.test)
start = to_top = self.line_for_node(node.test)
if constant_test and (env.PY3 or constant_test == "Num"):
to_top = self.line_for_node(node.body[0])
self.block_stack.append(LoopBlock(start=to_top))
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
for xit in exits:
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
exits.update(my_block.break_exits)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No `else` clause: you can exit from the start.
if not constant_test:
exits.add(from_start)
return exits
@contract(returns='ArcStarts')
def _handle__With(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
_handle__AsyncWith = _handle__With
def _code_object__Module(self, node):
start = self.line_for_node(node)
if node.body:
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
for xit in exits:
self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
else:
# Empty module.
self.add_arc(-start, start)
self.add_arc(start, -start)
def _code_object__FunctionDef(self, node):
start = self.line_for_node(node)
self.block_stack.append(FunctionBlock(start=start, name=node.name))
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
self.process_return_exits(exits)
self.block_stack.pop()
_code_object__AsyncFunctionDef = _code_object__FunctionDef
def _code_object__ClassDef(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
for xit in exits:
self.add_arc(
xit.lineno, -start, xit.cause,
"didn't exit the body of class '{0}'".format(node.name),
)
def _make_oneline_code_method(noun): # pylint: disable=no-self-argument
"""A function to make methods for online callable _code_object__ methods."""
def _code_object__oneline_callable(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start, None, "didn't run the {0} on line {1}".format(noun, start))
self.add_arc(
start, -start, None,
"didn't finish the {0} on line {1}".format(noun, start),
)
return _code_object__oneline_callable
_code_object__Lambda = _make_oneline_code_method("lambda")
_code_object__GeneratorExp = _make_oneline_code_method("generator expression")
_code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
_code_object__SetComp = _make_oneline_code_method("set comprehension")
if env.PY3:
_code_object__ListComp = _make_oneline_code_method("list comprehension")
if AST_DUMP: # pragma: debugging
# Code only used when dumping the AST for debugging.
SKIP_DUMP_FIELDS = ["ctx"]
def _is_simple_value(value):
"""Is `value` simple enough to be displayed on a single line?"""
return (
value in [None, [], (), {}, set()] or
isinstance(value, (string_class, int, float))
)
def ast_dump(node, depth=0):
"""Dump the AST for `node`.
This recursively walks the AST, printing a readable version.
"""
indent = " " * depth
if not isinstance(node, ast.AST):
print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
return
lineno = getattr(node, "lineno", None)
if lineno is not None:
linemark = " @ {0}".format(node.lineno)
else:
linemark = ""
head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)
named_fields = [
(name, value)
for name, value in ast.iter_fields(node)
if name not in SKIP_DUMP_FIELDS
]
if not named_fields:
print("{0}>".format(head))
elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
field_name, value = named_fields[0]
print("{0} {1}: {2!r}>".format(head, field_name, value))
else:
print(head)
if 0:
print("{0}# mro: {1}".format(
indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
))
next_indent = indent + " "
for field_name, value in named_fields:
prefix = "{0}{1}:".format(next_indent, field_name)
if _is_simple_value(value):
print("{0} {1!r}".format(prefix, value))
elif isinstance(value, list):
print("{0} [".format(prefix))
for n in value:
ast_dump(n, depth + 8)
print("{0}]".format(next_indent))
else:
print(prefix)
ast_dump(value, depth + 8)
print("{0}>".format(indent))
|
apache-2.0
| -742,420,493,542,686,700 | 37.77551 | 99 | 0.573136 | false |
endlessm/chromium-browser
|
third_party/chromite/api/gen/test_platform/steps/execute/build_pb2.py
|
1
|
2541
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test_platform/steps/execute/build.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='test_platform/steps/execute/build.proto',
package='test_platform.steps.execute',
syntax='proto3',
serialized_options=_b('ZEgo.chromium.org/chromiumos/infra/proto/go/test_platform/steps/execute'),
serialized_pb=_b('\n\'test_platform/steps/execute/build.proto\x12\x1btest_platform.steps.execute\x1a\x1fgoogle/protobuf/timestamp.proto\"8\n\x05\x42uild\x12/\n\x0b\x63reate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampBGZEgo.chromium.org/chromiumos/infra/proto/go/test_platform/steps/executeb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_BUILD = _descriptor.Descriptor(
name='Build',
full_name='test_platform.steps.execute.Build',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create_time', full_name='test_platform.steps.execute.Build.create_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=161,
)
_BUILD.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name['Build'] = _BUILD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Build = _reflection.GeneratedProtocolMessageType('Build', (_message.Message,), dict(
DESCRIPTOR = _BUILD,
__module__ = 'test_platform.steps.execute.build_pb2'
# @@protoc_insertion_point(class_scope:test_platform.steps.execute.Build)
))
_sym_db.RegisterMessage(Build)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
bsd-3-clause
| 8,662,193,320,717,727,000 | 33.808219 | 317 | 0.744982 | false |
alexshepard/aledison
|
contacts.py
|
1
|
1436
|
#!/usr/bin/python
import os.path
import pickle
import yaml
config = yaml.safe_load(open("config.yml"))
contacts_filename = config["contacts"]["filename"]
class Contacts:
"""An address book, with entries for people."""
def get_contacts(self):
if (os.path.isfile(contacts_filename)):
return pickle.load(open(contacts_filename, "rb"))
else:
return []
def save_contacts(self, contacts):
pickle.dump(contacts, open(contacts_filename, "wb"))
def add_contact(self, contact):
contacts = self.get_contacts()
contacts.append(contact)
self.save_contacts(contacts)
def delete_contact(self, contact):
contacts = self.get_contacts()
for candidate in contacts:
if candidate.name == contact.name and candidate.number == contact.number:
contacts.remove(candidate)
self.save_contacts(contacts)
return True
return False
def find_contact_by_number(self, number):
for contact in self.get_contacts():
if contact.number == number:
return contact
return None
def find_contact_by_name(self, name):
for contact in self.get_contacts():
if contact.name == name:
return contact
return None
class Contact:
""" A data structure encapsulating a person. """
# default data
name = "Nobody"
number = "+19856552500" # rickroll
def __init__(self, name, number):
self.name = name
self.number = number
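# Example usage (an illustrative sketch, not part of the original script;
# the name and number below are made up):
#     book = Contacts()
#     book.add_contact(Contact("Alice", "+15551234567"))
#     alice = book.find_contact_by_name("Alice")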
|
mit
| -1,437,390,145,994,924,000 | 24.210526 | 79 | 0.662953 | false |
mpurg/qtools
|
packages/Qpyl/core/qstructure.py
|
1
|
13331
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module contains the QStruct class for handling Q structure files (PDB, mol2).
Additionally, it implements methods for finding and replacing atom
placeholders (e.g. $1.N$)
"""
from __future__ import absolute_import, unicode_literals, division
from six.moves import map
import re
import logging
from collections import namedtuple
from Qpyl.common import raise_or_log
logger = logging.getLogger(__name__)
PosVector = namedtuple("PosVector", ["x", "y", "z"])
_PLACEHOLDER_RE = re.compile(r"\$\S+\.\S+\$")
_COMMENTS_RE = re.compile(r"[#\!].*")
def find_placeholders(inputstring):
"""Find atom placeholders of the form $514.C3$
It ignores comments (characters following # or !)
See also QStruct.convert_placeholders
"""
tmp = re.sub(_COMMENTS_RE, "", inputstring)
return _PLACEHOLDER_RE.findall(tmp)
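# Illustrative usage (editor's sketch, not part of the module):
#     find_placeholders("$1.CA$ $2.N$ # $3.O$") == ["$1.CA$", "$2.N$"]
# the trailing "# $3.O$" is dropped because it is a comment.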
class QStructError(Exception):
pass
class QStruct(object):
"""
Class for processing the structure (coordinates)
Args:
filename (path to the structure file)
filetype (type of structure file: 'pdb' or 'mol2')
ignore_errors (boolean): Optional, default is False.\
If set to True, some non-vital\
exceptions are logged instead.
In contrast to QLib and QPrm, the 'read' methods in this
class should not be called since the object should
contain data from only one structure file.
The structure data is stored in three lists:
atoms, residues, molecules
which contain _StructAtom, _StructResidue and _StructMolecule
objects.
"""
def __init__(self, filename, filetype, ignore_errors=False):
self.ignore_errors = ignore_errors
FILE_TYPES = {'pdb': self._read_pdb,
'mol2': self._read_mol2}
self.filetype = filetype.lower()
if self.filetype not in FILE_TYPES:
raise QStructError("Filetype {} not supported. Use {}"
.format(filetype,
" or ".join(FILE_TYPES)))
self.atoms = []
self.residues = []
self.molecules = []
self.filename = filename
# TODO: some sort of lookup hashes if needed
# run the parser function
FILE_TYPES[self.filetype](filename)
# check if we actually got something
for t in ["atoms", "residues", "molecules"]:
if len(self.__dict__[t]) == 0:
raise QStructError("No {} found, check file '{}' and"
" filetype '{}'".format(t,
self.filename,
self.filetype))
def _read_mol2(self, mol2_file):
"""
Read and parse a mol2 file for coordinates.
Args:
mol2_file (string): name/path of file
"""
molecule = None
residue = None
aindex, old_aindex = None, None
section = None
for line in open(mol2_file, 'r').readlines():
if line.startswith("@<TRIPOS>"):
section = line.replace("@<TRIPOS>", "").strip()
if section == "MOLECULE":
if molecule != None:
self.molecules.append(molecule)
molecule = _StructMolecule(self)
continue
if section == "ATOM":
if aindex != None:
old_aindex = aindex
lf = line.split()
aindex, aname = int(lf[0]), lf[1]
x, y, z = map(float, lf[2:5])
rindex = int(lf[6])
rname = lf[7][0:3].upper()
if old_aindex != None and aindex - old_aindex != 1:
raise_or_log("Bad Mol2 format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad Mol2 format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
# append last one after parsing
if molecule != None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def _read_pdb(self, pdb_file):
"""
Read and parse a PDB file for coordinates.
Args:
pdb_file (string): name/path of file
"""
# make a new _StructMolecule object
molecule = _StructMolecule(self)
# parse the PDB file
residue = None
aindex, old_aindex = None, None
for line in open(pdb_file, 'r').readlines():
if line.startswith("ATOM") or line.startswith("HETATM"):
if aindex != None:
old_aindex = aindex
aindex = int(line[6:12])
if old_aindex != None and aindex - old_aindex != 1:
raise_or_log("Bad PDB format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
aname = line[12:17].strip()
rname = line[17:20].strip().upper()
rindex = int(line[22:26])
x, y, z = map(float, (line[30:38], line[38:46], line[46:54]))
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad PDB format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
if aname in [a.name for a in residue.atoms]:
raise_or_log("Bad PDB format - two atoms with same name "
"({}) in residue {}.{}"
"".format(aname, rname, rindex),
QStructError, logger, self.ignore_errors)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
elif line.startswith("TER") or line.startswith("GAP"):
self.molecules.append(molecule)
residue = None
molecule = _StructMolecule(self)
        # append last one if it didn't end with a TER/GAP
if molecule != None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def convert_placeholders(self, inputstring):
"""Convert atom placeholders ($514.C3$) to indexes.
Placeholders are a combination of the residue id and
atom name, encapsulated in $$ - $RESID.ATOM_NAME$
        In addition, there are some special values:
$LAST.ID$ - id of last atom in the system
Arguments:
inputstring (string): string with placeholders (input file contents)
Returns:
outputstring (string): converted string
"""
id_map = {"{}.{}".format(a.residue.index, a.name): str(a.index)
for a in self.atoms}
last_id = "{}.{}".format(self.atoms[-1].residue.index,
self.atoms[-1].name)
outputstring = ""
for line in inputstring.split("\n"):
comment = ""
if "#" in line:
i = line.index("#")
line, comment = line[:i], line[i:]
c = find_placeholders(line)
for pid in c:
pid = pid.strip("$")
pid2 = pid.replace("LAST.ID", last_id)
try:
padding = (len(pid2)+2 - len(id_map[pid2])) * " "
except KeyError:
raise QStructError("Atom '${}$' does not exist in the pdb "
"structure.".format(pid2))
                line = re.sub(r"\$" + pid + r"\$", id_map[pid2] + padding, line)
outputstring += line + comment + "\n"
return outputstring
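    # Hedged example (added for clarity): if atom "N" of residue 1 is the 5th
    # atom in the structure, the text "angle $1.N$ 10.0" comes back as
    # "angle 5     10.0" -- the index is padded with spaces to the width of the
    # original placeholder so the column layout is preserved.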
class _StructAtom(object):
"""Contains structural information for an atom.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): atom name
x,y,z (float): coordinates
residue (_StructResidue): parent residue object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the atom
in the atom list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, x, y, z, residue, structure):
self.index_struct = int(index_struct)
self.name = name
self.coordinates = PosVector(float(x), float(y), float(z))
self.residue = residue
self.structure = structure
@property
def index(self):
return self.structure.atoms.index(self) + 1
def __repr__(self):
res = self.residue
mol = res.molecule
return "_StructAtom: {}.{}.{}".format(mol.index,
res.index,
self.index)
class _StructResidue(object):
"""Contains structural information for a residue.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): residue name
molecule (_StructMolecule): parent molecule object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the residue
in the residue list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, molecule, structure):
self.atoms = []
self.index_struct = int(index_struct)
self.name = name
self.molecule = molecule
self.structure = structure
@property
def index(self):
return self.structure.residues.index(self) + 1
def add_atom(self, atom):
self.atoms.append(atom)
def __repr__(self):
mol = self.molecule
return "_StructResidue: {}.{}{}".format(mol.index,
self.name,
self.index)
class _StructMolecule(object):
"""Contains structural information for a molecule.
Arguments:
structure (_QStruct): parent structure object
    Property 'index' (int) is the actual 1-based index of the molecule
    in the molecule list (as it was appended). It should correspond to
    the index in the generated topology.
"""
def __init__(self, structure):
self.residues = []
self.structure = structure
@property
def index(self):
return self.structure.molecules.index(self) + 1
def add_residue(self, residue):
self.residues.append(residue)
def __repr__(self):
return "_StructMolecule: {}".format(self.index)
|
mit
| -7,656,438,831,662,270,000 | 34.454787 | 82 | 0.547221 | false |
somic/paasta
|
tests/autoscaling/test_autoscaling_cluster_lib.py
|
1
|
82470
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import contextlib
import unittest
import warnings
from math import floor
import mock
from botocore.exceptions import ClientError
from pytest import raises
from requests.exceptions import HTTPError
from paasta_tools.autoscaling import autoscaling_cluster_lib
from paasta_tools.mesos_tools import SlaveTaskCount
from paasta_tools.metrics.metastatus_lib import ResourceInfo
from paasta_tools.utils import TimeoutError
warnings.filterwarnings("error", category=RuntimeWarning)
asyncio.get_event_loop().set_debug(True)
def _run(coro):
asyncio.set_event_loop(asyncio.new_event_loop())
asyncio.get_event_loop().set_debug(True)
return asyncio.get_event_loop().run_until_complete(coro)
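# Editor's note (illustrative): _run drives a single coroutine to completion on
# a fresh event loop, e.g.
#     _run(autoscaling_cluster_lib.autoscale_cluster_resource(mock_scaler, mock_state))
# as used in the tests below.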
def get_coro_with_exception(error):
async def f(*args, **kwargs):
await asyncio.sleep(0)
raise error
return f
class AsyncNone(object):
"""Same as asyncio.sleep(0), but needed to be able to patch asyncio.sleep"""
def __await__(self):
yield
async def just_sleep(*a, **k):
await AsyncNone()
def pid_to_ip_sideeffect(pid):
pid_to_ip = {
'pid1': '10.1.1.1',
'pid2': '10.2.2.2',
'pid3': '10.3.3.3',
}
return pid_to_ip[pid]
def is_resource_cancelled_sideeffect(self):
if self.resource['id'] == 'sfr-blah3':
return True
return False
def test_get_mesos_utilization_error():
mock_system_config = mock.Mock(return_value={})
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_resource_utilization_by_grouping',
autospec=True,
) as mock_get_resource_utilization_by_grouping:
mock_mesos_state = {'slaves': [
{'attributes': {'pool': 'default'}},
{'attributes': {'pool': 'default'}},
]}
mock_utilization = {
'free': ResourceInfo(cpus=7.0, mem=2048.0, disk=30.0),
'total': ResourceInfo(cpus=10.0, mem=4096.0, disk=40.0),
}
mock_get_resource_utilization_by_grouping.return_value = {('default', 'westeros-1'): mock_utilization}
ret = autoscaling_cluster_lib.get_mesos_utilization_error(
mesos_state=mock_mesos_state,
system_config=mock_system_config,
region="westeros-1",
pool="default",
target_utilization=0.8,
)
assert ret == 0.5 - 0.8
ret = autoscaling_cluster_lib.get_mesos_utilization_error(
mesos_state=mock_mesos_state,
system_config=mock_system_config,
region="westeros-1",
pool="fake-pool",
target_utilization=0.8,
)
assert ret == 0
def test_get_instances_from_ip():
mock_instances = []
ret = autoscaling_cluster_lib.get_instances_from_ip('10.1.1.1', mock_instances)
assert ret == []
mock_instances = [{'InstanceId': 'i-blah', 'PrivateIpAddress': '10.1.1.1'}]
ret = autoscaling_cluster_lib.get_instances_from_ip('10.1.1.1', mock_instances)
assert ret == mock_instances
def test_autoscale_local_cluster_with_cancelled():
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.load_system_paasta_config', autospec=True,
) as mock_get_paasta_config, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.autoscale_cluster_resource', autospec=True,
) as mock_autoscale_cluster_resource, mock.patch(
'time.sleep', autospec=True,
), mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.asyncio.sleep', autospec=True, side_effect=just_sleep,
), mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.is_resource_cancelled',
autospec=True,
) as mock_is_resource_cancelled, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_sfr', autospec=True,
) as mock_get_sfr, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_all_utilization_errors', autospec=True,
) as mock_get_all_utilization_errors, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_master', autospec=True,
) as mock_get_mesos_master, mock.patch(
'paasta_tools.metrics.metrics_lib.load_system_paasta_config', autospec=True,
) as mock_get_metrics_system_paasta_config:
mock_get_sfr.return_value = False
mock_scaling_resources = {
'id1': {
'id': 'sfr-blah1', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
'id2': {
'id': 'sfr-blah2', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
'id3': {
'id': 'sfr-blah3', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
}
mock_resource_pool_settings = {'default': {'drain_timeout': 123, 'target_utilization': 0.75}}
mock_get_cluster_autoscaling_resources = mock.Mock(return_value=mock_scaling_resources)
mock_get_resource_pool_settings = mock.Mock(return_value=mock_resource_pool_settings)
mock_is_resource_cancelled.side_effect = is_resource_cancelled_sideeffect
mock_get_resources = mock.Mock(
get_cluster_autoscaling_resources=mock_get_cluster_autoscaling_resources,
get_resource_pool_settings=mock_get_resource_pool_settings,
get_metrics_provider=lambda: None,
)
mock_get_paasta_config.return_value = mock_get_resources
mock_get_metrics_system_paasta_config.return_value = mock_get_resources
mock_get_all_utilization_errors.return_value = {
('westeros-1', 'default'): -0.2,
}
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(state=mock_mesos_state)
mock_get_mesos_master.return_value = mock_master
calls = []
async def fake_autoscale(scaler, state):
calls.append(scaler)
await asyncio.sleep(0)
mock_autoscale_cluster_resource.side_effect = fake_autoscale
asyncio.set_event_loop(asyncio.new_event_loop())
autoscaling_cluster_lib.autoscale_local_cluster(config_folder='/nail/blah')
assert mock_get_paasta_config.called
autoscaled_resources = [call[0][0].resource for call in mock_autoscale_cluster_resource.call_args_list]
assert autoscaled_resources[0] == mock_scaling_resources['id3']
assert len(calls) == 1
def test_autoscale_local_cluster():
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.load_system_paasta_config', autospec=True,
) as mock_get_paasta_config, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.autoscale_cluster_resource', autospec=True,
) as mock_autoscale_cluster_resource, mock.patch(
'time.sleep', autospec=True,
), mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.asyncio.sleep', autospec=True,
) as mock_sleep, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.is_resource_cancelled',
autospec=True,
) as mock_is_resource_cancelled, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_sfr', autospec=True,
) as mock_get_sfr, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_all_utilization_errors', autospec=True,
) as mock_get_all_utilization_errors, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_master', autospec=True,
) as mock_get_mesos_master, mock.patch(
'paasta_tools.metrics.metrics_lib.load_system_paasta_config', autospec=True,
) as mock_get_metrics_system_paasta_config:
mock_sleep.side_effect = just_sleep
mock_get_sfr.return_value = False
mock_scaling_resources = {
'id1': {
'id': 'sfr-blah1', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
'id2': {
'id': 'sfr-blah2', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
'id4': {
'id': 'sfr-blah4', 'type': 'aws_spot_fleet_request',
'pool': 'default', 'region': 'westeros-1',
},
}
mock_resource_pool_settings = {'default': {'drain_timeout': 123, 'target_utilization': 0.75}}
mock_get_cluster_autoscaling_resources = mock.Mock(return_value=mock_scaling_resources)
mock_get_resource_pool_settings = mock.Mock(return_value=mock_resource_pool_settings)
mock_is_resource_cancelled.side_effect = is_resource_cancelled_sideeffect
mock_get_resources = mock.Mock(
get_cluster_autoscaling_resources=mock_get_cluster_autoscaling_resources,
get_resource_pool_settings=mock_get_resource_pool_settings,
get_metrics_provider=lambda: None,
)
mock_get_paasta_config.return_value = mock_get_resources
mock_get_metrics_system_paasta_config.return_value = mock_get_resources
mock_get_all_utilization_errors.return_value = {
('westeros-1', 'default'): -0.2,
}
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(state=mock_mesos_state)
mock_get_mesos_master.return_value = mock_master
calls = []
async def fake_autoscale(scaler, state):
calls.append(scaler)
await asyncio.sleep(0)
mock_autoscale_cluster_resource.side_effect = fake_autoscale
asyncio.set_event_loop(asyncio.new_event_loop())
autoscaling_cluster_lib.autoscale_local_cluster(config_folder='/nail/blah')
assert mock_get_paasta_config.called
autoscaled_resources = [call[0][0].resource for call in mock_autoscale_cluster_resource.call_args_list]
assert mock_scaling_resources['id2'] in autoscaled_resources
assert mock_scaling_resources['id1'] in autoscaled_resources
assert mock_scaling_resources['id4'] in autoscaled_resources
assert len(calls) == 3
def test_filter_scalers():
resource1 = mock.Mock(is_resource_cancelled=lambda: False)
resource2 = mock.Mock(is_resource_cancelled=lambda: False)
resource3 = mock.Mock(is_resource_cancelled=lambda: True)
resource4 = mock.Mock(is_resource_cancelled=lambda: False)
resource5 = mock.Mock(is_resource_cancelled=lambda: True)
resource6 = mock.Mock(is_resource_cancelled=lambda: False)
autoscaling_scalers = {
('westeros-1', 'default'): [resource1, resource2],
('westeros-2', 'default'): [resource3, resource4],
('westeros-3', 'default'): [resource5, resource6],
}
utilization_errors = {
('westeros-1', 'default'): -0.2,
('westeros-2', 'default'): -0.2,
('westeros-3', 'default'): 0.2,
}
ret = autoscaling_cluster_lib.filter_scalers(autoscaling_scalers, utilization_errors)
assert len(ret) == 5
assert resource4 not in ret
def test_autoscale_cluster_resource():
call = []
async def mock_scale_resource(current, target):
call.append((current, target))
await asyncio.sleep(0)
mock_scaling_resource = {'id': 'sfr-blah', 'type': 'sfr', 'pool': 'default'}
mock_scaler = mock.Mock()
mock_metrics_provider = mock.Mock(return_value=(2, 6))
mock_scaler.metrics_provider = mock_metrics_provider
mock_scaler.scale_resource = mock_scale_resource
mock_scaler.resource = mock_scaling_resource
mock_state = mock.Mock()
# test scale up
_run(autoscaling_cluster_lib.autoscale_cluster_resource(mock_scaler, mock_state))
assert mock_metrics_provider.called
assert (2, 6) in call
def test_get_autoscaling_info_for_all_resources():
mock_resource_1 = {'region': 'westeros-1', 'pool': 'default'}
mock_resource_2 = {'region': 'westeros-1', 'pool': 'not-default'}
mock_resources = {
'id1': mock_resource_1,
'id2': mock_resource_2,
}
mock_get_cluster_autoscaling_resources = mock.Mock(return_value=mock_resources)
mock_system_config = mock.Mock(
get_cluster_autoscaling_resources=mock_get_cluster_autoscaling_resources,
get_resource_pool_settings=mock.Mock(return_value={}),
)
mock_autoscaling_info = mock.Mock()
def mock_autoscaling_info_for_resource_side_effect(resource, pool_settings, state, utilization_errors):
return {
(mock_resource_1['region'], mock_resource_1['pool'],): None,
(mock_resource_2['region'], mock_resource_2['pool'],): mock_autoscaling_info,
}[(resource['region'], resource['pool'],)]
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.load_system_paasta_config', autospec=True,
return_value=mock_system_config,
), mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.autoscaling_info_for_resource', autospec=True,
) as mock_autoscaling_info_for_resource, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_utilization_error', autospec=True,
) as mock_get_utilization_error:
mock_autoscaling_info_for_resource.side_effect = mock_autoscaling_info_for_resource_side_effect
mock_state = mock.Mock()
mock_get_utilization_error.return_value = 0
ret = autoscaling_cluster_lib.get_autoscaling_info_for_all_resources(mock_state)
utilization_errors = autoscaling_cluster_lib.get_all_utilization_errors(
mock_resources, {}, mock_state, mock_system_config,
)
calls = [
mock.call(mock_resource_1, {}, mock_state, utilization_errors),
mock.call(mock_resource_2, {}, mock_state, utilization_errors),
]
mock_autoscaling_info_for_resource.assert_has_calls(calls, any_order=True)
assert ret == [mock_autoscaling_info]
def test_autoscaling_info_for_resources():
mock_resources = {'sfr-blah': {
'id': 'sfr-blah',
'min_capacity': 1,
'max_capacity': 5,
'pool': 'default',
'type': 'sfr',
'region': 'westeros-1',
}}
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_scaler', autospec=True,
) as mock_get_scaler:
# test cancelled
mock_metrics_provider = mock.Mock(return_value=(2, 4))
mock_scaler = mock.Mock(
metrics_provider=mock_metrics_provider,
resource=mock_resources['sfr-blah'],
is_resource_cancelled=mock.Mock(return_value=True),
instances=["mock_instance"],
)
mock_scaler_class = mock.Mock(return_value=mock_scaler)
mock_get_scaler.return_value = mock_scaler_class
mock_state = mock.Mock()
mock_utilization_errors = {('westeros-1', 'default',): 0}
ret = autoscaling_cluster_lib.autoscaling_info_for_resource(
mock_resources['sfr-blah'], {}, mock_state, mock_utilization_errors,
)
assert mock_metrics_provider.called
mock_scaler_class.assert_called_with(
resource=mock_resources['sfr-blah'],
pool_settings={},
config_folder=None,
dry_run=True,
utilization_error=0,
)
assert ret == autoscaling_cluster_lib.AutoscalingInfo(
resource_id='sfr-blah',
pool='default',
state='cancelled',
current='2',
target='4',
min_capacity='1',
max_capacity='5',
instances='1',
)
# test active
mock_scaler = mock.Mock(
metrics_provider=mock_metrics_provider,
resource=mock_resources['sfr-blah'],
is_resource_cancelled=mock.Mock(return_value=False),
instances=["mock_instance"],
)
mock_scaler_class = mock.Mock(return_value=mock_scaler)
mock_get_scaler.return_value = mock_scaler_class
ret = autoscaling_cluster_lib.autoscaling_info_for_resource(
mock_resources['sfr-blah'], {}, mock_state, mock_utilization_errors,
)
assert ret == autoscaling_cluster_lib.AutoscalingInfo(
resource_id='sfr-blah',
pool='default',
state='active',
current='2',
target='4',
min_capacity='1',
max_capacity='5',
instances='1',
)
# Test exception getting target
mock_metrics_provider = mock.Mock(side_effect=autoscaling_cluster_lib.ClusterAutoscalingError)
mock_scaler = mock.Mock(
metrics_provider=mock_metrics_provider,
resource=mock_resources['sfr-blah'],
is_resource_cancelled=mock.Mock(return_value=False),
current_capacity=2,
instances=["mock_instance"],
)
mock_scaler_class = mock.Mock(return_value=mock_scaler)
mock_get_scaler.return_value = mock_scaler_class
ret = autoscaling_cluster_lib.autoscaling_info_for_resource(
mock_resources['sfr-blah'], {}, mock_state, mock_utilization_errors,
)
assert ret == autoscaling_cluster_lib.AutoscalingInfo(
resource_id='sfr-blah',
pool='default',
state='active',
current='2',
target='Exception',
min_capacity='1',
max_capacity='5',
instances='1',
)
class TestAsgAutoscaler(unittest.TestCase):
mock_resource = {
'id': 'asg-blah', 'type': 'aws_autoscaling_group',
'region': 'westeros-1', 'pool': 'default',
}
mock_config_folder = '/nail/blah'
mock_pool_settings = {'drain_timeout': 123}
def setUp(self):
self.autoscaler = self.create_autoscaler()
def create_autoscaler(self, utilization_error=0.3, resource=None, asg=None):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AsgAutoscaler.get_asg',
autospec=True,
return_value=asg or {},
):
autoscaler = autoscaling_cluster_lib.AsgAutoscaler(
resource or self.mock_resource,
self.mock_pool_settings,
self.mock_config_folder,
False,
utilization_error,
)
autoscaler.instances = []
return autoscaler
def create_mock_resource(self, **kwargs):
mock_resource = self.mock_resource.copy()
mock_resource.update(**kwargs)
return mock_resource
def test_exists(self):
self.autoscaler.asg = mock.Mock()
assert self.autoscaler.exists
self.autoscaler.asg = None
assert not self.autoscaler.exists
def test_current_capacity(self):
self.autoscaler.asg = {'Instances': [mock.Mock()] * 3}
assert self.autoscaler.current_capacity == 3
def test_is_asg_cancelled(self):
self.autoscaler.asg = None
assert self.autoscaler.is_resource_cancelled()
self.autoscaler.asg = mock.Mock()
assert not self.autoscaler.is_resource_cancelled()
def test_get_asg(self):
with mock.patch('boto3.client', autospec=True) as mock_ec2_client:
mock_asg = mock.Mock()
mock_asgs = {'AutoScalingGroups': [mock_asg]}
mock_describe_auto_scaling_groups = mock.Mock(return_value=mock_asgs)
mock_ec2_client.return_value = mock.Mock(describe_auto_scaling_groups=mock_describe_auto_scaling_groups)
ret = self.autoscaler.get_asg('asg-blah', region='westeros-1')
mock_describe_auto_scaling_groups.assert_called_with(AutoScalingGroupNames=['asg-blah'])
assert ret == mock_asg
mock_asgs = {'AutoScalingGroups': []}
mock_describe_auto_scaling_groups = mock.Mock(return_value=mock_asgs)
mock_ec2_client.return_value = mock.Mock(describe_auto_scaling_groups=mock_describe_auto_scaling_groups)
ret = self.autoscaler.get_asg('asg-blah', region='westeros-1')
assert ret is None
def test_set_asg_capacity(self):
with mock.patch('boto3.client', autospec=True) as mock_ec2_client, mock.patch(
'time.sleep', autospec=True,
):
mock_update_auto_scaling_group = mock.Mock()
mock_ec2_client.return_value = mock.Mock(update_auto_scaling_group=mock_update_auto_scaling_group)
self.autoscaler.dry_run = True
self.autoscaler.set_capacity(2)
assert not mock_update_auto_scaling_group.called
self.autoscaler.dry_run = False
self.autoscaler.set_capacity(2)
mock_ec2_client.assert_called_with('autoscaling', region_name='westeros-1')
mock_update_auto_scaling_group.assert_called_with(
AutoScalingGroupName='asg-blah',
DesiredCapacity=2,
)
with raises(autoscaling_cluster_lib.FailSetResourceCapacity):
mock_update_auto_scaling_group.side_effect = ClientError({'Error': {'Code': 1}}, 'blah')
self.autoscaler.set_capacity(2)
assert self.autoscaler.capacity == 2
def test_get_instance_type_weights_asg(self):
ret = self.autoscaler.get_instance_type_weights()
assert ret is None
def test_get_asg_delta(self):
resource = self.create_mock_resource(min_capacity=2, max_capacity=10)
asg = {'Instances': [mock.Mock()] * 5}
autoscaler = self.create_autoscaler(utilization_error=-0.2, resource=resource, asg=asg)
ret = autoscaler.get_asg_delta()
assert ret == (5, 4)
autoscaler = self.create_autoscaler(utilization_error=0.2, resource=resource, asg=asg)
ret = autoscaler.get_asg_delta()
assert ret == (5, 6)
big_asg = {'Instances': [mock.Mock()] * 10}
autoscaler = self.create_autoscaler(utilization_error=0.2, resource=resource, asg=big_asg)
ret = autoscaler.get_asg_delta()
assert ret == (10, 10)
small_asg = {'Instances': [mock.Mock()] * 2}
autoscaler = self.create_autoscaler(utilization_error=-0.2, resource=resource, asg=small_asg)
ret = autoscaler.get_asg_delta()
assert ret == (2, 2)
resource_zero_min = self.create_mock_resource(min_capacity=0, max_capacity=10)
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource_zero_min, asg=small_asg)
ret = autoscaler.get_asg_delta()
assert ret == (2, 1)
tiny_asg = {'Instances': [mock.Mock()] * 1}
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource_zero_min, asg=tiny_asg)
ret = autoscaler.get_asg_delta()
assert ret == (1, 0)
empty_asg = {'Instances': []}
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource_zero_min, asg=empty_asg)
ret = autoscaler.get_asg_delta()
assert ret == (0, 1)
resource_big_max = self.create_mock_resource(min_capacity=0, max_capacity=100)
bigger_asg = {'Instances': [mock.Mock()] * 20}
autoscaler = self.create_autoscaler(utilization_error=-0.5, resource=resource_big_max, asg=bigger_asg)
ret = autoscaler.get_asg_delta()
assert ret == (20, int(floor(20 * (1.0 - autoscaling_cluster_lib.MAX_CLUSTER_DELTA))))
resource_zeroes = self.create_mock_resource(min_capacity=0, max_capacity=0)
autoscaler = self.create_autoscaler(utilization_error=-0.5, resource=resource_zeroes, asg=bigger_asg)
ret = autoscaler.get_asg_delta()
assert ret == (20, int(floor(20 * (1.0 - autoscaling_cluster_lib.MAX_CLUSTER_DELTA))))
resource = self.create_mock_resource(min_capacity=10, max_capacity=40)
current_instances = int((10 * (1 - autoscaling_cluster_lib.MAX_CLUSTER_DELTA)) - 1)
asg = {'Instances': [mock.Mock()] * current_instances}
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource, asg=asg)
ret = autoscaler.get_asg_delta()
assert ret == (current_instances, 10)
def test_asg_metrics_provider(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AsgAutoscaler.get_asg_delta', autospec=True,
) as mock_get_asg_delta, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AsgAutoscaler.get_aws_slaves', autospec=True,
) as mock_get_aws_slaves, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_master', autospec=True,
) as mock_get_mesos_master, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AsgAutoscaler.cleanup_cancelled_config',
autospec=True,
) as mock_cleanup_cancelled_config, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AsgAutoscaler.is_aws_launching_instances',
autospec=True,
) as mock_is_aws_launching_asg_instances, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.emit_metrics',
autospec=True,
) as mock_emit_metrics:
mock_get_asg_delta.return_value = 1, 2
self.autoscaler.pool_settings = {}
mock_is_aws_launching_asg_instances.return_value = False
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(state=mock_mesos_state)
mock_get_mesos_master.return_value = mock_master
mock_slaves = ['one', 'two']
mock_get_aws_slaves.return_value = mock_slaves
# cancelled ASG
self.autoscaler.asg = None
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_cleanup_cancelled_config.assert_called_with(self.autoscaler, 'asg-blah', '/nail/blah', dry_run=False)
assert not mock_get_aws_slaves.called
assert ret == (0, 0)
# active ASG
self.autoscaler.asg = {'some': 'stuff'}
mock_cleanup_cancelled_config.reset_mock()
mock_emit_metrics.reset_mock()
self.autoscaler.instances = [mock.Mock(), mock.Mock()]
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_get_asg_delta.assert_called_with(self.autoscaler)
mock_emit_metrics.assert_called_once_with(self.autoscaler, 1, 2, mesos_slave_count=len(mock_slaves))
assert not mock_cleanup_cancelled_config.called
assert ret == (1, 2)
# active ASG with AWS still provisioning
mock_cleanup_cancelled_config.reset_mock()
mock_is_aws_launching_asg_instances.return_value = True
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert ret == (0, 0)
# ASG with no instances
self.autoscaler.instances = []
mock_is_aws_launching_asg_instances.return_value = False
self.autoscaler.metrics_provider(mock_mesos_state)
mock_get_asg_delta.assert_called_with(self.autoscaler)
# ASG scaling up with too many unregistered instances
self.autoscaler.instances = [mock.Mock() for _ in range(10)]
with raises(autoscaling_cluster_lib.ClusterAutoscalingError):
self.autoscaler.metrics_provider(mock_mesos_state)
# ASG scaling down with many unregistered instances
mock_emit_metrics.reset_mock()
self.autoscaler.utilization_error = -0.1
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_emit_metrics.assert_called_once_with(self.autoscaler, 1, 2, mesos_slave_count=len(mock_slaves))
assert ret == (1, 2)
def test_is_aws_launching_asg_instances(self):
self.autoscaler.asg = {'DesiredCapacity': 3, 'Instances': [mock.Mock(), mock.Mock()]}
assert self.autoscaler.is_aws_launching_instances()
self.autoscaler.asg = {'DesiredCapacity': 1, 'Instances': [mock.Mock(), mock.Mock()]}
assert not self.autoscaler.is_aws_launching_instances()
self.autoscaler.asg = {'DesiredCapacity': 2, 'Instances': [mock.Mock(), mock.Mock()]}
assert not self.autoscaler.is_aws_launching_instances()
class TestSpotAutoscaler(unittest.TestCase):
mock_resource = {'id': 'sfr-blah', 'type': 'sfr', 'region': 'westeros-1', 'pool': 'default'}
mock_pool_settings = {'drain_timeout': 123}
mock_config_folder = '/nail/blah'
def setUp(self):
self.autoscaler = self.create_autoscaler()
def create_autoscaler(self, utilization_error=0.3, resource=None, sfr=None):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_sfr', autospec=True,
) as mock_get_sfr, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_spot_fleet_instances',
autospec=True,
) as mock_get_spot_fleet_instances:
mock_get_sfr.return_value = sfr or {}
mock_get_spot_fleet_instances.return_value = []
return autoscaling_cluster_lib.SpotAutoscaler(
resource or self.mock_resource,
self.mock_pool_settings,
self.mock_config_folder,
False,
utilization_error,
)
def create_mock_resource(self, **kwargs):
mock_resource = self.mock_resource.copy()
mock_resource.update(**kwargs)
return mock_resource
def create_mock_sfr(self, fulfilled_capacity, request_state='active'):
return {
'SpotFleetRequestState': request_state,
'SpotFleetRequestConfig': {
'FulfilledCapacity': fulfilled_capacity,
},
}
def test_exists(self):
self.autoscaler.sfr = {'SpotFleetRequestState': 'active'}
assert self.autoscaler.exists
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled'}
assert not self.autoscaler.exists
self.autoscaler.sfr = None
assert not self.autoscaler.exists
def test_current_capacity(self):
self.autoscaler.sfr = {'SpotFleetRequestConfig': {'FulfilledCapacity': 2}}
assert self.autoscaler.current_capacity == 2
def test_get_spot_fleet_instances(self):
with mock.patch('boto3.client', autospec=True) as mock_ec2_client:
mock_instances = mock.Mock()
mock_sfr = {'ActiveInstances': mock_instances}
mock_describe_spot_fleet_instances = mock.Mock(return_value=mock_sfr)
mock_ec2_client.return_value = mock.Mock(describe_spot_fleet_instances=mock_describe_spot_fleet_instances)
ret = self.autoscaler.get_spot_fleet_instances('sfr-blah', region='westeros-1')
assert ret == mock_instances
def test_is_aws_launching_sfr_instances(self):
self.autoscaler.sfr = {'SpotFleetRequestConfig': {
'FulfilledCapacity': 5,
'TargetCapacity': 10,
}}
assert self.autoscaler.is_aws_launching_instances()
self.autoscaler.sfr = {'SpotFleetRequestConfig': {
'FulfilledCapacity': 10,
'TargetCapacity': 5,
}}
assert not self.autoscaler.is_aws_launching_instances()
self.autoscaler.sfr = {'SpotFleetRequestConfig': {
'FulfilledCapacity': 10,
'TargetCapacity': 10,
}}
assert not self.autoscaler.is_aws_launching_instances()
def test_is_sfr_cancelled(self):
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled'}
assert self.autoscaler.is_resource_cancelled()
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled_running'}
assert self.autoscaler.is_resource_cancelled()
self.autoscaler.sfr = {'SpotFleetRequestState': 'active'}
assert not self.autoscaler.is_resource_cancelled()
self.autoscaler.sfr = None
assert self.autoscaler.is_resource_cancelled()
def test_cancelled_running(self):
sfr = self.create_mock_sfr(fulfilled_capacity=4, request_state='cancelled_running')
resource = self.create_mock_resource(min_capacity=4, max_capacity=10)
autoscaler = self.create_autoscaler(utilization_error=0.1, resource=resource, sfr=sfr)
assert autoscaler.utilization_error == -1
assert autoscaler.resource['min_capacity'] == 0
def test_get_sfr(self):
with mock.patch('boto3.client', autospec=True) as mock_ec2_client:
mock_sfr_config = mock.Mock()
mock_sfr = {'SpotFleetRequestConfigs': [mock_sfr_config]}
mock_describe_spot_fleet_requests = mock.Mock(return_value=mock_sfr)
mock_ec2_client.return_value = mock.Mock(describe_spot_fleet_requests=mock_describe_spot_fleet_requests)
ret = self.autoscaler.get_sfr('sfr-blah', region='westeros-1')
mock_describe_spot_fleet_requests.assert_called_with(SpotFleetRequestIds=['sfr-blah'])
assert ret == mock_sfr_config
mock_error = {'Error': {'Code': 'InvalidSpotFleetRequestId.NotFound'}}
mock_describe_spot_fleet_requests = mock.Mock(side_effect=ClientError(mock_error, 'blah'))
mock_ec2_client.return_value = mock.Mock(describe_spot_fleet_requests=mock_describe_spot_fleet_requests)
ret = self.autoscaler.get_sfr('sfr-blah', region='westeros-1')
assert ret is None
def test_set_spot_fleet_request_capacity(self):
with mock.patch(
'boto3.client', autospec=True,
) as mock_ec2_client, mock.patch(
'time.sleep', autospec=True,
) as mock_sleep, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_sfr', autospec=True,
) as mock_get_sfr, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.AWS_SPOT_MODIFY_TIMEOUT', autospec=True,
):
mock_sleep.side_effect = TimeoutError()
mock_get_sfr.return_value = {'SpotFleetRequestState': 'modifying'}
mock_modify_spot_fleet_request = mock.Mock()
mock_ec2_client.return_value = mock.Mock(modify_spot_fleet_request=mock_modify_spot_fleet_request)
with raises(autoscaling_cluster_lib.FailSetResourceCapacity):
ret = self.autoscaler.set_capacity(4.1)
assert not mock_modify_spot_fleet_request.called
mock_modify_spot_fleet_request.side_effect = ClientError({'Error': {}}, 'blah')
mock_get_sfr.return_value = {'SpotFleetRequestState': 'active'}
with raises(autoscaling_cluster_lib.FailSetResourceCapacity):
ret = self.autoscaler.set_capacity(4.1)
mock_modify_spot_fleet_request.side_effect = None
ret = self.autoscaler.set_capacity(4.1)
mock_modify_spot_fleet_request.assert_called_with(
SpotFleetRequestId='sfr-blah',
TargetCapacity=4,
ExcessCapacityTerminationPolicy='noTermination',
)
assert ret is not None
assert self.autoscaler.capacity == 4.1
def test_get_instance_type_weights_sfr(self):
mock_launch_specs = [
{
'InstanceType': 'c4.blah',
'WeightedCapacity': 123,
},
{
'InstanceType': 'm4.whatever',
'WeightedCapacity': 456,
},
]
self.autoscaler.sfr = {'SpotFleetRequestConfig': {'LaunchSpecifications': mock_launch_specs}}
ret = self.autoscaler.get_instance_type_weights()
assert ret == {'c4.blah': 123, 'm4.whatever': 456}
def test_get_spot_fleet_delta(self):
resource = self.create_mock_resource(min_capacity=2, max_capacity=10)
sfr = self.create_mock_sfr(fulfilled_capacity=5)
autoscaler = self.create_autoscaler(utilization_error=-0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (5, 4)
sfr = self.create_mock_sfr(fulfilled_capacity=7.3)
autoscaler = self.create_autoscaler(utilization_error=-0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (7.3, 6)
sfr = self.create_mock_sfr(fulfilled_capacity=5)
autoscaler = self.create_autoscaler(utilization_error=0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (5, 6)
sfr = self.create_mock_sfr(fulfilled_capacity=10)
autoscaler = self.create_autoscaler(utilization_error=0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (10, 10)
sfr = self.create_mock_sfr(fulfilled_capacity=2)
autoscaler = self.create_autoscaler(utilization_error=-0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (2, 2)
resource = self.create_mock_resource(min_capacity=2, max_capacity=10)
sfr = self.create_mock_sfr(fulfilled_capacity=5, request_state='cancelled_running')
autoscaler = self.create_autoscaler(utilization_error=0.2, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (5, 4)
resource = self.create_mock_resource(min_capacity=0, max_capacity=10)
sfr = self.create_mock_sfr(fulfilled_capacity=1)
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (1, 1)
resource = self.create_mock_resource(min_capacity=0, max_capacity=100)
sfr = self.create_mock_sfr(fulfilled_capacity=20)
autoscaler = self.create_autoscaler(utilization_error=-0.5, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (20, int(floor(20 * (1.0 - autoscaling_cluster_lib.MAX_CLUSTER_DELTA))))
resource = self.create_mock_resource(min_capacity=10, max_capacity=100)
current_instances = (10 * (1 - autoscaling_cluster_lib.MAX_CLUSTER_DELTA)) - 1
sfr = self.create_mock_sfr(fulfilled_capacity=current_instances)
autoscaler = self.create_autoscaler(utilization_error=-1, resource=resource, sfr=sfr)
ret = autoscaler.get_spot_fleet_delta()
assert ret == (current_instances, 10)
def test_spotfleet_metrics_provider(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_spot_fleet_delta',
autospec=True,
) as mock_get_spot_fleet_delta, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.get_aws_slaves', autospec=True,
) as mock_get_aws_slaves, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_master', autospec=True,
) as mock_get_mesos_master, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.cleanup_cancelled_config',
autospec=True,
) as mock_cleanup_cancelled_config, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.SpotAutoscaler.is_aws_launching_instances',
autospec=True,
) as mock_is_aws_launching_sfr_instances, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.emit_metrics',
autospec=True,
) as mock_emit_metrics:
mock_get_spot_fleet_delta.return_value = 1, 2
self.autoscaler.pool_settings = {}
mock_is_aws_launching_sfr_instances.return_value = False
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(state=mock_mesos_state)
mock_get_mesos_master.return_value = mock_master
mock_slaves = ['one', 'two']
mock_get_aws_slaves.return_value = mock_slaves
# cancelled SFR
self.autoscaler.instances = [mock.Mock(), mock.Mock()]
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled'}
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_cleanup_cancelled_config.assert_called_with(self.autoscaler, 'sfr-blah', '/nail/blah', dry_run=False)
assert not mock_get_mesos_master.called
assert ret == (0, 0)
# deleted SFR
mock_cleanup_cancelled_config.reset_mock()
self.autoscaler.sfr = None
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_cleanup_cancelled_config.assert_called_with(self.autoscaler, 'sfr-blah', '/nail/blah', dry_run=False)
assert not mock_get_mesos_master.called
assert ret == (0, 0)
# active SFR
mock_cleanup_cancelled_config.reset_mock()
mock_emit_metrics.reset_mock()
self.autoscaler.sfr = {'SpotFleetRequestState': 'active'}
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_get_spot_fleet_delta.assert_called_with(self.autoscaler)
mock_emit_metrics.assert_called_once_with(
self.autoscaler, 1, 2, mesos_slave_count=len(mock_slaves),
)
assert not mock_cleanup_cancelled_config.called
assert ret == (1, 2)
# SFR scaling up with too many unregistered instances
mock_emit_metrics.reset_mock()
self.autoscaler.instances = [mock.Mock() for _ in range(10)]
with raises(autoscaling_cluster_lib.ClusterAutoscalingError):
self.autoscaler.metrics_provider(mock_mesos_state)
# SFR scaling down with many unregistered instances
self.autoscaler.utilization_error = -0.1
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_emit_metrics.assert_called_once_with(
self.autoscaler, 1, 2, mesos_slave_count=len(mock_slaves),
)
assert ret == (1, 2)
self.autoscaler.instances = [mock.Mock(), mock.Mock()]
self.autoscaler.utilization_error = 0.3
# active SFR with AWS still provisioning
mock_emit_metrics.reset_mock()
mock_get_spot_fleet_delta.reset_mock()
mock_cleanup_cancelled_config.reset_mock()
self.autoscaler.sfr = {'SpotFleetRequestState': 'active'}
mock_is_aws_launching_sfr_instances.return_value = True
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_emit_metrics.assert_called_once_with(
self.autoscaler, 1, 2, mesos_slave_count=len(mock_slaves),
)
assert ret == (1, 2)
assert mock_get_spot_fleet_delta.called
# cancelled_running SFR
mock_emit_metrics.reset_mock()
mock_cleanup_cancelled_config.reset_mock()
mock_get_spot_fleet_delta.reset_mock()
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled_running'}
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert not mock_cleanup_cancelled_config.called
assert ret == (0, 0)
mock_get_spot_fleet_delta.return_value = 2, 1
self.autoscaler.utilization_error = -0.2
ret = self.autoscaler.metrics_provider(mock_mesos_state)
mock_emit_metrics.assert_called_once_with(
self.autoscaler, 2, 0, mesos_slave_count=len(mock_slaves),
)
assert ret == (2, 0)
mock_get_spot_fleet_delta.return_value = 4, 2
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert ret == (4, 2)
# cancelled_running SFR with pool underprovisioned
self.autoscaler.utilization_error = 0.2
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert ret == (0, 0)
# SFR with no instances
mock_get_mesos_master.reset_mock()
self.autoscaler.instances = []
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert ret == (0, 0)
assert not mock_get_mesos_master.called
# unknown SFR
mock_get_mesos_master.reset_mock()
mock_cleanup_cancelled_config.reset_mock()
self.autoscaler.sfr = {'SpotFleetRequestState': 'not-a-state'}
with raises(autoscaling_cluster_lib.ClusterAutoscalingError):
ret = self.autoscaler.metrics_provider(mock_mesos_state)
assert not mock_get_mesos_master.called
class TestClusterAutoscaler(unittest.TestCase):
def setUp(self):
mock_resource = {
'id': 'sfr-blah',
'type': 'sfr',
'region': 'westeros-1',
'pool': 'default',
'min_capacity': 3,
'max_capacity': 10,
}
mock_pool_settings = {'drain_timeout': 123}
mock_config_folder = '/nail/blah'
mock_utilization_error = 0.3
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_metrics_interface',
autospec=True,
), mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.load_system_paasta_config',
autospec=True,
):
self.autoscaler = autoscaling_cluster_lib.ClusterAutoscaler(
mock_resource,
mock_pool_settings,
mock_config_folder,
False,
mock_utilization_error,
enable_metrics=True,
)
def test_emit_metrics(self):
patchers = [
mock.patch.object(self.autoscaler, gauge)
for gauge in (
'target_gauge', 'current_gauge', 'ideal_gauge', 'max_gauge',
'min_gauge', 'mesos_error_gauge', 'aws_instances_gauge',
'mesos_slaves_gauge',
)
]
with contextlib.ExitStack() as stack:
for patcher in patchers:
stack.enter_context(patcher)
self.autoscaler.ideal_capacity = 4
self.autoscaler.emit_metrics(
current_capacity=1,
target_capacity=2,
mesos_slave_count=5,
)
self.autoscaler.current_gauge.set.assert_called_once_with(1)
self.autoscaler.target_gauge.set.assert_called_once_with(2)
self.autoscaler.ideal_gauge.set.assert_called_once_with(4)
self.autoscaler.min_gauge.set.assert_called_once_with(3)
self.autoscaler.max_gauge.set.assert_called_once_with(10)
self.autoscaler.mesos_error_gauge.set.assert_called_once_with(0.3)
self.autoscaler.aws_instances_gauge.set.assert_called_once_with(0)
self.autoscaler.mesos_slaves_gauge.set.assert_called_once_with(5)
def test_describe_instance(self):
with mock.patch('boto3.client', autospec=True) as mock_ec2_client:
mock_instance_1 = mock.Mock()
mock_instance_2 = mock.Mock()
mock_instance_3 = mock.Mock()
mock_instances = {'Reservations': [{'Instances': [mock_instance_1]}, {'Instances': [mock_instance_2]}]}
mock_describe_instances = mock.Mock(return_value=mock_instances)
mock_ec2_client.return_value = mock.Mock(describe_instances=mock_describe_instances)
ret = self.autoscaler.describe_instances(
['i-1', 'i-2'],
region='westeros-1',
instance_filters=['filter1'],
)
mock_describe_instances.assert_called_with(InstanceIds=['i-1', 'i-2'], Filters=['filter1'])
assert ret == [mock_instance_1, mock_instance_2]
ret = self.autoscaler.describe_instances(['i-1', 'i-2'], region='westeros-1')
mock_describe_instances.assert_called_with(InstanceIds=['i-1', 'i-2'], Filters=[])
mock_error = {'Error': {'Code': 'InvalidInstanceID.NotFound'}}
mock_describe_instances.side_effect = ClientError(mock_error, 'blah')
ret = self.autoscaler.describe_instances(['i-1', 'i-2'], region='westeros-1')
assert ret is None
mock_instances = {'Reservations': [
{'Instances': [mock_instance_1, mock_instance_2]},
{'Instances': [mock_instance_3]},
]}
mock_describe_instances = mock.Mock(return_value=mock_instances)
mock_ec2_client.return_value = mock.Mock(describe_instances=mock_describe_instances)
ret = self.autoscaler.describe_instances(['i-1', 'i-2', 'i-3'], region='westeros-1')
assert ret == [mock_instance_1, mock_instance_2, mock_instance_3]
def test_scale_resource(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.filter_aws_slaves',
autospec=True,
) as mock_filter_aws_slaves, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_master', autospec=True,
) as mock_get_mesos_master, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_task_count_by_slave', autospec=True,
) as mock_get_mesos_task_count_by_slave, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.downscale_aws_resource',
autospec=True,
) as mock_downscale_aws_resource, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.set_capacity', autospec=True,
) as mock_set_capacity, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.terminate_instances', autospec=True,
) as mock_terminate_instances:
mock_set_capacity.return_value = True
mock_master = mock.Mock()
mock_mesos_state = mock.Mock()
mock_master.state_summary.return_value = mock_mesos_state
mock_get_mesos_master.return_value = mock_master
mock_downscale_aws_resource.side_effect = just_sleep
# test no scale
_run(self.autoscaler.scale_resource(4, 4))
assert not mock_set_capacity.called
# test scale up
_run(self.autoscaler.scale_resource(2, 4))
mock_set_capacity.assert_called_with(self.autoscaler, 4)
# test scale down
mock_slave_1 = mock.Mock(instance_weight=1.099999999)
mock_slave_2 = mock.Mock(instance_weight=2.2)
mock_sfr_sorted_slaves_1 = [mock_slave_1, mock_slave_2]
mock_filter_aws_slaves.return_value = mock_sfr_sorted_slaves_1
_run(self.autoscaler.scale_resource(3.3, 0))
assert mock_get_mesos_master.called
mock_get_mesos_task_count_by_slave.assert_called_with(
mock_mesos_state,
pool='default',
)
mock_filter_aws_slaves.assert_called_with(self.autoscaler, mock_get_mesos_task_count_by_slave.return_value)
mock_downscale_aws_resource.assert_called_with(
self.autoscaler,
filtered_slaves=mock_filter_aws_slaves.return_value,
current_capacity=3.3,
target_capacity=0,
)
mock_set_capacity.reset_mock()
# test scale down when not all slaves have joined cluster
mock_slave_1 = mock.Mock(instance_weight=0.7, instance_id='abc')
mock_slave_2 = mock.Mock(instance_weight=1.1, instance_id='def')
mock_filter_aws_slaves.return_value = [mock_slave_1, mock_slave_2]
self.autoscaler.instances = [
{'InstanceId': 'abc'},
{'InstanceId': 'def'},
{'InstanceId': 'ghi'},
]
_run(self.autoscaler.scale_resource(3.5, 1.8))
mock_set_capacity.assert_called_once_with(self.autoscaler, 1.8)
mock_terminate_instances.assert_called_once_with(
self.autoscaler,
['ghi'],
)
def test_downscale_aws_resource(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_mesos_task_count_by_slave', autospec=True,
) as mock_get_mesos_task_count_by_slave, mock.patch(
'paasta_tools.autoscaling.ec2_fitness.sort_by_ec2_fitness',
autospec=True,
) as mock_sort_slaves_to_kill, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.gracefully_terminate_slave',
autospec=True,
) as mock_gracefully_terminate_slave, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.Timer', autospec=True,
) as mock_timer:
mock_timer_value = mock.Mock()
mock_timer.return_value = mock_timer_value
mock_gracefully_terminate_slave.side_effect = just_sleep
mock_task_counts = mock.Mock()
mock_slave_1 = mock.Mock(
hostname='host1',
instance_id='i-blah123',
task_counts=mock_task_counts,
instance_weight=1,
)
mock_slave_2 = mock.Mock(
hostname='host2',
instance_id='i-blah456',
task_counts=mock_task_counts,
instance_weight=2,
)
mock_get_mesos_task_count_by_slave.return_value = [{'task_counts': mock_task_counts}]
self.autoscaler.resource = {'type': 'aws_spot_fleet_request', 'sfr': {'SpotFleetRequestState': 'active'}}
self.autoscaler.sfr = {'SpotFleetRequestState': 'active'}
mock_filtered_slaves = mock.Mock()
mock_sfr_sorted_slaves_1 = [mock_slave_1, mock_slave_2]
mock_sfr_sorted_slaves_2 = [mock_slave_2]
# test we kill only one instance on scale down and then reach capacity
mock_sort_slaves_to_kill.return_value = mock_sfr_sorted_slaves_2
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=5,
target_capacity=4,
))
assert mock_gracefully_terminate_slave.call_count == 1
mock_gracefully_terminate_slave.reset_mock()
            # test we always kill at least one SFR instance, to avoid getting wedged
mock_slave_1 = mock.Mock(
hostname='host1',
instance_id='i-blah123',
task_counts=mock_task_counts,
instance_weight=0.3,
)
mock_slave_2 = mock.Mock(
hostname='host2',
instance_id='i-blah456',
task_counts=mock_task_counts,
instance_weight=2,
)
mock_sort_slaves_to_kill.return_value = mock_sfr_sorted_slaves_2
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=5,
target_capacity=4,
))
assert mock_gracefully_terminate_slave.call_count == 1
mock_gracefully_terminate_slave.reset_mock()
# but not if it takes us to setting 0 capacity
mock_slave_1 = mock.Mock(
hostname='host1',
instance_id='i-blah123',
task_counts=mock_task_counts,
instance_weight=1.1,
)
mock_slave_2 = mock.Mock(
hostname='host2',
instance_id='i-blah456',
task_counts=mock_task_counts,
instance_weight=2,
)
mock_sort_slaves_to_kill.return_value = mock_sfr_sorted_slaves_2
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=2,
target_capacity=1,
))
assert not mock_gracefully_terminate_slave.called
mock_gracefully_terminate_slave.reset_mock()
# unless this is a cancelled SFR in which case we can go to 0
self.autoscaler.sfr = {'SpotFleetRequestState': 'cancelled'}
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=2,
target_capacity=1,
))
assert mock_gracefully_terminate_slave.call_count == 1
mock_gracefully_terminate_slave.reset_mock()
            # test we stop if FailSetResourceCapacity is raised
mock_slave_1 = mock.Mock(
hostname='host1',
instance_id='i-blah123',
task_counts=mock_task_counts,
instance_weight=1,
)
mock_terminate_call_1 = mock.call(
self.autoscaler,
slave_to_kill=mock_slave_1,
capacity_diff=-1,
timer=mock_timer_value,
)
mock_terminate_call_2 = mock.call(
self.autoscaler,
slave_to_kill=mock_slave_2,
capacity_diff=-2,
timer=mock_timer_value,
)
            # for the scenario where draining slave 1 fails with an HTTPError
mock_terminate_call_3 = mock.call(
self.autoscaler,
slave_to_kill=mock_slave_2,
capacity_diff=-2,
timer=mock_timer_value,
)
mock_gracefully_terminate_slave.side_effect = get_coro_with_exception(
autoscaling_cluster_lib.FailSetResourceCapacity,
)
mock_sfr_sorted_slaves_1 = [mock_slave_2, mock_slave_1]
mock_sfr_sorted_slaves_2 = [mock_slave_2]
mock_sort_slaves_to_kill.side_effect = [
mock_sfr_sorted_slaves_1,
mock_sfr_sorted_slaves_2,
[],
]
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=5,
target_capacity=2,
))
mock_gracefully_terminate_slave.assert_has_calls([mock_terminate_call_1])
# test continue if HTTPError
mock_gracefully_terminate_slave.reset_mock()
mock_gracefully_terminate_slave.side_effect = get_coro_with_exception(HTTPError)
mock_sfr_sorted_slaves_1 = [mock_slave_2, mock_slave_1]
mock_sfr_sorted_slaves_2 = [mock_slave_2]
mock_sort_slaves_to_kill.side_effect = [
mock_sfr_sorted_slaves_1,
mock_sfr_sorted_slaves_2,
[],
]
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=5,
target_capacity=2,
))
mock_gracefully_terminate_slave.assert_has_calls([
mock_terminate_call_1,
mock_terminate_call_3,
])
# test normal scale down
mock_gracefully_terminate_slave.side_effect = just_sleep
mock_gracefully_terminate_slave.reset_mock()
mock_get_mesos_task_count_by_slave.reset_mock()
mock_sort_slaves_to_kill.reset_mock()
mock_sfr_sorted_slaves_1 = [mock_slave_2, mock_slave_1]
mock_sfr_sorted_slaves_2 = [mock_slave_2]
mock_sort_slaves_to_kill.side_effect = [
mock_sfr_sorted_slaves_1,
mock_sfr_sorted_slaves_2,
[],
]
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=5,
target_capacity=2,
))
mock_gracefully_terminate_slave.assert_has_calls([
mock_terminate_call_1,
mock_terminate_call_2,
])
            # test non-integer scale down
# this should result in killing 3 instances,
# leaving us on 7.1 provisioned of target 7
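            # (interpretation: each kill removes weight 0.3, so after 3 kills
            # capacity is 8 - 3 * 0.3 = 7.1 >= 7, while a 4th kill would drop
            # us below the target of 7)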
mock_slave_1 = mock.Mock(
hostname='host1',
instance_id='i-blah123',
instance_weight=0.3,
)
mock_gracefully_terminate_slave.side_effect = just_sleep
mock_gracefully_terminate_slave.reset_mock()
mock_get_mesos_task_count_by_slave.reset_mock()
mock_sort_slaves_to_kill.reset_mock()
mock_sfr_sorted_slaves = [mock_slave_1] * 10
mock_sort_slaves_to_kill.side_effect = [mock_sfr_sorted_slaves] + \
[mock_sfr_sorted_slaves[x:-1] for x in range(0, 10)]
mock_get_mesos_task_count_by_slave.return_value = [
{'task_counts': mock_slave_1}
for x in range(0, 9)
]
_run(self.autoscaler.downscale_aws_resource(
filtered_slaves=mock_filtered_slaves,
current_capacity=8,
target_capacity=7,
))
assert mock_gracefully_terminate_slave.call_count == 3
def test_filter_instance_description_for_ip(self):
fake_description = [{'PrivateIpAddress': '10.1.1.1'}, {'PrivateIpAddress': '10.1.1.2'}]
actual = self.autoscaler.filter_instance_description_for_ip('10.1.1.1', fake_description)
assert actual == [fake_description[0]]
def test_filter_instance_status_for_instance_id(self):
fake_status = {'InstanceStatuses': [{'InstanceId': 'foo'}, {'InstanceId': 'bar'}]}
actual = self.autoscaler.filter_instance_status_for_instance_id(
instance_id='foo',
instance_statuses=fake_status,
)
assert actual == [fake_status['InstanceStatuses'][0]]
def test_instance_status_for_instance_ids_batches_calls(self):
instance_ids = [{'foo': i} for i in range(0, 100)]
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.describe_instance_status',
autospec=True,
) as mock_describe_instance_status:
mock_describe_instance_status.return_value = {'InstanceStatuses': [{'foo': 'bar'}]}
res = self.autoscaler.instance_status_for_instance_ids(instance_ids=instance_ids)
assert len(res['InstanceStatuses']) == 2
def test_gracefully_terminate_slave(self):
with mock.patch(
'time.time', autospec=True,
) as mock_time, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.drain', autospec=True,
) as mock_drain, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.undrain', autospec=True,
) as mock_undrain, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.wait_and_terminate',
autospec=True,
) as mock_wait_and_terminate, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.set_capacity',
autospec=True,
) as mock_set_capacity:
mock_timer = mock.Mock()
mock_wait_and_terminate.side_effect = just_sleep
self.autoscaler.resource = {'id': 'sfr-blah', 'region': 'westeros-1', 'type': 'sfr'}
mock_time.return_value = int(1)
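            # expected drain start: mocked time.time() (1) plus the pool's
            # drain_timeout (123), converted to nanoseconds -- an
            # interpretation of the mocked values, not a comment from the
            # original source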
mock_start = (1 + 123) * 1000000000
mock_slave = mock.Mock(
hostname='host1',
instance_id='i-blah123',
pid='slave(1)@10.1.1.1:5051',
instance_weight=1,
ip='10.1.1.1',
instance_status={'SystemStatus': {'Status': 'ok'}, 'InstanceStatus': {'Status': 'ok'}},
)
self.autoscaler.capacity = 5
_run(self.autoscaler.gracefully_terminate_slave(
slave_to_kill=mock_slave,
capacity_diff=-1,
timer=mock_timer,
))
def _set_capacity(self, capacity):
self.capacity = capacity
mock_set_capacity.side_effect = _set_capacity
mock_drain.assert_called_with(['host1|10.1.1.1'], mock_start, 600 * 1000000000)
set_call_1 = mock.call(self.autoscaler, 4)
mock_set_capacity.assert_has_calls([set_call_1])
mock_wait_and_terminate.assert_called_with(
self.autoscaler, slave=mock_slave, drain_timeout=123, dry_run=False,
region='westeros-1', should_drain=True, timer=mock_timer,
)
            # test we clean up if a termination fails
mock_set_capacity.reset_mock()
set_call_2 = mock.call(self.autoscaler, 5)
mock_wait_and_terminate.side_effect = get_coro_with_exception(ClientError({'Error': {}}, 'blah'))
self.autoscaler.capacity = 5
_run(self.autoscaler.gracefully_terminate_slave(
slave_to_kill=mock_slave,
capacity_diff=-1,
timer=mock_timer,
))
mock_drain.assert_called_with(['host1|10.1.1.1'], mock_start, 600 * 1000000000)
mock_set_capacity.assert_has_calls([set_call_1, set_call_2])
mock_wait_and_terminate.assert_called_with(
self.autoscaler, slave=mock_slave, drain_timeout=123, dry_run=False,
region='westeros-1', should_drain=True, timer=mock_timer,
)
mock_undrain.assert_called_with(['host1|10.1.1.1'])
            # test we clean up if setting the spot capacity fails
mock_wait_and_terminate.side_effect = just_sleep
mock_wait_and_terminate.reset_mock()
mock_set_capacity.side_effect = autoscaling_cluster_lib.FailSetResourceCapacity
with raises(autoscaling_cluster_lib.FailSetResourceCapacity):
self.autoscaler.capacity = 5
_run(self.autoscaler.gracefully_terminate_slave(
slave_to_kill=mock_slave,
capacity_diff=-1,
timer=mock_timer,
))
mock_drain.assert_called_with(['host1|10.1.1.1'], mock_start, 600 * 1000000000)
mock_set_capacity.assert_has_calls([set_call_1])
mock_undrain.assert_called_with(['host1|10.1.1.1'])
assert not mock_wait_and_terminate.called
            # test we clean up if a drain fails
mock_wait_and_terminate.side_effect = None
mock_set_capacity.side_effect = None
mock_set_capacity.reset_mock()
mock_drain.side_effect = HTTPError
with raises(HTTPError):
self.autoscaler.capacity = 5
_run(self.autoscaler.gracefully_terminate_slave(
slave_to_kill=mock_slave,
capacity_diff=-1,
timer=mock_timer,
))
mock_drain.assert_called_with(['host1|10.1.1.1'], mock_start, 600 * 1000000000)
assert not mock_set_capacity.called
assert not mock_wait_and_terminate.called
def test_wait_and_terminate(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.terminate_instances', autospec=True,
) as mock_terminate_instances, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.asyncio.sleep', autospec=True,
) as mock_sleep, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.is_safe_to_kill', autospec=True,
) as mock_is_safe_to_kill:
mock_timer = mock.Mock()
mock_timer.ready = lambda: False
mock_sleep.side_effect = just_sleep
mock_is_safe_to_kill.return_value = True
mock_slave_to_kill = mock.Mock(
hostname='hostblah',
instance_id='i-blah123',
pid='slave(1)@10.1.1.1:5051',
ip='10.1.1.1',
)
_run(self.autoscaler.wait_and_terminate(
slave=mock_slave_to_kill, drain_timeout=600, dry_run=False, timer=mock_timer,
region='westeros-1', should_drain=True,
))
mock_terminate_instances.assert_called_with(self.autoscaler, ['i-blah123'])
mock_is_safe_to_kill.assert_called_with('hostblah')
mock_is_safe_to_kill.side_effect = [False, False, True]
_run(self.autoscaler.wait_and_terminate(
slave=mock_slave_to_kill, drain_timeout=600, dry_run=False, timer=mock_timer,
region='westeros-1', should_drain=True,
))
assert mock_is_safe_to_kill.call_count == 4
def test_get_instance_ips(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.describe_instances',
autospec=True,
) as mock_describe_instances:
mock_instance_ids = [{'InstanceId': 'i-blah1'}, {'InstanceId': 'i-blah2'}]
mock_instances = [{'PrivateIpAddress': '10.1.1.1'}, {'PrivateIpAddress': '10.2.2.2'}]
mock_describe_instances.return_value = mock_instances
ret = self.autoscaler.get_instance_ips(mock_instance_ids, region='westeros-1')
mock_describe_instances.assert_called_with(self.autoscaler, ['i-blah1', 'i-blah2'], region='westeros-1')
assert ret == ['10.1.1.1', '10.2.2.2']
def mock_pid_to_ip_side(self, pid):
return {
'slave(1)@10.1.1.1:5051': '10.1.1.1',
'slave(2)@10.2.2.2:5051': '10.2.2.2',
'slave(3)@10.3.3.3:5051': '10.3.3.3',
}[pid]
def test_filter_aws_slaves(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.get_instance_ips',
autospec=True,
) as mock_get_instance_ips, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.slave_pid_to_ip', autospec=True,
) as mock_pid_to_ip, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.describe_instances',
autospec=True,
) as mock_describe_instances, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.get_instance_type_weights',
autospec=True,
) as mock_get_instance_type_weights, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.PaastaAwsSlave', autospec=True,
) as mock_paasta_aws_slave, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.describe_instance_status',
autospec=True,
) as mock_describe_instance_status:
mock_get_instance_ips.return_value = ['10.1.1.1', '10.3.3.3']
mock_pid_to_ip.side_effect = self.mock_pid_to_ip_side
mock_instances = [
{
'InstanceId': 'i-1',
'InstanceType': 'c4.blah',
'PrivateIpAddress': '10.1.1.1',
},
{
'InstanceId': 'i-2',
'InstanceType': 'm4.whatever',
'PrivateIpAddress': '10.3.3.3',
},
{
'InstanceId': 'i-3',
'InstanceType': 'm4.whatever',
'PrivateIpAddress': '10.1.1.3',
},
]
self.autoscaler.instances = mock_instances
mock_describe_instances.return_value = mock_instances
mock_instance_status = {
'InstanceStatuses': [
{'InstanceId': 'i-1'},
{'InstanceId': 'i-2'},
{'InstanceId': 'i-3'},
],
}
mock_describe_instance_status.return_value = mock_instance_status
mock_slave_1 = {
'task_counts': SlaveTaskCount(
slave={
'pid': 'slave(1)@10.1.1.1:5051',
'id': '123',
'hostname': 'host123',
},
count=0,
chronos_count=0,
),
}
mock_slave_2 = {
'task_counts': SlaveTaskCount(
slave={
'pid': 'slave(2)@10.2.2.2:5051',
'id': '456',
'hostname': 'host456',
},
count=0,
chronos_count=0,
),
}
mock_slave_3 = {
'task_counts': SlaveTaskCount(
slave={
'pid': 'slave(3)@10.3.3.3:5051',
'id': '789',
'hostname': 'host789',
},
count=0,
chronos_count=0,
),
}
mock_sfr_sorted_slaves = [mock_slave_1, mock_slave_2, mock_slave_3]
mock_get_ip_call_1 = mock.call('slave(1)@10.1.1.1:5051')
mock_get_ip_call_2 = mock.call('slave(2)@10.2.2.2:5051')
mock_get_ip_call_3 = mock.call('slave(3)@10.3.3.3:5051')
ret = self.autoscaler.filter_aws_slaves(mock_sfr_sorted_slaves)
mock_get_instance_ips.assert_called_with(self.autoscaler, mock_instances, region='westeros-1')
mock_pid_to_ip.assert_has_calls([mock_get_ip_call_1, mock_get_ip_call_2, mock_get_ip_call_3])
mock_describe_instances.assert_called_with(
self.autoscaler,
instance_ids=[],
region='westeros-1',
instance_filters=[{
'Values': ['10.1.1.1', '10.3.3.3'],
'Name': 'private-ip-address',
}],
)
mock_get_instance_type_weights.assert_called_with(self.autoscaler)
mock_aws_slave_call_1 = mock.call(
slave=mock_slave_1,
instance_status=mock_instance_status['InstanceStatuses'][0],
instance_description=mock_instances[0],
instance_type_weights=mock_get_instance_type_weights.return_value,
)
mock_aws_slave_call_2 = mock.call(
slave=mock_slave_3,
instance_status=mock_instance_status['InstanceStatuses'][1],
instance_description=mock_instances[1],
instance_type_weights=mock_get_instance_type_weights.return_value,
)
mock_paasta_aws_slave.assert_has_calls([mock_aws_slave_call_1, mock_aws_slave_call_2])
assert len(ret) == 2
def test_get_aws_slaves(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.get_instance_ips',
autospec=True,
) as mock_get_instance_ips, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.slave_pid_to_ip', autospec=True,
) as mock_slave_pid_to_ip:
mock_slave_pid_to_ip.side_effect = pid_to_ip_sideeffect
mock_get_instance_ips.return_value = ['10.1.1.1', '10.3.3.3', '10.4.4.4']
self.autoscaler.instances = [mock.Mock(), mock.Mock(), mock.Mock()]
mock_mesos_state = {'slaves': [
{
'id': 'id1',
'attributes': {'pool': 'default'},
'pid': 'pid1',
},
{
'id': 'id2',
'attributes': {'pool': 'default'},
'pid': 'pid2',
},
{
'id': 'id3',
'attributes': {'pool': 'notdefault'},
'pid': 'pid3',
},
]}
ret = self.autoscaler.get_aws_slaves(mock_mesos_state)
mock_get_instance_ips.assert_called_with(self.autoscaler, self.autoscaler.instances, region='westeros-1')
assert ret == {'id1': mock_mesos_state['slaves'][0]}
def test_cleanup_cancelled_config(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.os.walk', autospec=True,
) as mock_os_walk, mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.os.remove', autospec=True,
) as mock_os_remove:
mock_os_walk.return_value = [
('/nail/blah', [], ['sfr-blah.json', 'sfr-another.json']),
('/nail/another', [], ['something']),
]
self.autoscaler.cleanup_cancelled_config('sfr-blah', '/nail')
mock_os_walk.assert_called_with('/nail')
mock_os_remove.assert_called_with('/nail/blah/sfr-blah.json')
mock_os_remove.reset_mock()
self.autoscaler.cleanup_cancelled_config('sfr-blah-not-exist', '/nail')
assert not mock_os_remove.called
    def test_instance_descriptions_for_ips_splits_ips(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.ClusterAutoscaler.describe_instances',
autospec=True,
) as mock_describe_instances:
ips = list(range(567))
def mock_describe_instance(self, instance_ids, region, instance_filters):
return instance_filters[0]['Values']
mock_describe_instances.side_effect = mock_describe_instance
ret = self.autoscaler.instance_descriptions_for_ips(ips)
assert len(ret) == 567
assert ret == ips
assert mock_describe_instances.call_count == 3
def test_terminate_instances(self):
with mock.patch('boto3.client', autospec=True) as mock_boto_client:
mock_terminate_instances = mock.Mock()
mock_boto_client.return_value = mock.Mock(
terminate_instances=mock_terminate_instances,
)
instances_to_terminate = ['abc', 'def']
self.autoscaler.terminate_instances(instances_to_terminate)
mock_boto_client.assert_called_once_with(
'ec2',
region_name=self.autoscaler.resource['region'],
)
mock_terminate_instances.assert_called_once_with(
InstanceIds=instances_to_terminate,
DryRun=False,
)
# DryRunOperation error should be swallowed during dry run
self.autoscaler.dry_run = True
mock_terminate_instances.side_effect = ClientError(
{'Error': {'Code': 'DryRunOperation'}},
'TerminateInstances',
)
self.autoscaler.terminate_instances(instances_to_terminate)
class TestPaastaAwsSlave(unittest.TestCase):
def setUp(self):
with mock.patch(
'paasta_tools.autoscaling.autoscaling_cluster_lib.get_instances_from_ip', autospec=True,
) as mock_get_instances_from_ip:
mock_get_instances_from_ip.return_value = [{'InstanceId': 'i-1'}]
self.mock_instances = [
{
'InstanceId': 'i-1',
'InstanceType': 'c4.blah',
},
{
'InstanceId': 'i-2',
'InstanceType': 'm4.whatever',
},
{
'InstanceId': 'i-3',
'InstanceType': 'm4.whatever',
},
]
self.mock_slave_1 = {
'task_counts': SlaveTaskCount(
slave={
'pid': 'slave(1)@10.1.1.1:5051',
'id': '123',
'hostname': 'host123',
},
count=0,
chronos_count=0,
),
}
mock_instance_type_weights = {'c4.blah': 2, 'm4.whatever': 5}
self.mock_slave = autoscaling_cluster_lib.PaastaAwsSlave(
slave=self.mock_slave_1,
instance_description=self.mock_instances[0],
instance_type_weights=mock_instance_type_weights,
)
self.mock_asg_slave = autoscaling_cluster_lib.PaastaAwsSlave(
slave=self.mock_slave_1,
instance_description=self.mock_instances,
instance_type_weights=None,
)
mock_get_instances_from_ip.return_value = []
self.mock_slave_no_instance = autoscaling_cluster_lib.PaastaAwsSlave(
slave=self.mock_slave_1,
instance_description=self.mock_instances,
instance_type_weights=None,
)
mock_get_instances_from_ip.return_value = [{'InstanceId': 'i-1'}, {'InstanceId': 'i-2'}]
self.mock_slave_extra_instance = autoscaling_cluster_lib.PaastaAwsSlave(
slave=self.mock_slave_1,
instance_description=self.mock_instances,
instance_type_weights=None,
)
def test_instance_id(self):
assert self.mock_slave.instance_id == 'i-1'
def test_hostname(self):
assert self.mock_slave.hostname == 'host123'
def test_pid(self):
assert self.mock_slave.pid == 'slave(1)@10.1.1.1:5051'
def test_instance_type(self):
assert self.mock_slave.instance_type == 'c4.blah'
def test_instance_weight(self):
assert self.mock_slave.instance_weight == 2
assert self.mock_asg_slave.instance_weight == 1
|
apache-2.0
| 1,203,697,620,421,312,300 | 43.845024 | 119 | 0.593016 | false |
tensorflow/quantum
|
tensorflow_quantum/core/ops/tfq_simulate_ops.py
|
1
|
6879
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to register python op gradient."""
import tensorflow as tf
from tensorflow_quantum.core.ops.load_module import load_module
SIM_OP_MODULE = load_module("_tfq_simulate_ops.so")
def tfq_simulate_expectation(programs, symbol_names, symbol_values, pauli_sums):
"""Calculate the expectation value of circuits wrt some operator(s)
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed.
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
containing the string representation of the operators that will
be used on all of the circuits in the expectation calculations.
Returns:
`tf.Tensor` with shape [batch_size, n_ops] that holds the
expectation value for each circuit with each op applied to it
(after resolving the corresponding parameters in).
"""
return SIM_OP_MODULE.tfq_simulate_expectation(
programs, symbol_names, tf.cast(symbol_values, tf.float32), pauli_sums)
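# A minimal usage sketch (hedged: `cirq`, `sympy` and the public helper
# `tfq.convert_to_tensor` for serializing circuits/operators are assumed to be
# available; the circuit and operator below are illustrative only):
#   import cirq, sympy
#   import tensorflow_quantum as tfq
#   q = cirq.GridQubit(0, 0)
#   programs = tfq.convert_to_tensor(
#       [cirq.Circuit(cirq.X(q) ** sympy.Symbol('alpha'))])
#   symbol_names = tf.convert_to_tensor(['alpha'])
#   symbol_values = tf.convert_to_tensor([[0.5]])
#   pauli_sums = tfq.convert_to_tensor([[cirq.Z(q)]])
#   # -> tensor of shape [1, 1] holding the expectation values
#   expectations = tfq_simulate_expectation(
#       programs, symbol_names, symbol_values, pauli_sums)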
def tfq_simulate_state(programs, symbol_names, symbol_values):
"""Returns the state of the programs using the C++ state vector simulator.
Simulate the final state of `programs` given `symbol_values` are placed
inside of the symbols with the name in `symbol_names` in each circuit.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed.
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
Returns:
A `tf.Tensor` containing the final state of each circuit in `programs`.
"""
return SIM_OP_MODULE.tfq_simulate_state(programs, symbol_names,
tf.cast(symbol_values, tf.float32))
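# Hedged sketch: reusing the tensors from the sketch above (minus pauli_sums),
#   states = tfq_simulate_state(programs, symbol_names, symbol_values)
# returns the final state vector of each resolved circuit.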
def tfq_simulate_samples(programs, symbol_names, symbol_values, num_samples):
"""Generate samples using the C++ state vector simulator.
Simulate the final state of `programs` given `symbol_values` are placed
inside of the symbols with the name in `symbol_names` in each circuit.
From there we will then sample from the final state using native tensorflow
operations.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed.
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
num_samples: `tf.Tensor` with one element indicating the number of
samples to draw.
Returns:
A `tf.Tensor` containing the samples taken from each circuit in
`programs`.
"""
return SIM_OP_MODULE.tfq_simulate_samples(
programs, symbol_names, tf.cast(symbol_values, tf.float32), num_samples)
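# Hedged sketch: `num_samples` here is a single-element tensor, e.g.
#   samples = tfq_simulate_samples(
#       programs, symbol_names, symbol_values, tf.convert_to_tensor([100]))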
def tfq_simulate_sampled_expectation(programs, symbol_names, symbol_values,
pauli_sums, num_samples):
"""Calculate the expectation value of circuits using samples.
Simulate the final state of `programs` given `symbol_values` are placed
inside of the symbols with the name in `symbol_names` in each circuit.
    Then, sample the resulting state `num_samples` times and use these samples
to compute expectation values of the given `pauli_sums`.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed.
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
containing the string representation of the operators that will
be used on all of the circuits in the expectation calculations.
        num_samples: `tf.Tensor` where `num_samples[i][j]` is equal to the
number of samples to draw in each term of `pauli_sums[i][j]`
when estimating the expectation. Therefore, `num_samples` must
have the same shape as `pauli_sums`.
Returns:
`tf.Tensor` with shape [batch_size, n_ops] that holds the
expectation value for each circuit with each op applied to it
(after resolving the corresponding parameters in).
"""
return SIM_OP_MODULE.tfq_simulate_sampled_expectation(
programs, symbol_names, tf.cast(symbol_values, tf.float32), pauli_sums,
tf.cast(num_samples, dtype=tf.int32))
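# Hedged sketch: `num_samples` mirrors the shape of `pauli_sums`, e.g. with the
# tensors from the expectation sketch above:
#   num_samples = tf.convert_to_tensor([[1000]])
#   sampled = tfq_simulate_sampled_expectation(
#       programs, symbol_names, symbol_values, pauli_sums, num_samples)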
|
apache-2.0
| 2,645,374,472,230,637,000 | 49.955556 | 80 | 0.671319 | false |
census-instrumentation/opencensus-python
|
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py
|
1
|
17658
|
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import platform
import re
import string
import threading
from datetime import datetime
import google.auth
from google.api_core.gapic_v1 import client_info
from google.cloud import monitoring_v3
from opencensus.common import utils
from opencensus.common.monitored_resource import (
aws_identity_doc_utils,
gcp_metadata_config,
k8s_utils,
monitored_resource,
)
from opencensus.common.version import __version__
from opencensus.metrics import label_key, label_value, transport
from opencensus.metrics.export import metric as metric_module
from opencensus.metrics.export import metric_descriptor
from opencensus.stats import stats
MAX_TIME_SERIES_PER_UPLOAD = 200
OPENCENSUS_TASK = "opencensus_task"
OPENCENSUS_TASK_DESCRIPTION = "Opencensus task identifier"
DEFAULT_DISPLAY_NAME_PREFIX = "OpenCensus"
ERROR_BLANK_PROJECT_ID = "expecting a non-blank ProjectID"
CONS_NAME = "name"
CONS_TIME_SERIES = "timeseries"
EPOCH_DATETIME = datetime(1970, 1, 1)
EPOCH_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ"
GLOBAL_RESOURCE_TYPE = 'global'
# OC metric descriptor type to SD metric kind and value type
OC_MD_TO_SD_TYPE = {
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64:
(monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE,
monitoring_v3.enums.MetricDescriptor.ValueType.INT64),
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE:
(monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE,
monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE),
metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION:
(monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE,
monitoring_v3.enums.MetricDescriptor.ValueType.DISTRIBUTION),
metric_descriptor.MetricDescriptorType.GAUGE_INT64:
(monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE,
monitoring_v3.enums.MetricDescriptor.ValueType.INT64),
metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE:
(monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE,
monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
}
class Options(object):
"""Exporter configuration options.
`resource` is an optional field that represents the Stackdriver monitored
resource type. If unset, this defaults to a `MonitoredResource` with type
"global" and no resource labels.
`default_monitoring_labels` are labels added to every metric created by
this exporter. If unset, this defaults to a single label with key
"opencensus_task" and value "py-<pid>@<hostname>". This default ensures
that the set of labels together with the default resource (global) are
unique to this process, as required by stackdriver.
If you set `default_monitoring_labels`, make sure that the `resource`
field together with these labels is unique to the current process. This is
to ensure that there is only a single writer to each time series in
Stackdriver.
Set `default_monitoring_labels` to `{}` to avoid getting the default
"opencensus_task" label. You should only do this if you know that
`resource` uniquely identifies this process.
:type project_id: str
    :param project_id: The ID of the GCP project to export metrics to; falls
        back to the project from the default application credentials if unset.
:type resource: str
:param resource: The stackdriver monitored resource type, defaults to
global.
:type metric_prefix: str
:param metric_prefix: Custom prefix for metric name and type.
:type default_monitoring_labels: dict(
:class:`opencensus.metrics.label_key.LabelKey`,
:class:`opencensus.metrics.label_value.LabelValue`)
:param default_monitoring_labels: Default labels to be set on each exported
metric.
"""
def __init__(self,
project_id="",
resource="",
metric_prefix="",
default_monitoring_labels=None):
self.project_id = project_id
self.resource = resource
self.metric_prefix = metric_prefix
if default_monitoring_labels is None:
self.default_monitoring_labels = {
label_key.LabelKey(OPENCENSUS_TASK,
OPENCENSUS_TASK_DESCRIPTION):
label_value.LabelValue(get_task_value())
}
else:
for key, val in default_monitoring_labels.items():
if not isinstance(key, label_key.LabelKey):
raise TypeError
if not isinstance(val, label_value.LabelValue):
raise TypeError
self.default_monitoring_labels = default_monitoring_labels
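# Hedged configuration example (the project id, prefix and label below are
# illustrative placeholders, not values taken from this module):
#   service_key = label_key.LabelKey('service', 'reporting service name')
#   options = Options(
#       project_id='my-gcp-project',
#       metric_prefix='myapp',
#       default_monitoring_labels={
#           service_key: label_value.LabelValue('my-service'),
#       },
#   )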
class StackdriverStatsExporter(object):
"""Stats exporter for the Stackdriver Monitoring backend."""
def __init__(self, options=None, client=None):
if options is None:
options = Options()
self._options = options
self._client = client
self._md_cache = {}
self._md_lock = threading.Lock()
@property
def options(self):
return self._options
@property
def client(self):
return self._client
def export_metrics(self, metrics):
metrics = list(metrics)
for metric in metrics:
self.register_metric_descriptor(metric.descriptor)
ts_batches = self.create_batched_time_series(metrics)
for ts_batch in ts_batches:
self.client.create_time_series(
self.client.project_path(self.options.project_id), ts_batch)
def create_batched_time_series(self, metrics,
batch_size=MAX_TIME_SERIES_PER_UPLOAD):
time_series_list = itertools.chain.from_iterable(
self.create_time_series_list(metric) for metric in metrics)
return list(utils.window(time_series_list, batch_size))
def create_time_series_list(self, metric):
if not isinstance(metric, metric_module.Metric): # pragma: NO COVER
raise ValueError
return [self._convert_series(metric, ts) for ts in metric.time_series]
def _convert_series(self, metric, ts):
"""Convert an OC timeseries to a SD series."""
series = monitoring_v3.types.TimeSeries()
series.metric.type = self.get_metric_type(metric.descriptor)
for lk, lv in self.options.default_monitoring_labels.items():
series.metric.labels[lk.key] = lv.value
for key, val in zip(metric.descriptor.label_keys, ts.label_values):
if val.value is not None:
safe_key = sanitize_label(key.key)
series.metric.labels[safe_key] = val.value
set_monitored_resource(series, self.options.resource)
for point in ts.points:
sd_point = series.points.add()
# this just modifies points, no return
self._convert_point(metric, ts, point, sd_point)
return series
def _convert_point(self, metric, ts, point, sd_point):
"""Convert an OC metric point to a SD point."""
if (metric.descriptor.type == metric_descriptor.MetricDescriptorType
.CUMULATIVE_DISTRIBUTION):
sd_dist_val = sd_point.value.distribution_value
sd_dist_val.count = point.value.count
sd_dist_val.sum_of_squared_deviation =\
point.value.sum_of_squared_deviation
sd_dist_val.mean = point.value.sum / sd_dist_val.count
assert sd_dist_val.bucket_options.explicit_buckets.bounds == []
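            # The prepended 0.0 bound (with the matching 0 count below) makes
            # the implicit (-inf, 0) underflow bucket explicit with a zero
            # count -- an interpretation added here, not part of the original
            # source.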
sd_dist_val.bucket_options.explicit_buckets.bounds.extend(
[0.0] +
list(map(float, point.value.bucket_options.type_.bounds))
)
assert sd_dist_val.bucket_counts == []
sd_dist_val.bucket_counts.extend(
[0] +
[bb.count for bb in point.value.buckets]
)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
# TODO: handle SUMMARY metrics, #567
else: # pragma: NO COVER
raise TypeError("Unsupported metric type: {}"
.format(metric.descriptor.type))
end = point.timestamp
if ts.start_timestamp is None:
start = end
else:
start = datetime.strptime(ts.start_timestamp, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
sd_point.interval.end_time.seconds = int(timestamp_end)
secs = sd_point.interval.end_time.seconds
sd_point.interval.end_time.nanos = int((timestamp_end - secs) * 1e9)
start_time = sd_point.interval.start_time
start_time.seconds = int(timestamp_start)
start_time.nanos = int((timestamp_start - start_time.seconds) * 1e9)
def get_metric_type(self, oc_md):
"""Get a SD metric type for an OC metric descriptor."""
return namespaced_view_name(oc_md.name, self.options.metric_prefix)
def get_metric_descriptor(self, oc_md):
"""Convert an OC metric descriptor to a SD metric descriptor."""
try:
metric_kind, value_type = OC_MD_TO_SD_TYPE[oc_md.type]
except KeyError:
raise TypeError("Unsupported metric type: {}".format(oc_md.type))
if self.options.metric_prefix:
display_name_prefix = self.options.metric_prefix
else:
display_name_prefix = DEFAULT_DISPLAY_NAME_PREFIX
desc_labels = new_label_descriptors(
self.options.default_monitoring_labels, oc_md.label_keys)
descriptor = monitoring_v3.types.MetricDescriptor(labels=desc_labels)
metric_type = self.get_metric_type(oc_md)
descriptor.type = metric_type
descriptor.metric_kind = metric_kind
descriptor.value_type = value_type
descriptor.description = oc_md.description
descriptor.unit = oc_md.unit
descriptor.name = ("projects/{}/metricDescriptors/{}"
.format(self.options.project_id, metric_type))
descriptor.display_name = ("{}/{}"
.format(display_name_prefix, oc_md.name))
return descriptor
def register_metric_descriptor(self, oc_md):
"""Register a metric descriptor with stackdriver."""
metric_type = self.get_metric_type(oc_md)
with self._md_lock:
if metric_type in self._md_cache:
return self._md_cache[metric_type]
descriptor = self.get_metric_descriptor(oc_md)
project_name = self.client.project_path(self.options.project_id)
sd_md = self.client.create_metric_descriptor(project_name, descriptor)
with self._md_lock:
self._md_cache[metric_type] = sd_md
return sd_md
def set_monitored_resource(series, option_resource_type):
"""Set this series' monitored resource and labels.
:param series: TimeSeries object based on view data
:param option_resource_type: Resource is an optional field that
represents the Stackdriver MonitoredResource type.
"""
if option_resource_type != "":
series.resource.type = option_resource_type
return
resource = monitored_resource.get_instance()
if resource is None:
series.resource.type = GLOBAL_RESOURCE_TYPE
return
resource_type = resource.get_type()
resource_labels = resource.get_labels()
def set_attribute_label(attribute_key, label_key, label_value_prefix=''):
"""Set a label to timeseries that can be used for monitoring.
:param series: TimeSeries object based on view data
:param resource_labels: collection of labels
:param attribute_key: actual label key
:param label_key: optional exporter-specific label key
:param label_value_prefix: optional exporter-specific prefix
"""
if attribute_key not in resource_labels:
return
series.resource.labels[label_key] = (label_value_prefix +
resource_labels[attribute_key])
if resource_type == 'k8s_container':
series.resource.type = 'k8s_container'
set_attribute_label(gcp_metadata_config.PROJECT_ID_KEY, 'project_id')
set_attribute_label(k8s_utils.CLUSTER_NAME_KEY, 'cluster_name')
set_attribute_label(k8s_utils.CONTAINER_NAME_KEY, 'container_name')
set_attribute_label(k8s_utils.NAMESPACE_NAME_KEY, 'namespace_name')
set_attribute_label(k8s_utils.POD_NAME_KEY, 'pod_name')
set_attribute_label(gcp_metadata_config.ZONE_KEY, 'location')
elif resource_type == 'gce_instance':
series.resource.type = 'gce_instance'
set_attribute_label(gcp_metadata_config.PROJECT_ID_KEY, 'project_id')
set_attribute_label(gcp_metadata_config.INSTANCE_ID_KEY, 'instance_id')
set_attribute_label(gcp_metadata_config.ZONE_KEY, 'zone')
elif resource_type == 'aws_ec2_instance':
series.resource.type = 'aws_ec2_instance'
set_attribute_label(aws_identity_doc_utils.ACCOUNT_ID_KEY,
'aws_account')
set_attribute_label(aws_identity_doc_utils.INSTANCE_ID_KEY,
'instance_id')
set_attribute_label(aws_identity_doc_utils.REGION_KEY, 'region',
label_value_prefix='aws:')
else:
series.resource.type = GLOBAL_RESOURCE_TYPE
def get_user_agent_slug():
"""Get the UA fragment to identify this library version."""
return "opencensus-python/{}".format(__version__)
def new_stats_exporter(options=None, interval=None):
"""Get a stats exporter and running transport thread.
Create a new `StackdriverStatsExporter` with the given options and start
periodically exporting stats to stackdriver in the background.
Fall back to default auth if `options` is null. This will raise
`google.auth.exceptions.DefaultCredentialsError` if default credentials
aren't configured.
See `opencensus.metrics.transport.get_exporter_thread` for details on the
transport thread.
:type options: :class:`Options`
    :param options: Options to pass to the exporter.
:type interval: int or float
:param interval: Seconds between export calls.
:rtype: :class:`StackdriverStatsExporter`
:return: The newly-created exporter.
"""
if options is None:
_, project_id = google.auth.default()
options = Options(project_id=project_id)
if str(options.project_id).strip() == "":
raise ValueError(ERROR_BLANK_PROJECT_ID)
ci = client_info.ClientInfo(client_library_version=get_user_agent_slug())
client = monitoring_v3.MetricServiceClient(client_info=ci)
exporter = StackdriverStatsExporter(client=client, options=options)
transport.get_exporter_thread([stats.stats], exporter, interval=interval)
return exporter
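# Illustrative usage sketch, assuming default application credentials are set
# up and "my-project" is a placeholder project ID:
#
#     exporter = new_stats_exporter(Options(project_id="my-project"),
#                                   interval=60)
#     # The background transport thread now pushes registered stats to
#     # Stackdriver every 60 seconds.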
def get_task_value():
""" getTaskValue returns a task label value in the format of
"py-<pid>@<hostname>".
"""
hostname = platform.uname()[1]
if not hostname:
hostname = "localhost"
return "py-%s@%s" % (os.getpid(), hostname)
def namespaced_view_name(view_name, metric_prefix):
""" create string to be used as metric type
"""
metric_prefix = metric_prefix or "custom.googleapis.com/opencensus"
return os.path.join(metric_prefix, view_name).replace('\\', '/')
def new_label_descriptors(defaults, keys):
""" create labels for the metric_descriptor
that will be sent to Stackdriver Monitoring
"""
label_descriptors = []
for lk in itertools.chain.from_iterable((defaults.keys(), keys)):
label = {}
label["key"] = sanitize_label(lk.key)
label["description"] = lk.description
label_descriptors.append(label)
return label_descriptors
def sanitize_label(text):
"""Remove characters not accepted in labels key
This replaces any non-word characters (alphanumeric or underscore), with
an underscore. It also ensures that the first character is a letter by
prepending with 'key' if necessary, and trims the text to 100 characters.
"""
if not text:
return text
text = re.sub('\\W+', '_', text)
if text[0] in string.digits:
text = "key_" + text
elif text[0] == '_':
text = "key" + text
return text[:100]
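# A minimal sketch of what the helpers above produce; the values follow from
# the rules documented in sanitize_label and namespaced_view_name and are not
# authoritative test vectors:
#
#     sanitize_label("9th-percentile latency!")
#     # -> "key_9th_percentile_latency_"
#     sanitize_label("_private")
#     # -> "key_private"
#     namespaced_view_name("my_view", "")
#     # -> "custom.googleapis.com/opencensus/my_view"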
|
apache-2.0
| 6,420,435,971,629,859,000 | 37.980132 | 79 | 0.660211 | false |
Education-Numerique/api
|
lxxl/services/graph/activities/thumbnail.py
|
1
|
1736
|
from lxxl.lib import router, output
from lxxl.lib.app import Error, Controller
from lxxl.lib.storage import Db, DbError
from lxxl.model.activities import Activity, Factory as ActivityFactory
from lxxl.model.blob import Factory as BlobFactory
class Thumbnail(router.Root):
def save(self, environ, params):
try:
req = Controller().getRequest()
router = Controller().getRouter()
a = ActivityFactory.get(params['rid'])
if not a:
output.error('activity not found', 404)
cT = req.headers['Content-Type'] or 'application/octet-stream'
blobId = BlobFactory.getBlobIds(
activity=params['rid'],
release="draft",
type="thumbnail"
)
if not len(blobId):
blobId = BlobFactory.insert(
'thumbnail',
'draft',
req.body,
cT,
activity=params['rid']
)
else:
blobId = blobId[0]
BlobFactory.update(
blobId,
'thumbnail',
'draft',
req.body,
cT,
activity=params['rid']
)
resultUrl = router.getRoute('graph.Blob.fetch', {
'version': params['version'],
'bid': str(blobId),
'release': 'draft'
})
output.success({
'url': resultUrl,
'blobId': str(blobId)
}, 201)
except Error:
pass
return Controller().getResponse(True)
|
agpl-3.0
| -8,801,622,417,008,714,000 | 30 | 74 | 0.464862 | false |
google/neural-light-transport
|
third_party/xiuminglib/xiuminglib/os.py
|
1
|
13150
|
import os
from os.path import join, exists, isdir, dirname
from shutil import rmtree, copy2, copytree
from glob import glob
from .log import get_logger
logger = get_logger()
from .imprt import preset_import
from .interact import format_print
def _is_cnspath(path):
return isinstance(path, str) and path.startswith('/cns/')
def _is_bspath(path):
return isinstance(path, str) and path.startswith('/bigstore/')
def sortglob(directory, filename='*', ext=None, ext_ignore_case=False):
"""Globs and then sorts filenames, possibly ending with multiple
extensions, in a directory.
Supports Google Colossus, by using ``gfile`` (preferred for speed)
or the ``fileutil`` CLI when Blaze is not used (hence, ``gfile``
unavailable).
Args:
directory (str): Directory to glob, e.g., ``'/path/to/'``.
filename (str or tuple(str), optional): Filename pattern excluding
extensions, e.g., ``'img*'``.
ext (str or tuple(str), optional): Extensions of interest, e.g.,
``('png', 'jpg')``. ``None`` means no extension, useful for
folders or files with no extension.
ext_ignore_case (bool, optional): Whether to ignore case for
extensions.
Returns:
list(str): Sorted list of files globbed.
"""
def glob_cns_cli(pattern):
        cmd = 'fileutil ls -d %s' % pattern  # -d to avoid recursing
_, stdout, _ = call(cmd, quiet=True)
return [x for x in stdout.split('\n') if x != '']
def glob_bs_cli(pattern):
cmd = '/google/data/ro/projects/cloud/bigstore/fileutil_bs ls -d %s' \
            % pattern  # -d to avoid recursing
_, stdout, _ = call(cmd, quiet=True)
return [x for x in stdout.split('\n') if x != '']
if _is_cnspath(directory):
# Is a CNS path
gfile = preset_import('gfile')
if gfile is None:
glob_func = glob_cns_cli
else:
glob_func = gfile.Glob
elif _is_bspath(directory):
# Is a Bigstore path
gfile = preset_import('gfile')
if gfile is None:
glob_func = glob_bs_cli
else:
glob_func = gfile.Glob
else:
# Is just a regular local path
glob_func = glob
if ext is None:
ext = ()
elif isinstance(ext, str):
ext = (ext,)
if isinstance(filename, str):
filename = (filename,)
ext_list = []
for x in ext:
if not x.startswith('.'):
x = '.' + x
if ext_ignore_case:
ext_list += [x.lower(), x.upper()]
else:
ext_list.append(x)
files = []
for f in filename:
if ext_list:
for e in ext_list:
files += glob_func(join(directory, f + e))
else:
files += glob_func(join(directory, f))
files_sorted = sorted(files)
return files_sorted
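# Hypothetical call for illustration; only /cns/ and /bigstore/ prefixes route
# through gfile or the fileutil CLI, plain local paths use glob():
#
#     imgs = sortglob('/tmp/renders', filename='img*', ext=('png', 'jpg'),
#                     ext_ignore_case=True)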
def exists_isdir(path):
"""Determines whether a path exists, and if so, whether it is a file
or directory.
Supports Google Colossus (CNS) paths by using ``gfile`` (preferred for
speed) or the ``fileutil`` CLI.
Args:
path (str): A path.
Returns:
tuple:
- **exists** (*bool*) -- Whether the path exists.
- **isdir** (*bool*) -- Whether the path is a file or directory.
``None`` if the path doesn't exist.
"""
path = _no_trailing_slash(path)
# If local path, do the job quickly and return
if not _is_cnspath(path):
path_exists = exists(path)
path_isdir = isdir(path) if path_exists else None
return path_exists, path_isdir
gfile = preset_import('gfile')
# Using fileutil CLI
if gfile is None:
testf, _, _ = call('fileutil test -f %s' % path)
testd, _, _ = call('fileutil test -d %s' % path)
if testf == 1 and testd == 1:
path_exists = False
path_isdir = None
elif testf == 1 and testd == 0:
path_exists = True
path_isdir = True
elif testf == 0 and testd == 1:
path_exists = True
path_isdir = False
else:
raise NotImplementedError("What does this even mean?")
# Using gfile
else:
path_exists = gfile.Exists(path)
if path_exists:
path_isdir = gfile.IsDirectory(path)
else:
path_isdir = None
return path_exists, path_isdir
def _no_trailing_slash(path):
if path.endswith('/'):
path = path[:-1]
assert not path.endswith('/'), "path shouldn't end with '//'"
# Guaranteed to not end with '/', so basename() or dirname()
# will give the correct results
return path
def _select_gfs_user(writeto):
"""As whom we perform file operations.
Useful for operations on a folder whose owner is a Ganpati group (e.g.,
``gcam-gpu``).
"""
gfile = preset_import('gfile')
writeto = _no_trailing_slash(writeto)
writeto_exists, writeto_isdir = exists_isdir(writeto)
if writeto_exists and writeto_isdir:
# OK as long as we can write to it
writeto_folder = writeto
else:
# Doesn't exist yet or is a file, so we need to write to its parent
writeto_folder = dirname(writeto)
if gfile is None:
stdout = _call_assert_success(
'fileutil ls -l -d %s' % writeto_folder, quiet=True)
assert stdout.count('\n') == 1, \
"`fileuti ls` results should have one line only"
owner = stdout.strip().split(' ')[2]
else:
owner = gfile.Stat(writeto_folder).owner
return owner
def cp(src, dst, cns_parallel_copy=10):
"""Copies files, possibly from/to the Google Colossus Filesystem.
Args:
src (str): Source file or directory.
dst (str): Destination file or directory.
cns_parallel_copy (int): The number of files to be copied in
parallel. Only effective when copying a directory from/to
Colossus.
Raises:
FileNotFoundError: If the source doesn't exist.
"""
src = _no_trailing_slash(src)
dst = _no_trailing_slash(dst)
srcexists, srcisdir = exists_isdir(src)
if not srcexists:
raise FileNotFoundError("Source must exist")
# When no CNS paths involved, quickly do the job and return
if not _is_cnspath(src) and not _is_cnspath(dst):
if srcisdir:
for x in os.listdir(src):
s = join(src, x)
d = join(dst, x)
if isdir(s):
copytree(s, d)
else:
copy2(s, d)
else:
copy2(src, dst)
return
gfile = preset_import('gfile')
if gfile is None:
cmd = 'fileutil cp -f -colossus_parallel_copy '
if srcisdir:
cmd += '-R -parallel_copy=%d %s ' % \
(cns_parallel_copy, join(src, '*'))
else:
cmd += '%s ' % src
cmd += '%s' % dst
# Destination directory may be owned by a Ganpati group
if _is_cnspath(dst):
cmd += ' --gfs_user %s' % _select_gfs_user(dst)
_ = _call_assert_success(cmd)
else:
with gfile.AsUser(_select_gfs_user(dst)):
if srcisdir:
gfile.RecursivelyCopyDir(src, dst, overwrite=True)
else:
gfile.Copy(src, dst, overwrite=True)
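# Illustrative copy of a single local file (paths are made up); CNS or
# Bigstore destinations would instead go through gfile or the fileutil CLI:
#
#     cp('/tmp/results/render.png', '/tmp/backup/render.png')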
def rm(path):
"""Removes a file or recursively a directory, with Google Colossus
compatibility.
Args:
path (str)
"""
if not _is_cnspath(path):
# Quickly do the job and return
if exists(path):
if isdir(path):
rmtree(path)
else:
os.remove(path)
return
# OK, a CNS path
# Use gfile if available
gfile = preset_import('gfile')
if gfile is not None:
gfile.DeleteRecursively(path) # works for file and directory
else:
        # Fall back to the fileutil CLI
cmd = 'fileutil rm -R -f %s' % path # works for file and directory
_ = _call_assert_success(cmd, quiet=True)
def makedirs(directory, rm_if_exists=False):
"""Wraps :func:`os.makedirs` to support removing the directory if it
    already exists.
Google Colossus-compatible: it tries to use ``gfile`` first for speed. This
will fail if Blaze is not used, in which case it then falls back to using
``fileutil`` CLI as external process calls.
Args:
directory (str)
rm_if_exists (bool, optional): Whether to remove the directory (and
its contents) if it already exists.
"""
def exists_cns_cli(directory):
cmd = 'fileutil test -d %s' % directory
retcode, _, _ = call(cmd, quiet=True)
if retcode == 0:
return True
if retcode == 1:
return False
raise ValueError(retcode)
def mkdir_cns_cli(directory):
cmd = 'fileutil mkdir -p %s' % directory
_ = _call_assert_success(cmd, quiet=True)
if _is_cnspath(directory):
# Is a CNS path
gfile = preset_import('gfile')
if gfile is None:
exists_func = exists_cns_cli
mkdir_func = mkdir_cns_cli
else:
exists_func = gfile.Exists
mkdir_func = gfile.MakeDirs
else:
# Is just a regular local path
exists_func = exists
mkdir_func = os.makedirs
# Do the job
if exists_func(directory):
if rm_if_exists:
rm(directory)
mkdir_func(directory)
logger.info("Removed and then remade:\n\t%s", directory)
else:
mkdir_func(directory)
def make_exp_dir(directory, param_dict, rm_if_exists=False):
"""Makes an experiment output folder by hashing the experiment parameters.
Args:
directory (str): The made folder will be under this.
param_dict (dict): Dictionary of the parameters identifying the
experiment. It is sorted by its keys, so different orders lead to
the same hash.
rm_if_exists (bool, optional): Whether to remove the experiment folder
if it already exists.
Writes
- The experiment parameters in ``<directory>/<hash>/param.json``.
Returns:
str: The experiment output folder just made.
"""
from collections import OrderedDict
from json import dump
hash_seed = os.environ.get('PYTHONHASHSEED', None)
if hash_seed != '0':
logger.warning(
("PYTHONHASHSEED is not 0, so the same param_dict has different "
"hashes across sessions. Consider disabling this randomization "
"with `PYTHONHASHSEED=0 python your_script.py`"))
param_dict = OrderedDict(sorted(param_dict.items()))
param_hash = str(hash(str(param_dict)))
assert param_hash != '' # gotta be careful because of rm_if_exists
directory = join(directory, param_hash)
makedirs(directory, rm_if_exists=rm_if_exists)
# Write parameters into a .json
json_f = join(directory, 'param.json')
with open(json_f, 'w') as h:
dump(param_dict, h, indent=4, sort_keys=True)
logger.info("Parameters dumped to: %s", json_f)
return directory
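# Sketch of typical usage with made-up parameters; the returned folder is
# '<directory>/<hash>' and contains param.json:
#
#     exp_dir = make_exp_dir('/tmp/experiments',
#                            {'lr': 1e-3, 'batch_size': 32})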
def fix_terminal():
"""Fixes messed up terminal."""
from shlex import split
from subprocess import Popen, DEVNULL
cmd = 'stty sane'
child = Popen(split(cmd), stdout=DEVNULL, stderr=DEVNULL)
_, _ = child.communicate()
def call(cmd, cwd=None, wait=True, quiet=False):
"""Executes a command in shell.
Args:
cmd (str): Command to be executed.
cwd (str, optional): Directory to execute the command in. ``None``
means current directory.
wait (bool, optional): Whether to block until the call finishes.
        quiet (bool, optional): Whether to suppress printing the output stream
            (if any) and the error stream (if an error occurred).
Returns:
tuple:
- **retcode** (*int*) -- Command exit code. 0 means a successful
call. Always ``None`` if not waiting for the command to finish.
- **stdout** (*str*) -- Standard output stream. Always ``None`` if
not waiting.
- **stderr** (*str*) -- Standard error stream. Always ``None`` if
not waiting.
"""
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=cwd, shell=True)
if not wait:
return None, None, None
stdout, stderr = process.communicate() # waits for completion
stdout, stderr = stdout.decode(), stderr.decode()
if not quiet:
if stdout != '':
format_print(stdout, 'O')
if process.returncode != 0:
if stderr != '':
format_print(stderr, 'E')
retcode = process.returncode
return retcode, stdout, stderr
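# Example invocation, assuming a POSIX shell; the command string is arbitrary:
#
#     retcode, out, err = call('ls -l', cwd='/tmp', quiet=True)
#     if retcode != 0:
#         logger.error("listing failed: %s", err)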
def _call_assert_success(cmd, **kwargs):
retcode, stdout, _ = call(cmd, **kwargs)
assert retcode == 0, \
"External process call failed with exit code {code}:\n\t{cmd}".format(
cmd=cmd, code=retcode)
return stdout
|
apache-2.0
| -5,142,542,116,938,487,000 | 29.941176 | 79 | 0.580913 | false |
MarkusH/django-osm-field
|
tests/test_fields.py
|
1
|
10045
|
# -*- coding: utf-8 -*-
import copy
from unittest import skipIf
import django
from django.db import models
from django.test import SimpleTestCase, TestCase
from osm_field.fields import LatitudeField, Location, LongitudeField, OSMField
from osm_field.validators import validate_latitude, validate_longitude
from osm_field.widgets import OSMWidget
from .models import (
CustomNamingModel,
DefaultNamingModel,
MixedNamingModel,
MultipleNamingModel,
)
try:
from django.core.checks import Error
except ImportError:
pass
def foo_validator(value):
pass
BERLIN = Location(lat=52.5167, lon=13.3830, text="Berlin")
NEW_YORK = Location(lat=40.7127, lon=-74.005, text="New York")
@skipIf(
django.VERSION[:2] < (1, 7),
"Model field deconstruction has been introduced in Django 1.7",
)
class TestDeconstruction(TestCase):
def test_latitude_field(self):
field = LatitudeField()
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
self.assertEqual(path, "osm_field.fields.LatitudeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"validators": [validate_latitude]})
def test_latitude_field_with_validator(self):
field = LatitudeField(validators=[foo_validator])
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
self.assertEqual(path, "osm_field.fields.LatitudeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"validators": [foo_validator, validate_latitude]})
def test_longitude_field(self):
field = LongitudeField()
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
self.assertEqual(path, "osm_field.fields.LongitudeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"validators": [validate_longitude]})
def test_longitude_field_with_validator(self):
field = LongitudeField(validators=[foo_validator])
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
self.assertEqual(path, "osm_field.fields.LongitudeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"validators": [foo_validator, validate_longitude]})
def test_osm_field(self):
field = OSMField()
field.set_attributes_from_name("location")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "location")
self.assertEqual(path, "osm_field.fields.OSMField")
self.assertEqual(args, [])
self.assertEqual(
kwargs, {"lat_field": "location_lat", "lon_field": "location_lon"}
)
def test_osm_field_with_args(self):
field = OSMField(lat_field="some_lat_field", lon_field="some_lon_field")
field.set_attributes_from_name("location")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "location")
self.assertEqual(path, "osm_field.fields.OSMField")
self.assertEqual(args, [])
self.assertEqual(
kwargs, {"lat_field": "some_lat_field", "lon_field": "some_lon_field"}
)
def test_osm_field_raise_without_name(self):
field = OSMField()
self.assertRaisesRegex(TypeError, "unsupported operand", field.deconstruct)
class TestFieldChecks(TestCase):
def setUp(self):
# Taken from IsolatedModelsTestCase in
# django/tests/invalid_models_tests/base.py
from django.apps import apps
self._old_models = apps.app_configs["tests"].models.copy()
def tearDown(self):
# Taken from IsolatedModelsTestCase in
# django/tests/invalid_models_tests/base.py
from django.apps import apps
apps.app_configs["tests"].models = self._old_models
apps.all_models["tests"] = self._old_models
apps.clear_cache()
def test_no_missing_fields(self):
class Model(models.Model):
location = OSMField()
location_lat = LatitudeField()
location_lon = LongitudeField()
checks = []
expected = []
field = Model._meta.get_field("location")
checks.extend(field.check())
self.assertEqual(checks, expected)
def test_missing_fields(self):
class Model(models.Model):
location = OSMField()
checks = []
field = Model._meta.get_field("location")
expected = [
Error(
"The OSMField 'location' references the non-existent latitude "
"field 'location_lat'.",
hint=None,
obj=field,
id="osm_field.E001",
),
Error(
"The OSMField 'location' references the non-existent longitude "
"field 'location_lon'.",
hint=None,
obj=field,
id="osm_field.E002",
),
]
checks.extend(field.check())
self.assertEqual(checks, expected)
    def test_no_missing_fields_explicitly_given(self):
class Model(models.Model):
location = OSMField(lat_field="latitude", lon_field="longitude")
latitude = LatitudeField()
longitude = LongitudeField()
checks = []
expected = []
field = Model._meta.get_field("location")
checks.extend(field.check())
self.assertEqual(checks, expected)
    def test_missing_fields_explicitly_given(self):
class Model(models.Model):
location = OSMField(lat_field="lat", lon_field="lon")
checks = []
field = Model._meta.get_field("location")
expected = [
Error(
"The OSMField 'location' references the non-existent latitude "
"field 'lat'.",
hint=None,
obj=field,
id="osm_field.E001",
),
Error(
"The OSMField 'location' references the non-existent longitude "
"field 'lon'.",
hint=None,
obj=field,
id="osm_field.E002",
),
]
checks.extend(field.check())
self.assertEqual(checks, expected)
class TestFormFields(TestCase):
def test_latitude_field(self):
field = LatitudeField()
field.set_attributes_from_name("location_lat")
formfield = field.formfield()
self.assertEqual(formfield.max_value, 90)
self.assertEqual(formfield.min_value, -90)
def test_longitude_field(self):
field = LongitudeField()
field.set_attributes_from_name("location_lon")
formfield = field.formfield()
self.assertEqual(formfield.max_value, 180)
self.assertEqual(formfield.min_value, -180)
def test_osm_field(self):
field = OSMField()
field.set_attributes_from_name("location")
formfield = field.formfield()
self.assertIsInstance(formfield.widget, OSMWidget)
self.assertEqual(
formfield.widget.attrs,
{
"class": "osmfield",
"data-lat-field": "location_lat",
"data-lon-field": "location_lon",
},
)
def test_osm_field_different_names(self):
field = OSMField(lat_field="some_lat_field", lon_field="some_lon_field")
field.set_attributes_from_name("location")
formfield = field.formfield()
self.assertIsInstance(formfield.widget, OSMWidget)
self.assertEqual(
formfield.widget.attrs,
{
"class": "osmfield",
"data-lat-field": "some_lat_field",
"data-lon-field": "some_lon_field",
},
)
class TestModels(TestCase):
def test_custom_naming(self):
item = CustomNamingModel.objects.create(
location="Berlin", latitude=52.5167, longitude=13.383
)
self.assertEqual(item.get_location_info(), BERLIN)
self.assertNotEqual(item.get_location_info(), NEW_YORK)
def test_default_naming(self):
item = DefaultNamingModel.objects.create(
location="Berlin", location_lat=52.5167, location_lon=13.383
)
self.assertEqual(item.get_location_info(), BERLIN)
self.assertNotEqual(item.get_location_info(), NEW_YORK)
def test_mixed_naming(self):
item = MixedNamingModel.objects.create(
location="Berlin", location_lat=52.5167, longitude=13.383
)
self.assertEqual(item.get_location_info(), BERLIN)
self.assertNotEqual(item.get_location_info(), NEW_YORK)
def test_multiple_naming(self):
item = MultipleNamingModel.objects.create(
default_location="Berlin",
default_location_lat=52.5167,
default_location_lon=13.383,
custom_location="New York",
custom_latitude=40.7127,
custom_longitude=-74.005,
)
self.assertEqual(item.get_default_location_info(), BERLIN)
self.assertEqual(item.get_custom_location_info(), NEW_YORK)
self.assertNotEqual(item.get_default_location_info(), NEW_YORK)
self.assertNotEqual(item.get_custom_location_info(), BERLIN)
class TestLocation(SimpleTestCase):
def test_compare(self):
self.assertEqual(BERLIN, BERLIN)
self.assertNotEqual(BERLIN, NEW_YORK)
def test_copy(self):
berlin_new = copy.copy(BERLIN)
self.assertEqual(BERLIN, berlin_new)
self.assertIsNot(BERLIN, berlin_new)
def test_repr(self):
self.assertEqual(
"<Location lat=52.516700 lon=13.383000 text=Berlin>", repr(BERLIN)
)
self.assertEqual(
"<Location lat=40.712700 lon=-74.005000 text=New York>", repr(NEW_YORK)
)
def test_string(self):
self.assertEqual("Berlin (52.516700, 13.383000)", str(BERLIN))
self.assertEqual("New York (40.712700, -74.005000)", str(NEW_YORK))
|
mit
| 917,209,673,852,079,500 | 33.400685 | 85 | 0.607068 | false |
sighill/shade_app
|
apis/raw/001_raw/001_cleaner.py
|
1
|
1564
|
# 001_cleaner.py
#####################################################################
##################################
# Import modules and add the working directory to the path for relative imports
import sys
sys.path.insert(0 , 'D:/Projets/shade_django/apis/')
from voca import AddLog, StringFormatter, OutFileCreate, StrValidator, OdditiesFinder
##################################
# Initialize paths and file names
AddLog('title' , 'Début du nettoyage du fichier')
work_dir = 'D:/Projets/shade_django/apis/raw/001_raw/'
# Name of the source file
raw_file = 'src'
##################################
# Build the raw list
raw_list = open(work_dir + raw_file , 'r').read().splitlines()
##################################
# Format the text
# Initialize the list that will hold StringFormatter's output
formatted_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in raw_list:
formatted_list.append(StringFormatter(line))
##################################
# Run the list through OdditiesFinder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( formatted_list )
##################################
# Manual validation of the text
AddLog('subtitle' , 'Début de la fonction StrValidator')
ref_list = StrValidator(list_without_oddities)
##################################
# Save the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('D:/Projets/shade_django/apis/out/','001_src',ref_list,'regions italiennes')
|
mit
| -4,217,137,144,357,580,000 | 33.409091 | 90 | 0.581515 | false |
levilucio/SyVOLT
|
ECore_Copier_MM/transformation-Large/HepackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier.py
|
1
|
5036
|
from core.himesis import Himesis
class HepackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HepackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HepackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier, self).__init__(name='HepackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """epackageOUTeClassifiersSolveRefEPackageEClassifierEPackageEClassifier"""
self["GUID__"] = 351604494805091508
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 7039868393087422491
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 1477928753433573964
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 2754520279520003756
self.vs[3]["associationType"] = """eClassifiers"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 593090873380126749
self.vs[4]["associationType"] = """eClassifiers"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 585886738412324839
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EPackage"""
self.vs[5]["mm__"] = """EPackage"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 685021359161639493
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 5718550699040880859
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EClassifier"""
self.vs[7]["mm__"] = """EClassifier"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 3598392122837208827
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 7330064760639382811
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EPackage"""
self.vs[9]["mm__"] = """EPackage"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 8733323845962680094
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 3173403289481367747
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EClassifier"""
self.vs[11]["mm__"] = """EClassifier"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 1596699566864324608
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 2126932846553248712
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 2993463213060030230
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 7923878068234677860
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 5246028512463363603
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 7397437738047185457
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 8823170035885049361
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 8317562487733194393
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 4085766709339504124
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 2247421823400118325
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 778126912712255342
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 7203967929709660600
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 3743715314248397306
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 2929440278088873336
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 4049813633524506505
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 1132026222897065622
|
mit
| -3,944,586,932,985,144,300 | 47.893204 | 298 | 0.518864 | false |
roam/machete
|
machete/vendor/marshmallow/fields.py
|
1
|
25137
|
# -*- coding: utf-8 -*-
"""Field classes for formatting and validating the serialized object.
"""
# Adapted from https://github.com/twilio/flask-restful/blob/master/flask_restful/fields.py.
# See the `NOTICE <https://github.com/sloria/marshmallow/blob/master/NOTICE>`_
# file for more licensing information.
from __future__ import absolute_import
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from functools import wraps
import inspect
from . import validate, utils, class_registry
from .base import FieldABC, SerializerABC
from .compat import (text_type, OrderedDict, iteritems, total_seconds,
basestring)
from .exceptions import MarshallingError
__all__ = [
'validated',
'Marshaller',
'Raw',
'Nested',
'List',
'String',
'UUID',
'Number',
'Integer',
'Boolean',
'FormattedString',
'Float',
'Arbitrary',
'DateTime',
'LocalDateTime',
'Time',
'Date',
'TimeDelta',
'Fixed',
'ZERO',
'Price',
'Url',
'Email',
'Method',
'Function',
'Select',
'Enum',
]
def validated(f):
"""Decorator that wraps a field's ``output`` method.
If the field is required and the value is missing, we raise a
MarshallingError immediately. Otherwise, if an exception is raised
during the execution of the wrapped method or the field object's
``validate`` function evaluates to ``False``, a MarshallingError
is raised with the underlying exception's error message or the
user-defined error message (if defined).
"""
@wraps(f)
def decorated(self, *args, **kwargs):
if hasattr(self, 'required'):
value = self.get_value(args[0], args[1])
if self.required and value is None:
raise MarshallingError('Missing data for required field.')
try:
output = f(self, *args, **kwargs)
if hasattr(self, 'validate') and callable(self.validate):
if not self.validate(output):
msg = 'Validator {0}({1}) is not True'.format(
self.validate.__name__, output
)
raise MarshallingError(getattr(self, "error", None) or msg)
return output
# TypeErrors should be raised if fields are not declared as instances
except TypeError:
raise
except Exception as error:
raise MarshallingError(getattr(self, "error", None) or error)
return decorated
class Marshaller(object):
"""Callable class responsible for marshalling data and storing errors.
:param str prefix: Optional prefix that will be prepended to all the
serialized field names.
:param bool strict: If ``True``, raise errors if invalid data are passed in
instead of failing silently and storing the errors.
    :param callable error_handler: Error handling function that receives a
dictionary of stored errors.
"""
def __init__(self, prefix='', strict=False, error_handler=None):
self.prefix = prefix
self.strict = strict
self.errors = {}
def marshal(self, data, fields_dict, many=False):
"""Takes raw data (a dict, list, or other object) and a dict of
fields to output and filters the data based on those fields.
:param data: The actual object(s) from which the fields are taken from
:param dict fields: A dict whose keys will make up the final serialized
response output.
:param bool many: Set to ``True`` if ``data`` is a collection object
that is iterable.
:returns: An OrderedDict of the marshalled data
"""
if many and data is not None:
return [self.marshal(d, fields_dict, many=False) for d in data]
items = []
for attr_name, field_obj in iteritems(fields_dict):
key = self.prefix + attr_name
try:
item = (key, field_obj.output(attr_name, data))
except MarshallingError as err: # Store errors
if self.strict:
raise err
self.errors[key] = text_type(err)
item = (key, None)
except TypeError:
# field declared as a class, not an instance
if (isinstance(field_obj, type) and
issubclass(field_obj, FieldABC)):
msg = ('Field for "{0}" must be declared as a '
"Field instance, not a class. "
'Did you mean "fields.{1}()"?'
.format(attr_name, field_obj.__name__))
raise TypeError(msg)
raise
items.append(item)
return OrderedDict(items)
# Make an instance callable
__call__ = marshal
# Singleton marshaller function for use in this module
marshal = Marshaller(strict=True)
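# Rough sketch of direct use of the module-level marshaller (the object and
# field names are hypothetical; Serializer classes normally drive this):
#
#     fields_dict = {'id': Integer(), 'name': String()}
#     marshal(some_obj, fields_dict)
#     # -> OrderedDict([('id', ...), ('name', ...)])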
def _get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if type(key) == int:
return _get_value_for_key(key, obj, default)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if isinstance(key, basestring) and hasattr(obj, key):
return getattr(obj, key)
if utils.is_indexable_but_not_string(obj):
try:
return obj[key]
except KeyError:
return default
return default
class Raw(FieldABC):
"""Basic field from which other fields should extend. It applies no
formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
    throw a MarshallingError in case of a parsing problem.
:param default: Default value for the field if the attribute is not set.
:param str attribute: The name of the attribute to get the value from. If
``None``, assumes the attribute has the same name as the field.
:param str error: Error message stored upon validation failure.
:param callable validate: Validation function that takes the output as its
only paramter and returns a boolean. If it returns False, a
MarshallingError is raised.
:param bool required: Make a field required. If a field is ``None``,
raise a MarshallingError.
"""
def __init__(self, default=None, attribute=None, error=None,
validate=None, required=False):
self.attribute = attribute
self.default = default
self.error = error
self.validate = validate
self.required = required
def get_value(self, key, obj):
"""Return the value for a given key from an object."""
check_key = key if self.attribute is None else self.attribute
return _get_value(check_key, obj)
def format(self, value):
"""Formats a field's value. No-op by default, concrete fields should
override this and apply the appropriate formatting.
:param value: The value to format
        :exception MarshallingError: In case of a formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
@validated
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result.
        :param str key: The attribute or key to get.
        :param str obj: The object to pull the key from.
        :exception MarshallingError: In case of a validation or formatting problem
"""
value = self.get_value(key, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest a :class:`Serializer <marshmallow.Serializer>`
inside a field.
Examples: ::
user = fields.Nested(UserSerializer)
user2 = fields.Nested('UserSerializer') # Equivalent to above
collaborators = fields.Nested(UserSerializer(many=True, only='id'))
parent = fields.Nested('self')
:param Serializer nested: The Serializer class, instance, or class name (string)
to nest, or ``"self"`` to nest the serializer within itself.
:param tuple exclude: A list or tuple of fields to exclude.
:param only: A tuple or string of the field(s) to marshal. If ``None``, all fields
will be marshalled. If a field name (string) is given, only a single
value will be returned as output instead of a dictionary.
This parameter takes precedence over ``exclude``.
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param bool many: Whether the field is a collection of objects.
"""
def __init__(self, nested, exclude=None, only=None, allow_null=False,
many=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
self.only = only
self.exclude = exclude or ()
self.many = many
self.__serializer = None
self.__updated_fields = False # ensures serializer fields are updated
# only once
super(Nested, self).__init__(**kwargs)
def __get_fields_to_marshal(self, all_fields):
"""Filter all_fields based on self.only and self.exclude """
# Default 'only' to all the nested fields
ret = OrderedDict()
if all_fields is None:
return ret
elif isinstance(self.only, basestring):
ret[self.only] = all_fields[self.only]
return ret
else:
only = set(all_fields) if self.only is None else set(self.only)
if self.exclude and self.only:
# Make sure that only takes precedence
exclude = set(self.exclude) - only
else:
exclude = set([]) if self.exclude is None else set(self.exclude)
filtered = ((k, v) for k, v in all_fields.items()
if k in only and k not in exclude)
return OrderedDict(filtered)
@property
def serializer(self):
"""The nested Serializer object."""
# Cache the serializer instance
if not self.__serializer:
if isinstance(self.nested, SerializerABC):
self.__serializer = self.nested
elif isinstance(self.nested, type) and \
issubclass(self.nested, SerializerABC):
self.__serializer = self.nested(None, many=self.many)
elif isinstance(self.nested, basestring):
if self.nested == 'self':
self.__serializer = self.parent # The serializer this fields belongs to
# For now, don't allow nesting of depth > 1
self.exclude += (self.name, ) # Exclude this field
else:
serializer_class = class_registry.get_class(self.nested)
self.__serializer = serializer_class(None, many=self.many)
else:
raise ValueError("Nested fields must be passed a Serializer, not {0}."
.format(self.nested.__class__))
return self.__serializer
def output(self, key, obj):
nested_obj = self.get_value(key, obj)
if self.allow_null and nested_obj is None:
return None
self.serializer.many = self.many
self.serializer.obj = nested_obj
if not self.__updated_fields:
self.__updated_fields = True
self.serializer._update_fields(nested_obj)
fields = self.__get_fields_to_marshal(self.serializer.fields)
try:
ret = self.serializer.marshal(nested_obj, fields, many=self.many)
except TypeError as err:
raise TypeError('Could not marshal nested object due to error:\n"{0}"\n'
'If the nested object is a collection, you need to set '
'"many=True".'.format(err))
# Parent should get any errors stored after marshalling
if self.serializer.errors:
self.parent.errors[key] = self.serializer.errors
if isinstance(self.only, basestring): # self.only is a field name
if self.many:
return flatten(ret, key=self.only)
else:
return ret[self.only]
return ret
def flatten(dictlist, key):
"""Flattens a list of dicts into just a list of values.
::
>>> d = [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}]
>>> flatten(d, 'id')
[1, 2]
"""
return [d[key] for d in dictlist]
class List(Raw):
"""A list field.
Example: ::
numbers = fields.List(fields.Float)
:param cls_or_instance: A field class or instance.
"""
def __init__(self, cls_or_instance, **kwargs):
super(List, self).__init__(**kwargs)
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, FieldABC):
raise MarshallingError("The type of the list elements "
"must be a subclass of "
"marshmallow.base.FieldABC")
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, FieldABC):
raise MarshallingError("The instances of the list "
"elements must be of type "
"marshmallow.base.FieldABC")
self.container = cls_or_instance
@validated
def output(self, key, data):
value = self.get_value(key, data)
# we cannot really test for external dict behavior
if utils.is_indexable_but_not_string(value) and not isinstance(value, dict):
# Convert all instances in typed list to container type
return [self.container.output(idx, value) for idx
in range(len(value))]
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
"""A string field."""
def __init__(self, default='', attribute=None, *args, **kwargs):
return super(String, self).__init__(default, attribute, *args, **kwargs)
def format(self, value):
try:
return text_type(value)
except ValueError as ve:
raise MarshallingError(self.error or ve)
class UUID(String):
"""A UUID field."""
pass
class Number(Raw):
'''Base class for number fields.'''
num_type = float
def __init__(self, default=0.0, attribute=None, as_string=False, error=None, **kwargs):
self.as_string = as_string
super(Number, self).__init__(default=default, attribute=attribute,
error=error, **kwargs)
def _format_num(self, value):
'''Return the correct value for a number, given the passed in
arguments to __init__.
'''
if self.as_string:
return repr(self.num_type(value))
else:
return self.num_type(value)
def format(self, value):
try:
if value is None:
return self._format_num(self.default)
return self._format_num(value)
except ValueError as ve:
raise MarshallingError(ve)
class Integer(Number):
"""An integer field.
:param bool as_string: If True, format the value as a string.
"""
num_type = int
def __init__(self, default=0, attribute=None, as_string=False, error=None, **kwargs):
self.as_string = as_string
super(Number, self).__init__(default=default, attribute=attribute,
error=error, **kwargs)
class Boolean(Raw):
'''A boolean field.'''
def format(self, value):
return bool(value)
class FormattedString(Raw):
def __init__(self, src_str):
super(FormattedString, self).__init__()
self.src_str = text_type(src_str)
@validated
def output(self, key, obj):
try:
data = utils.to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingError(error)
class Float(Number):
"""
A double as IEEE-754 double precision string.
:param bool as_string: If True, format the value as a string.
"""
num_type = float
class Arbitrary(Number):
"""A floating point number with an arbitrary precision,
    formatted as a string.
ex: 634271127864378216478362784632784678324.23432
"""
# No as_string param
def __init__(self, default=0, attribute=None, **kwargs):
super(Arbitrary, self).__init__(default=default, attribute=attribute, **kwargs)
def format(self, value):
try:
if value is None:
return text_type(utils.float_to_decimal(float(self.default)))
return text_type(utils.float_to_decimal(float(value)))
except ValueError as ve:
raise MarshallingError(ve)
DATEFORMAT_FUNCTIONS = {
"iso": utils.isoformat,
"rfc": utils.rfcformat,
}
class DateTime(Raw):
"""A formatted datetime string in UTC.
ex. ``"Sun, 10 Nov 2013 07:23:45 -0000"``
:param str format: Either ``"rfc"`` (for RFC822), ``"iso"`` (for ISO8601),
or a date format string. If ``None``, defaults to "rfc".
:param default: Default value for the field if the attribute is not set.
:param str attribute: The name of the attribute to get the value from. If
``None``, assumes the attribute has the same name as the field.
"""
localtime = False
def __init__(self, format=None, default=None, attribute=None, **kwargs):
super(DateTime, self).__init__(default=default, attribute=attribute, **kwargs)
self.dateformat = format
def format(self, value):
self.dateformat = self.dateformat or 'rfc'
format_func = DATEFORMAT_FUNCTIONS.get(self.dateformat, None)
if format_func:
return format_func(value, localtime=self.localtime)
else:
return value.strftime(self.dateformat)
class LocalDateTime(DateTime):
"""A formatted datetime string in localized time, relative to UTC.
ex. ``"Sun, 10 Nov 2013 08:23:45 -0600"``
Takes the same arguments as :class:`DateTime <marshmallow.fields.DateTime>`.
"""
localtime = True
class Time(Raw):
"""ISO8601-formatted time string."""
def format(self, value):
try:
ret = value.isoformat()
except AttributeError:
raise MarshallingError('{0} cannot be formatted as a time.'
.format(repr(value)))
if value.microsecond:
return ret[:12]
return ret
class Date(Raw):
"""ISO8601-formatted date string."""
def format(self, value):
try:
return value.isoformat()
except AttributeError:
raise MarshallingError('{0} cannot be formatted as a date.'
.format(repr(value)))
return value
class TimeDelta(Raw):
'''Formats time delta objects, returning the total number of seconds
as a float.
'''
def format(self, value):
try:
return total_seconds(value)
except AttributeError:
raise MarshallingError('{0} cannot be formatted as a timedelta.'
.format(repr(value)))
return value
ZERO = MyDecimal()
class Fixed(Number):
"""A fixed-precision number as a string.
"""
def __init__(self, decimals=5, default=0, attribute=None, error=None,
*args, **kwargs):
super(Fixed, self).__init__(default=default, attribute=attribute, error=error,
*args, **kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = utils.float_to_decimal(float(value))
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingError('Invalid Fixed precision number.')
return text_type(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
class Price(Fixed):
def __init__(self, decimals=2, **kwargs):
super(Price, self).__init__(decimals=decimals, **kwargs)
class Url(Raw):
"""A validated URL field.
:param default: Default value for the field if the attribute is not set.
:param str attribute: The name of the attribute to get the value from. If
``None``, assumes the attribute has the same name as the field.
:param bool relative: Allow relative URLs.
"""
def __init__(self, default=None, attribute=None, relative=False, *args, **kwargs):
super(Url, self).__init__(default=default, attribute=attribute,
*args, **kwargs)
self.relative = relative
@validated
def output(self, key, obj):
value = self.get_value(key, obj)
if value is None:
return self.default
return validate.url(value, relative=self.relative)
class Email(Raw):
"""A validated email field.
"""
@validated
def output(self, key, obj):
value = self.get_value(key, obj)
if value is None:
return self.default
return validate.email(value)
def get_args(func):
"""Return a tuple of argument names for a function."""
return inspect.getargspec(func).args
def _callable(obj):
"""Checks that an object is callable, else raises a ``MarshallingError``.
"""
if not callable(obj):
raise MarshallingError('Object {0!r} is not callable.'.format(obj))
return obj
class Method(Raw):
"""A field that takes the value returned by a Serializer method.
:param str method_name: The name of the Serializer method from which
to retrieve the value. The method must take a single argument ``obj``
(in addition to self) that is the object to be serialized.
"""
def __init__(self, method_name, **kwargs):
self.method_name = method_name
super(Method, self).__init__(**kwargs)
@validated
def output(self, key, obj):
try:
method = _callable(getattr(self.parent, self.method_name, None))
if len(get_args(method)) > 2:
if self.parent.context is None:
msg = 'No context available for Method field {0!r}'.format(key)
raise MarshallingError(msg)
return method(obj, self.parent.context)
else:
return method(obj)
except AttributeError:
pass
class Function(Raw):
"""A field that takes the value returned by a function.
:param function func: A callable function from which to retrieve the value.
The function must take a single argument ``obj`` which is the object
to be serialized.
"""
def __init__(self, func, **kwargs):
super(Function, self).__init__(**kwargs)
self.func = _callable(func)
@validated
def output(self, key, obj):
try:
if len(get_args(self.func)) > 1:
if self.parent.context is None:
msg = 'No context available for Function field {0!r}'.format(key)
raise MarshallingError(msg)
return self.func(obj, self.parent.context)
else:
return self.func(obj)
except TypeError as te: # Function is not callable
raise MarshallingError(te)
except AttributeError: # the object is not expected to have the attribute
pass
class Select(Raw):
"""A field that provides a set of values which an attribute must be
    constrained to.
:param choices: A list of valid values.
:param default: Default value for the field if the attribute is not set.
:param str attribute: The name of the attribute to get the value from. If
``None``, assumes the attribute has the same name as the field.
:param str error: Error message stored upon validation failure.
:raises: MarshallingError if attribute's value is not one of the given choices.
"""
def __init__(self, choices, default=None, attribute=None, error=None, **kwargs):
self.choices = choices
return super(Select, self).__init__(default, attribute, error, **kwargs)
def format(self, value):
if value not in self.choices:
raise MarshallingError("{0!r} is not a valid choice for this field.".format(value))
return value
Enum = Select
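# Minimal usage sketch, assuming the package's Serializer base class; the
# field and choice names below are illustrative only:
#
#     class UserSerializer(Serializer):
#         name = String()
#         email = Email()
#         registered = DateTime(format='iso')
#         role = Select(choices=('admin', 'member'))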
|
bsd-2-clause
| 5,213,536,666,867,892,000 | 33.961057 | 95 | 0.597685 | false |
salrashid123/gcpsamples
|
id_token/iam_svc_tokens/main.py
|
1
|
3398
|
import logging
import os
import sys
import json
import time
import pprint
from apiclient.discovery import build
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client.client import GoogleCredentials
from apiclient import discovery
custom_claim = "some custom_claim"
audience = 'api.endpoints.YOUR_PROJECT.cloud.goog'
svc_account_A = 'svc-2-429@mineral-minutia-820.iam.gserviceaccount.com'
svc_account_B = 'service-account-b@mineral-minutia-820.iam.gserviceaccount.com'
svc_account_C = 'id-service-account-c@mineral-minutia-820.iam.gserviceaccount.com'
# initialize root creds for A
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/srashid/gcp_misc/certs/mineral-minutia-820-83b3ce7dcddb.json"
project_id ='-'
cc = GoogleCredentials.get_application_default()
iam_scopes = 'https://www.googleapis.com/auth/iam https://www.googleapis.com/auth/cloud-platform'
if cc.create_scoped_required():
cc = cc.create_scoped(iam_scopes)
http = cc.authorize(httplib2.Http())
service = build(serviceName='iam', version= 'v1',http=http)
resource = service.projects()
now = int(time.time())
exptime = now + 3600
claim =('{"iss":"%s",'
'"aud":"%s",'
'"sub":"%s",'
'"X-Goog-Authenticated-User-ID":"%s",'
'"exp":%s,'
'"iat":%s}') %(svc_account_B,audience,svc_account_B,custom_claim,exptime,now)
slist = resource.serviceAccounts().signJwt(name='projects/' + project_id + '/serviceAccounts/' + svc_account_B, body={'payload': claim })
resp = slist.execute()
signed_jwt = resp['signedJwt']
print 'iam.signJwt() for A: --------------------- '
print signed_jwt
iamcredentials = build(serviceName='iamcredentials', version= 'v1',http=http)
print '=========================== no delegation =================================='
body={
"delegates": [],
"scope": [
"https://www.googleapis.com/auth/cloud-platform"
],
"lifetime": "300s"
}
req = iamcredentials.projects().serviceAccounts().generateAccessToken(name='projects/' + project_id + '/serviceAccounts/' + svc_account_B, body=body )
resp = req.execute()
print 'iamcredentials.generateAccessToken(): --------------------- '
print resp
body = {
"delegates": [],
"audience": svc_account_B,
"includeEmail": "true"
}
req = iamcredentials.projects().serviceAccounts().generateIdToken(name='projects/' + project_id + '/serviceAccounts/' + svc_account_B, body=body )
resp = req.execute()
print 'iamcredentials.generateIdToken(): --------------------- '
print resp
print '=========================== delegation =================================='
body={
"delegates": [
'projects/-/serviceAccounts/' + svc_account_B
],
"scope": [
"https://www.googleapis.com/auth/cloud-platform"
],
"lifetime": "300s"
}
req = iamcredentials.projects().serviceAccounts().generateAccessToken(name='projects/' + project_id + '/serviceAccounts/' + svc_account_C, body=body )
resp = req.execute()
print 'iamcredentials.generateAccessToken(): --------------------- '
print resp
body = {
"delegates": [
'projects/-/serviceAccounts/' + svc_account_B
],
"audience": svc_account_B,
"includeEmail": "true"
}
req = iamcredentials.projects().serviceAccounts().generateIdToken(name='projects/' + project_id + '/serviceAccounts/' + svc_account_C, body=body )
resp = req.execute()
print 'iamcredentials.generateIdToken(): --------------------- '
print resp
|
apache-2.0
| 2,382,444,408,897,355,000 | 26.184 | 150 | 0.662743 | false |
cbpygit/pypmj
|
projects/scattering/photonic_crystals/slabs/hexagonal/half_spaces/hex_plane_tools.py
|
1
|
4284
|
from scipy.linalg import expm, norm
import numpy as np
def rot_mat(axis, theta):
return expm(np.cross(np.eye(3), axis/norm(axis)*theta))
def rotate_vector(v, axis, theta):
M = rot_mat(axis, theta)
return np.tensordot(M,v,axes=([0],[1])).T #np.dot(M, v)
def rotate_around_z(v, theta):
return rotate_vector(v, np.array([0.,0.,1.]), theta)
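def _rotation_example():
    """Editor's illustrative sketch (not part of the original module): the
    helpers above act on arrays of row vectors (shape (N, 3)) because of the
    tensordot/transpose in rotate_vector; a 180 degree rotation about z maps
    (1, 0, 0) onto approximately (-1, 0, 0)."""
    v = np.array([[1., 0., 0.]])
    return rotate_around_z(v, np.pi)  # ~ array([[-1., 0., 0.]])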
def is_odd(num):
return num & 0x1
def is_inside_hexagon(x, y, d=None, x0=0., y0=0.):
p_eps = 10.*np.finfo(float).eps
if d is None:
d = y.max() - y.min() + p_eps
dx = np.abs(x - x0)/d
dy = np.abs(y - y0)/d
a = 0.25 * np.sqrt(3.0)
return np.logical_and(dx <= a, a*dy + 0.25*dx <= 0.5*a)
def get_hex_plane(plane_idx, inradius, z_height, z_center, np_xy,
np_z):
    # We use 10* float machine precision to correct the coordinates
# to avoid leaving the computational domain due to precision
# problems
p_eps = 10.*np.finfo(float).eps
ri = inradius # short for inradius
rc = inradius/np.sqrt(3.)*2. # short for circumradius
if np_z == 'auto':
np_z = int(np.round(float(np_xy)/2./rc*z_height))
# XY-plane (no hexagonal shape!)
if plane_idx == 6:
X = np.linspace(-ri+p_eps, ri-p_eps, np_xy)
Y = np.linspace(-rc+p_eps, rc-p_eps, np_xy)
XY = np.meshgrid(X,Y)
XYrs = np.concatenate((XY[0][..., np.newaxis],
XY[1][..., np.newaxis]),
axis=2)
Z = np.ones((np_xy, np_xy, 1))*z_center
pl = np.concatenate((XYrs, Z), axis=2)
pl = pl.reshape(-1, pl.shape[-1])
# Restrict to hexagon
idx_hex = is_inside_hexagon(pl[:,0], pl[:,1])
return pl[idx_hex]
# Vertical planes
elif plane_idx < 6:
r = rc if is_odd(plane_idx) else ri
r = r-p_eps
xy_line = np.empty((np_xy,2))
xy_line[:,0] = np.linspace(-r, r, np_xy)
xy_line[:,1] = 0.
z_points = np.linspace(0.+p_eps, z_height-p_eps, np_z)
# Construct the plane
plane = np.empty((np_xy*np_z, 3))
for i, xy in enumerate(xy_line):
for j, z in enumerate(z_points):
idx = i*np_z + j
plane[idx, :2] = xy
plane[idx, 2] = z
# Rotate the plane
return rotate_around_z(plane, plane_idx*np.pi/6.)
else:
raise ValueError('`plane_idx` must be in [0...6].')
def get_hex_planes_point_list(inradius, z_height, z_center, np_xy, np_z,
plane_indices=[0,1,2,3,6]):
# Construct the desired planes
planes = []
for i in plane_indices:
planes.append(get_hex_plane(i, inradius, z_height, z_center,
np_xy, np_z))
# Flatten and save lengths
lengths = [len(p) for p in planes]
return np.vstack(planes), np.array(lengths)
def hex_planes_point_list_for_keys(keys, plane_indices=[0,1,2,3,6]):
if not 'uol' in keys:
keys['uol'] = 1.e-9
inradius = keys['p'] * keys['uol'] /2.
z_height = (keys['h'] + keys['h_sub'] + keys['h_sup']) * keys['uol']
z_center = (keys['h_sub']+keys['h']/2.) * keys['uol']
np_xy = keys['hex_np_xy']
    # use an explicit hex_np_z from the keys if provided, otherwise let it be
    # determined automatically
    if 'hex_np_z' not in keys:
        np_z = 'auto'
    else:
        np_z = keys['hex_np_z']
    return get_hex_planes_point_list(inradius, z_height, z_center, np_xy,
                                     np_z, plane_indices=plane_indices)
def plane_idx_iter(lengths_):
"""Yields the plane index plus lower index `idx_i` and upper index
`idx_f` of the point list representing this plane
(i.e. pointlist[idx_i:idx_f]).
"""
i = 0
while i < len(lengths_):
yield i, lengths_[:i].sum(), lengths_[:(i+1)].sum()
i += 1
def plot_planes(pointlist, lengths):
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = sns.color_palette('husl', len(lengths))
for i, idx_i, idx_f in plane_idx_iter(lengths):
pl = pointlist[idx_i:idx_f]
ax.scatter(pl[:,0], pl[:,1], pl[:,2], s=10., c=colors[i],
label='plane {}'.format(i+1), linewidth=0.)
_ = plt.legend(loc='upper left')
|
gpl-3.0
| 4,831,893,324,004,283,000 | 32.732283 | 74 | 0.535247 | false |
JoshAshby/seshat
|
seshat/actions.py
|
1
|
3451
|
#!/usr/bin/env python
"""
Actions allow you to write code that looks like::
class RandomController(BaseController):
def GET(self):
return Redirect("/")
which I think looks a lot nicer than::
class RandomController(BaseController):
def GET(self):
self.head.status = "303 SEE OTHER"
self.head.append("location", "/")
This module provides a few common Action classes to use, along with a
BaseAction which can be inherited to create your own Actions.
"""
"""
For more information and licensing, see: https://github.com/JoshAshby/seshat
http://xkcd.com/353/
Josh Ashby
2014
http://joshashby.com
joshuaashby@joshashby.com
"""
from head import Head
class BaseAction(object):
"""
Provides a base for creating a new object which represents an HTTP Status code.
All returned data is checked if it is of type `BaseAction` and if so, the
data/actions head is returned rather than the controllers head. This allows
for a syntax like::
return NotFound()
which will cause the controller to return a 404 status code.
To create a new action, inherit this class then make a new `__init__(self, *kargs)`
which sets `self.head` to a :py:class:`.Head` object.
"""
head = None
def __len__(self):
return 0
def __str__(self):
return ""
def __unicode__(self):
return ""
def encode(self, val):
return str(self).encode(val)
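# Editor's illustrative sketch (not part of the original module): a hypothetical
# custom action built the way the BaseAction docstring describes, i.e. subclass
# BaseAction and set ``self.head`` to a Head object in ``__init__``.
class ImATeapot(BaseAction):
    """Hypothetical example returning a 418 status code from a controller."""
    def __init__(self):
        self.head = Head("418 I'M A TEAPOT")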
##############################################################################
###### ### ###
# ## ## ## ##
# ## ## ## ##
# ## ## ## ##
###### ## ## ## ##
# ## ## ## ##
# ## ## ## ##
# ## ## ## ##
###### ### ###
##############################################################################
class Redirect(BaseAction):
"""
Returns a 303 See Other status code along with a `location` header back
to the client.
:param loc: The location to which the client should be redirect to
:type loc: str
"""
def __init__(self, loc):
self.head = Head("303 SEE OTHER")
self.head.add_header("Location", loc)
##############################################################################
### ### ###
# # ## ## ## ##
# # ## ## ## ##
# # ## ## ## ##
####### ## ## ## ##
# ## ## ## ##
# ## ## ## ##
# ## ## ## ##
# ### ###
##############################################################################
class BadRequest(BaseAction):
def __init__(self):
self.head = Head("400 BAD REQUEST")
class Unauthorized(BaseAction):
"""
Returns a 401 Unauthorized status code back to the client
"""
def __init__(self):
self.head = Head("401 UNAUTHORIZED")
class Forbidden(BaseAction):
def __init__(self):
self.head = Head("403 FORBIDDEN")
class NotFound(BaseAction):
"""
Returns a 404 Not Found code and the resulting 404 error controller to be
returned to the client.
"""
def __init__(self):
self.head = Head("404 NOT FOUND")
class MethodNotAllowed(BaseAction):
def __init__(self, allow):
assert type(allow) is list
a = ", ".join(allow).upper()
al = [("Allow", a)]
self.head = Head("405 METHOD NOT ALLOWED", al)
|
gpl-3.0
| 7,935,633,218,518,734,000 | 25.960938 | 87 | 0.485656 | false |
rolandgeider/wger
|
wger/core/views/user.py
|
1
|
20020
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import logging
# Django
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import (
authenticate,
login as django_login,
logout as django_logout
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin
)
from django.contrib.auth.models import User
from django.contrib.auth.views import (
LoginView,
PasswordChangeView,
PasswordResetConfirmView,
PasswordResetView
)
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect
)
from django.shortcuts import (
get_object_or_404,
render
)
from django.template.context_processors import csrf
from django.urls import (
reverse,
reverse_lazy
)
from django.utils import translation
from django.utils.translation import (
ugettext as _,
ugettext_lazy
)
from django.views.generic import (
DetailView,
ListView,
RedirectView,
UpdateView
)
# Third Party
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
ButtonHolder,
Column,
Layout,
Row,
Submit
)
from rest_framework.authtoken.models import Token
# wger
from wger.config.models import GymConfig
from wger.core.forms import (
PasswordConfirmationForm,
RegistrationForm,
RegistrationFormNoCaptcha,
UserLoginForm,
UserPersonalInformationForm,
UserPreferencesForm
)
from wger.core.models import Language
from wger.gym.models import (
AdminUserNote,
Contract,
GymUserConfig
)
from wger.manager.models import (
Workout,
WorkoutLog,
WorkoutSession
)
from wger.nutrition.models import NutritionPlan
from wger.utils.api_token import create_token
from wger.utils.generic_views import (
WgerFormMixin,
WgerMultiplePermissionRequiredMixin
)
from wger.weight.models import WeightEntry
logger = logging.getLogger(__name__)
def login(request):
"""
Small wrapper around the django login view
"""
next_url = "?next=" + request.GET.get('next') if request.GET.get('next') else ''
form = UserLoginForm
form.helper.form_action = reverse('core:user:login') + next_url
return LoginView.as_view(template_name='user/login.html',
authentication_form=form)
@login_required()
def delete(request, user_pk=None):
"""
Delete a user account and all his data, requires password confirmation first
If no user_pk is present, the user visiting the URL will be deleted, otherwise
a gym administrator is deleting a different user
"""
if user_pk:
user = get_object_or_404(User, pk=user_pk)
# Forbidden if the user has not enough rights, doesn't belong to the
# gym or is an admin as well. General admins can delete all users.
if not request.user.has_perm('gym.manage_gyms') \
and (not request.user.has_perm('gym.manage_gym')
or request.user.userprofile.gym_id != user.userprofile.gym_id
or user.has_perm('gym.manage_gym')
or user.has_perm('gym.gym_trainer')
or user.has_perm('gym.manage_gyms')):
return HttpResponseForbidden()
else:
user = request.user
form = PasswordConfirmationForm(user=request.user)
if request.method == 'POST':
form = PasswordConfirmationForm(data=request.POST, user=request.user)
if form.is_valid():
user.delete()
messages.success(request,
_('Account "{0}" was successfully deleted').format(user.username))
if not user_pk:
django_logout(request)
return HttpResponseRedirect(reverse('software:features'))
else:
gym_pk = request.user.userprofile.gym_id
return HttpResponseRedirect(reverse('gym:gym:user-list', kwargs={'pk': gym_pk}))
form.helper.form_action = request.path
context = {'form': form,
'user_delete': user}
return render(request, 'user/delete_account.html', context)
@login_required()
def trainer_login(request, user_pk):
"""
Allows a trainer to 'log in' as the selected user
"""
user = get_object_or_404(User, pk=user_pk)
orig_user_pk = request.user.pk
# Changing only between the same gym
if request.user.userprofile.gym != user.userprofile.gym:
return HttpResponseForbidden()
# No changing if identity is not set
if not request.user.has_perm('gym.gym_trainer') \
and not request.session.get('trainer.identity'):
return HttpResponseForbidden()
# Changing between trainers or managers is not allowed
if request.user.has_perm('gym.gym_trainer') \
and (user.has_perm('gym.gym_trainer')
or user.has_perm('gym.manage_gym')
or user.has_perm('gym.manage_gyms')):
return HttpResponseForbidden()
# Check if we're switching back to our original account
own = False
if (user.has_perm('gym.gym_trainer')
or user.has_perm('gym.manage_gym')
or user.has_perm('gym.manage_gyms')):
own = True
# Note: when logging without authenticating, it is necessary to set the
# authentication backend
if own:
del(request.session['trainer.identity'])
django_login(request, user, 'django.contrib.auth.backends.ModelBackend')
if not own:
request.session['trainer.identity'] = orig_user_pk
if request.GET.get('next'):
return HttpResponseRedirect(request.GET['next'])
else:
return HttpResponseRedirect(reverse('core:index'))
else:
return HttpResponseRedirect(reverse('gym:gym:user-list',
kwargs={'pk': user.userprofile.gym_id}))
def logout(request):
"""
Logout the user. For temporary users, delete them.
"""
user = request.user
django_logout(request)
if user.is_authenticated and user.userprofile.is_temporary:
user.delete()
return HttpResponseRedirect(reverse('core:user:login'))
def registration(request):
"""
A form to allow for registration of new users
"""
# If global user registration is deactivated, redirect
if not settings.WGER_SETTINGS['ALLOW_REGISTRATION']:
return HttpResponseRedirect(reverse('software:features'))
template_data = {}
template_data.update(csrf(request))
# Don't show captcha if the global parameter is false
FormClass = RegistrationForm if settings.WGER_SETTINGS['USE_RECAPTCHA'] \
else RegistrationFormNoCaptcha
# Redirect regular users, in case they reached the registration page
if request.user.is_authenticated and not request.user.userprofile.is_temporary:
return HttpResponseRedirect(reverse('core:dashboard'))
if request.method == 'POST':
form = FormClass(data=request.POST)
# If the data is valid, log in and redirect
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
user = User.objects.create_user(username,
email,
password)
user.save()
# Pre-set some values of the user's profile
language = Language.objects.get(short_name=translation.get_language())
user.userprofile.notification_language = language
# Set default gym, if needed
gym_config = GymConfig.objects.get(pk=1)
if gym_config.default_gym:
user.userprofile.gym = gym_config.default_gym
# Create gym user configuration object
config = GymUserConfig()
config.gym = gym_config.default_gym
config.user = user
config.save()
user.userprofile.save()
user = authenticate(username=username, password=password)
django_login(request, user)
messages.success(request, _('You were successfully registered'))
return HttpResponseRedirect(reverse('core:dashboard'))
else:
form = FormClass()
template_data['form'] = form
template_data['title'] = _('Register')
return render(request, 'form.html', template_data)
@login_required
def preferences(request):
"""
An overview of all user preferences
"""
template_data = {}
template_data.update(csrf(request))
redirect = False
# Process the preferences form
if request.method == 'POST':
form = UserPreferencesForm(data=request.POST, instance=request.user.userprofile)
form.user = request.user
# Save the data if it validates
if form.is_valid():
form.save()
redirect = True
else:
data = {'first_name': request.user.first_name,
'last_name': request.user.last_name,
'email': request.user.email}
form = UserPreferencesForm(initial=data, instance=request.user.userprofile)
# Process the email form
if request.method == 'POST':
email_form = UserPersonalInformationForm(data=request.POST, instance=request.user)
if email_form.is_valid() and redirect:
email_form.save()
redirect = True
else:
redirect = False
template_data['form'] = form
if redirect:
messages.success(request, _('Settings successfully updated'))
return HttpResponseRedirect(reverse('core:user:preferences'))
else:
return render(request, 'user/preferences.html', template_data)
class UserDeactivateView(LoginRequiredMixin,
WgerMultiplePermissionRequiredMixin,
RedirectView):
"""
Deactivates a user
"""
permanent = False
model = User
permission_required = ('gym.manage_gym', 'gym.manage_gyms', 'gym.gym_trainer')
def dispatch(self, request, *args, **kwargs):
"""
Only managers and trainers for this gym can access the members
"""
edit_user = get_object_or_404(User, pk=self.kwargs['pk'])
if not request.user.is_authenticated:
return HttpResponseForbidden()
if (request.user.has_perm('gym.manage_gym') or request.user.has_perm('gym.gym_trainer')) \
and edit_user.userprofile.gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(UserDeactivateView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, pk):
edit_user = get_object_or_404(User, pk=pk)
edit_user.is_active = False
edit_user.save()
messages.success(self.request, _('The user was successfully deactivated'))
return reverse('core:user:overview', kwargs=({'pk': pk}))
class UserActivateView(LoginRequiredMixin,
WgerMultiplePermissionRequiredMixin,
RedirectView):
"""
Activates a previously deactivated user
"""
permanent = False
model = User
permission_required = ('gym.manage_gym', 'gym.manage_gyms', 'gym.gym_trainer')
def dispatch(self, request, *args, **kwargs):
"""
Only managers and trainers for this gym can access the members
"""
edit_user = get_object_or_404(User, pk=self.kwargs['pk'])
if not request.user.is_authenticated:
return HttpResponseForbidden()
if (request.user.has_perm('gym.manage_gym') or request.user.has_perm('gym.gym_trainer')) \
and edit_user.userprofile.gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(UserActivateView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, pk):
edit_user = get_object_or_404(User, pk=pk)
edit_user.is_active = True
edit_user.save()
messages.success(self.request, _('The user was successfully activated'))
return reverse('core:user:overview', kwargs=({'pk': pk}))
class UserEditView(WgerFormMixin,
LoginRequiredMixin,
WgerMultiplePermissionRequiredMixin,
UpdateView):
"""
View to update the personal information of an user by an admin
"""
model = User
title = ugettext_lazy('Edit user')
permission_required = ('gym.manage_gym', 'gym.manage_gyms')
form_class = UserPersonalInformationForm
def dispatch(self, request, *args, **kwargs):
"""
Check permissions
- Managers can edit members of their own gym
- General managers can edit every member
"""
user = request.user
if not user.is_authenticated:
return HttpResponseForbidden()
if user.has_perm('gym.manage_gym') \
and not user.has_perm('gym.manage_gyms') \
and user.userprofile.gym != self.get_object().userprofile.gym:
return HttpResponseForbidden()
return super(UserEditView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('core:user:overview', kwargs={'pk': self.kwargs['pk']})
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(UserEditView, self).get_context_data(**kwargs)
context['title'] = _('Edit {0}'.format(self.object))
return context
@login_required
def api_key(request):
"""
Allows the user to generate an API key for the REST API
"""
context = {}
context.update(csrf(request))
try:
token = Token.objects.get(user=request.user)
except Token.DoesNotExist:
token = None
if request.GET.get('new_key'):
token = create_token(request.user, request.GET.get('new_key'))
# Redirect to get rid of the GET parameter
return HttpResponseRedirect(reverse('core:user:api-key'))
context['token'] = token
return render(request, 'user/api_key.html', context)
class UserDetailView(LoginRequiredMixin, WgerMultiplePermissionRequiredMixin, DetailView):
"""
User overview for gyms
"""
model = User
permission_required = ('gym.manage_gym', 'gym.manage_gyms', 'gym.gym_trainer')
template_name = 'user/overview.html'
context_object_name = 'current_user'
def dispatch(self, request, *args, **kwargs):
"""
Check permissions
- Only managers for this gym can access the members
- General managers can access the detail page of all users
"""
user = request.user
if not user.is_authenticated:
return HttpResponseForbidden()
if (user.has_perm('gym.manage_gym') or user.has_perm('gym.gym_trainer')) \
and not user.has_perm('gym.manage_gyms') \
and user.userprofile.gym != self.get_object().userprofile.gym:
return HttpResponseForbidden()
return super(UserDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(UserDetailView, self).get_context_data(**kwargs)
out = []
workouts = Workout.objects.filter(user=self.object).all()
for workout in workouts:
logs = WorkoutLog.objects.filter(workout=workout)
out.append({'workout': workout,
'logs': logs.dates('date', 'day').count(),
'last_log': logs.last()})
context['workouts'] = out
context['weight_entries'] = WeightEntry.objects.filter(user=self.object)\
.order_by('-date')[:5]
context['nutrition_plans'] = NutritionPlan.objects.filter(user=self.object)\
.order_by('-creation_date')[:5]
context['session'] = WorkoutSession.objects.filter(user=self.object).order_by('-date')[:10]
context['admin_notes'] = AdminUserNote.objects.filter(member=self.object)[:5]
context['contracts'] = Contract.objects.filter(member=self.object)[:5]
return context
class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
"""
Overview of all users in the instance
"""
model = User
permission_required = ('gym.manage_gyms',)
template_name = 'user/list.html'
def get_queryset(self):
"""
Return a list with the users, not really a queryset.
"""
out = {'admins': [],
'members': []}
for u in User.objects.select_related('usercache', 'userprofile__gym').all():
out['members'].append({'obj': u,
'last_log': u.usercache.last_activity})
return out
def get_context_data(self, **kwargs):
"""
Pass other info to the template
"""
context = super(UserListView, self).get_context_data(**kwargs)
context['show_gym'] = True
context['user_table'] = {'keys': [_('ID'),
_('Username'),
_('Name'),
_('Last activity'),
_('Gym')],
'users': context['object_list']['members']}
return context
class WgerPasswordChangeView(PasswordChangeView):
template_name = 'form.html'
success_url = reverse_lazy('core:user:preferences')
title = ugettext_lazy("Change password")
def get_form(self, form_class=None):
form = super(WgerPasswordChangeView, self).get_form(form_class)
form.helper = FormHelper()
form.helper.form_class = 'wger-form'
form.helper.layout = Layout(
'old_password',
Row(
Column('new_password1', css_class='form-group col-6 mb-0'),
Column('new_password2', css_class='form-group col-6 mb-0'),
css_class='form-row'
),
ButtonHolder(Submit('submit', _("Save"), css_class='btn-success btn-block'))
)
return form
class WgerPasswordResetView(PasswordResetView):
template_name = 'form.html'
email_template_name = 'registration/password_reset_email.html'
success_url = reverse_lazy('core:user:password_reset_done')
def get_form(self, form_class=None):
form = super(WgerPasswordResetView, self).get_form(form_class)
form.helper = FormHelper()
form.helper.form_class = 'wger-form'
form.helper.add_input(Submit('submit', _("Save"), css_class='btn-success btn-block'))
return form
class WgerPasswordResetConfirmView(PasswordResetConfirmView):
template_name = 'form.html'
success_url = reverse_lazy('core:user:login')
def get_form(self, form_class=None):
form = super(WgerPasswordResetConfirmView, self).get_form(form_class)
form.helper = FormHelper()
form.helper.form_class = 'wger-form'
form.helper.add_input(Submit('submit', _("Save"), css_class='btn-success btn-block'))
return form
|
agpl-3.0
| -5,030,452,352,494,952,000 | 32.311148 | 99 | 0.625325 | false |
beingzy/user_recommender_framework
|
groupwise_distance_learning/tests/test_helper_func.py
|
1
|
2232
|
""" functions for developing
Author: Yi Zhang <beingzy@gmail.com>
Date: 2016/03/10
"""
import os
import os.path
from os.path import dirname, abspath, join
import pandas as pd
def get_file_parent_dir_path(level=1):
""" return the path of the parent directory of current file """
current_dir_path = dirname(abspath(__file__))
path_sep = os.path.sep
components = current_dir_path.split(path_sep)
return path_sep.join(components[:-level])
def load_sample_test_data():
""" load small test data """
_root_dir = get_file_parent_dir_path(level=2)
_data_dir = join(_root_dir, 'data', 'small_test')
user_profile_fpath = join(_data_dir, "user_profile.csv")
user_connections_fpath = join(_data_dir, "connections.csv")
int_user_profile_df = pd.read_csv(user_profile_fpath, header=0, sep=',')
user_connections_df = pd.read_csv(user_connections_fpath, header=0, sep=',')
user_ids = int_user_profile_df.id.tolist()
    # remove the id column and the categorical feature column
user_profile_df = int_user_profile_df.drop(["id", "feat_3"], axis=1, inplace=False).as_matrix()
user_connections_df = user_connections_df.as_matrix()
return user_ids, user_profile_df, user_connections_df
def load_simulated_test_data():
""" load simulationd data with defined two groups """
_root_dir = get_file_parent_dir_path(level=2)
_data_dir = join(_root_dir, 'data', 'sim_two_groups')
user_profile_fpath = join(_data_dir, "user_profiles.csv")
user_connections_fpath = join(_data_dir, "friendships.csv")
# prepare user profile information
user_profile_df = pd.read_csv(user_profile_fpath, header=0, sep=",")
# unpack data
user_ids = user_profile_df.ID.tolist()
user_true_groups = user_profile_df.decision_style.tolist()
user_profile_df = user_profile_df.drop(["ID", "decision_style"], axis=1, inplace=False).as_matrix()
user_connections_df = pd.read_csv(user_connections_fpath, header=0, sep=",")
user_connections_df = (user_connections_df[user_connections_df.isFriend==1]
.drop('isFriend', axis=1, inplace=False).astype(int).as_matrix())
return user_ids, user_profile_df, user_connections_df, user_true_groups
|
gpl-3.0
| 5,844,418,779,091,332,000 | 35 | 103 | 0.675627 | false |
nkhumphreys/django-documentregister
|
documentregister/documents/tests.py
|
1
|
7760
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from documents.models import DocumentType, Document
from documents.forms import DocumentRegistrationForm
from django_webtest import WebTest
class DocumentModelTest(TestCase):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
self.d = Document.objects.create(
document_type=self.dt,
title='test',
description='test',
author_name='Mr QA',
author_email='qa@example.com',
link='https://www.example.com'
)
def test_document_tag(self):
document = Document.objects.get(pk=self.d.pk)
self.assertEqual(
document.tag,
self.d.document_type.document_type_short + str(self.d.pk).zfill(5)
)
def test_full_title(self):
document = Document.objects.get(pk=self.d.pk)
full_title = '{} {}'.format(
self.d.document_type.document_type_short + str(self.d.pk).zfill(5),
self.d.title
)
self.assertEqual(
document.full_title,
full_title
)
def test_document_type_str(self):
document_type = DocumentType.objects.get(pk=self.dt.pk)
self.assertEqual(
str(document_type),
'{} - {}'.format(self.dt.document_type_short, self.dt.document_type_long)
)
class DocumentRegistrationFormTest(TestCase):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
def test_valid_data(self):
form = DocumentRegistrationForm({
'title': 'Test Document',
'document_type': self.dt.pk,
'description': 'test document description',
'author_name': 'Mr QA',
'author_email': 'mr.qa@example.com',
'link': 'https://example.com'
})
self.assertTrue(form.is_valid())
document = form.save()
self.assertEqual(document.title, 'Test Document')
self.assertEqual(document.document_type.pk, self.dt.pk)
self.assertEqual(document.description, 'test document description')
self.assertEqual(document.author_name, 'Mr QA')
self.assertEqual(document.author_email, 'mr.qa@example.com')
self.assertEqual(document.link, 'https://example.com')
def test_missing_data(self):
form = DocumentRegistrationForm({})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'title': ['This field is required.'],
'document_type': ['This field is required.'],
'description': ['This field is required.'],
'author_name': ['This field is required.'],
'author_email': ['This field is required.'],
'link': ['This field is required.']
})
def test_invalid_link(self):
form = DocumentRegistrationForm({
'title': 'Test Document',
'document_type': self.dt.pk,
'description': 'test document description',
'author_name': 'Mr QA',
'author_email': 'mr.qa@example.com',
'link': 'invalid_link'
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'link': ['Enter a valid URL.'],
})
def test_invalid_email(self):
form = DocumentRegistrationForm({
'title': 'Test Document',
'document_type': self.dt.pk,
'description': 'test document description',
'author_name': 'Mr QA',
'author_email': 'invalid_example.com',
'link': 'https://example.com'
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'author_email': ['Enter a valid email address.'],
})
class DocumentRegisterViewTest(WebTest):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
def test_view_page(self):
page = self.app.get(reverse('register'), user='Test')
self.assertEqual(len(page.forms), 3)
def test_submit_form(self):
page = self.app.get(reverse('register'), user='Test')
form = page.forms.get('register_form')
form['title'] = 'Test'
form['document_type'] = self.dt.pk
form['description'] = 'test document description'
form['author_name'] = 'Mr QA'
form['author_email'] = 'mr.qa@example.com'
form['link'] = 'https://example.com'
response = form.submit()
self.assertEqual(response.status_code, 302)
self.assertContains(response.follow(), 'test document description')
class DocumentSearchViewTest(WebTest):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
self.d = Document.objects.create(
document_type=self.dt,
title='test',
description='test',
author_name='Mr QA',
author_email='qa@example.com',
link='https://www.example.com'
)
def test_search_page_noresults(self):
page = self.app.get(reverse('search') + '?q=not_going_to_find_this', user='Test')
self.assertContains(page, 'No documents found!!! :(')
def test_search_page_oneresult(self):
page = self.app.get(reverse('search') + '?q=Test', user='Test')
self.assertContains(page, self.d.tag)
def test_search_page_noquery(self):
page = self.app.get(reverse('search'), user='Test')
self.assertContains(page, self.d.tag)
class DocumentDetailViewTest(WebTest):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
self.d = Document.objects.create(
document_type=self.dt,
title='test',
description='test',
author_name='Mr QA',
author_email='qa@example.com',
link='https://www.example.com'
)
def test_detail_page(self):
page = self.app.get(reverse('detail', kwargs={'pk': self.d.pk}), user='Test')
self.assertContains(page, self.d.full_title)
class DocumentEditViewTest(WebTest):
def setUp(self):
self.dt = DocumentType.objects.create(
document_type_short='TST',
document_type_long='Test Document',
description='test description'
)
self.d = Document.objects.create(
document_type=self.dt,
title='test',
description='test',
author_name='Mr QA',
author_email='qa@example.com',
link='https://www.example.com'
)
def test_view_edit_page(self):
page = self.app.get(reverse('edit', kwargs={'pk': self.d.pk}), user='Test')
self.assertEqual(len(page.forms), 3)
form = page.forms.get('register_form')
form['title'] = 'New Title'
response = form.submit()
self.assertEqual(response.status_code, 302)
self.assertContains(response.follow(), 'New Title')
new_d = Document.objects.get(pk=self.d.pk)
self.assertEqual(new_d.title, 'New Title')
|
mit
| -5,455,818,955,410,780,000 | 33.185022 | 89 | 0.576804 | false |
dials/dials
|
command_line/slice_sequence.py
|
1
|
8896
|
from os.path import basename, splitext
from dxtbx.model.experiment_list import ExperimentList
import dials.util
from dials.algorithms.refinement.refinement_helpers import calculate_frame_numbers
from dials.array_family import flex
from dials.util import Sorry
from dials.util.multi_dataset_handling import generate_experiment_identifiers
from dials.util.slice import slice_experiments, slice_reflections
help_message = """
Slice a sequence to produce a smaller sequence within the bounds of the original. If
experiments are provided, modify the scan objects within these. If
reflections are provided, remove reflections outside the provided image ranges.
Each image_range parameter refers to a single experiment ID, counting up from
zero. Any reflections with an experiment ID not matched by an image_range parameter
are removed.
Examples::
dials.slice_sequence models.expt observations.refl "image_range=1 20"
dials.slice_sequence models.expt "image_range=1 20"
# two experiments and reflections with IDs '0' and '1'
dials.slice_sequence models.expt observations.refl \
"image_range=1 20" "image_range=5 30"
"""
from libtbx.phil import parse
phil_scope = parse(
"""
output {
reflections_filename = None
.type = str
.help = "The filename for output reflections sliced to remove those"
"outside the reduced image range. By default generated"
"automatically from the input name"
experiments_filename = None
.type = str
.help = "The filename for the output experiments with sliced scans.
By default generated automatically from the input name"
}
image_range = None
.help = "Range in images to slice a sequence. The number of arguments"
"must be a factor of two. Each pair of arguments gives a range"
"that follows C conventions (e.g. j0 <= j < j1) when slicing the"
"reflections by observed centroid."
.type = ints(size=2)
.multiple = True
block_size = None
.type = float
.help = "Overrides image_range if present. This option splits each sequence"
"into the nearest integer number of equal size blocks close to"
"block_size degrees in width"
"""
)
def calculate_block_ranges(scan, block_size):
"""
    :param scan: a scan object
    :param block_size: target block size in degrees
    """
image_ranges = []
nimages = scan.get_num_images()
osc_range = scan.get_oscillation_range(deg=True)
osc_width = abs(osc_range[1] - osc_range[0])
nblocks = max(int(round(osc_width / block_size)), 1)
nblocks = min(nblocks, nimages)
# equal sized blocks except the last one that may contain extra images
# to make up the remainder
nimages_per_block = [nimages // nblocks] * (nblocks - 1) + [
nimages // nblocks + nimages % nblocks
]
start = scan.get_image_range()[0]
for nim in nimages_per_block:
image_ranges.append((start, start + nim - 1))
start += nim
return image_ranges
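def _example_block_ranges():
    """Editor's illustrative sketch (not part of the original module): shows how
    calculate_block_ranges splits a 100-image scan covering 50 degrees into
    10-degree blocks, using a minimal stand-in for the dxtbx scan object."""
    class _FakeScan:
        def get_num_images(self):
            return 100
        def get_oscillation_range(self, deg=True):
            return (0.0, 50.0)
        def get_image_range(self):
            return (1, 100)
    # Expected result: [(1, 20), (21, 40), (41, 60), (61, 80), (81, 100)]
    return calculate_block_ranges(_FakeScan(), block_size=10.0)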
class Script:
"""A class for running the script."""
def __init__(self):
"""Initialise the script."""
from dials.util.options import OptionParser
usage = (
"usage: dials.slice_sequence [options] [param.phil] "
"models.expt observations.refl"
)
# Create the parser
self.parser = OptionParser(
usage=usage,
phil=phil_scope,
read_reflections=True,
read_experiments=True,
check_format=False,
epilog=help_message,
)
def run(self, args=None):
"""Execute the script."""
from dials.util.options import reflections_and_experiments_from_files
# Parse the command line
params, options = self.parser.parse_args(args, show_diff_phil=True)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
# Try to load the models and data
slice_exps = len(experiments) > 0
slice_refs = len(reflections) > 0
# Catch case of nothing to do
if not slice_exps and not slice_refs:
print("No suitable input provided")
self.parser.print_help()
return
if reflections:
if len(reflections) > 1:
raise Sorry("Only one reflections list can be imported at present")
reflections = reflections[0]
# calculate frame numbers if needed
if experiments:
reflections = calculate_frame_numbers(reflections, experiments)
# if we still don't have the right column give up
if "xyzobs.px.value" not in reflections:
raise Sorry(
"These reflections do not have frame numbers set, and "
"there are no experiments provided to calculate these."
)
# set trivial case where no scan range is provided at all
if not params.image_range:
params.image_range = [None]
# check if slicing into blocks
if params.block_size is not None:
if not slice_exps:
raise Sorry(
"For slicing into blocks, an experiment file must be provided"
)
if len(experiments) > 1:
raise Sorry("For slicing into blocks please provide a single scan only")
scan = experiments[0].scan
# Having extracted the scan, calculate the blocks
params.image_range = calculate_block_ranges(scan, params.block_size)
# Do the slicing then recombine
sliced = [
slice_experiments(experiments, [sr])[0] for sr in params.image_range
]
generate_experiment_identifiers(sliced)
sliced_experiments = ExperimentList(sliced)
# slice reflections if present
if slice_refs:
sliced = [
slice_reflections(reflections, [sr]) for sr in params.image_range
]
sliced_reflections = flex.reflection_table()
identifiers = sliced_experiments.identifiers()
# resetting experiment identifiers
for i, rt in enumerate(sliced):
for k in rt.experiment_identifiers().keys():
del rt.experiment_identifiers()[k]
rt["id"] = flex.int(rt.size(), i) # set id
rt.experiment_identifiers()[i] = identifiers[i]
sliced_reflections.extend(rt)
else:
# slice each dataset into the requested subset
if slice_exps:
sliced_experiments = slice_experiments(experiments, params.image_range)
if slice_refs:
sliced_reflections = slice_reflections(reflections, params.image_range)
# Save sliced experiments
if slice_exps:
output_experiments_filename = params.output.experiments_filename
if output_experiments_filename is None:
# take first filename as template
bname = basename(params.input.experiments[0].filename)
bname = splitext(bname)[0]
if not bname:
bname = "experiments"
if len(params.image_range) == 1 and params.image_range[0] is not None:
ext = "_{}_{}.expt".format(*params.image_range[0])
else:
ext = "_sliced.expt"
output_experiments_filename = bname + ext
print(f"Saving sliced experiments to {output_experiments_filename}")
sliced_experiments.as_file(output_experiments_filename)
# Save sliced reflections
if slice_refs:
output_reflections_filename = params.output.reflections_filename
if output_reflections_filename is None:
# take first filename as template
bname = basename(params.input.reflections[0].filename)
bname = splitext(bname)[0]
if not bname:
bname = "reflections"
if len(params.image_range) == 1 and params.image_range[0] is not None:
ext = "_{}_{}.refl".format(*params.image_range[0])
else:
ext = "_sliced.refl"
output_reflections_filename = bname + ext
print(f"Saving sliced reflections to {output_reflections_filename}")
sliced_reflections.as_file(output_reflections_filename)
return
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
|
bsd-3-clause
| -2,975,352,729,658,704,000 | 34.870968 | 88 | 0.60589 | false |
urbn/kombu
|
kombu/__init__.py
|
1
|
4315
|
"""Messaging library for Python."""
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
if sys.version_info < (2, 7): # pragma: no cover
raise Exception('Kombu 4.0 requires Python versions 2.7 or later.')
from collections import namedtuple # noqa
__version__ = '4.6.6'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com, ask@celeryproject.org'
__homepage__ = 'https://kombu.readthedocs.io'
__docformat__ = 'restructuredtext en'
# -eof meta-
version_info_t = namedtuple('version_info_t', (
'major', 'minor', 'micro', 'releaselevel', 'serial',
))
# bumpversion can only search for {current_version}
# so we have to parse the version here.
_temp = re.match(
r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
del(_temp)
del(re)
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
from kombu.connection import Connection, BrokerConnection # noqa
from kombu.entity import Exchange, Queue, binding # noqa
from kombu.message import Message # noqa
from kombu.messaging import Consumer, Producer # noqa
from kombu.pools import connections, producers # noqa
from kombu.utils.url import parse_url # noqa
from kombu.common import eventloop, uuid # noqa
from kombu.serialization import ( # noqa
enable_insecure_serializers,
disable_insecure_serializers,
)
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType # noqa
all_by_module = {
'kombu.connection': ['Connection', 'BrokerConnection'],
'kombu.entity': ['Exchange', 'Queue', 'binding'],
'kombu.message': ['Message'],
'kombu.messaging': ['Consumer', 'Producer'],
'kombu.pools': ['connections', 'producers'],
'kombu.utils.url': ['parse_url'],
'kombu.common': ['eventloop', 'uuid'],
'kombu.serialization': [
'enable_insecure_serializers',
'disable_insecure_serializers',
],
}
object_origins = {}
for module, items in all_by_module.items():
for item in items:
object_origins[item] = module
class module(ModuleType):
"""Customized Python module."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__', 'VERSION',
'__package__', '__version__', '__author__',
'__contact__', '__homepage__', '__docformat__'))
return result
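# Editor's note (illustrative, not part of the original module): once new_module
# (created below) replaces sys.modules['kombu'], an attribute access such as
# ``kombu.Connection`` triggers ``import kombu.connection`` lazily on first use
# instead of importing every submodule when ``kombu`` itself is imported.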
# 2.5 does not define __package__
try:
package = __package__
except NameError: # pragma: no cover
package = 'kombu'
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
'__file__': __file__,
'__path__': __path__,
'__doc__': __doc__,
'__all__': tuple(object_origins),
'__version__': __version__,
'__author__': __author__,
'__contact__': __contact__,
'__homepage__': __homepage__,
'__docformat__': __docformat__,
'__package__': package,
'version_info_t': version_info_t,
'version_info': version_info,
'VERSION': VERSION,
'absolute_import': absolute_import,
'unicode_literals': unicode_literals,
})
if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover
os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1')
from .utils import debug
debug.setup_logging()
|
bsd-3-clause
| -6,484,372,662,189,597,000 | 32.976378 | 74 | 0.593975 | false |
RedHatInsights/insights-core
|
insights/parsers/tests/test_journal_since_boot.py
|
1
|
1683
|
from insights.parsers.journal_since_boot import JournalSinceBoot
from insights.tests import context_wrap
MSGINFO = """
-- Logs begin at Wed 2017-02-08 15:18:00 CET, end at Tue 2017-09-19 09:12:59 CEST. --
May 18 15:13:34 lxc-rhel68-sat56 jabberd/sm[11057]: session started: jid=rhn-dispatcher-sat@lxc-rhel6-sat56.redhat.com/superclient
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM...
May 18 15:24:28 lxc-rhel68-sat56 yum[11597]: Installed: lynx-2.8.6-27.el6.x86_64
May 18 15:36:19 lxc-rhel68-sat56 yum[11954]: Updated: sos-3.2-40.el6.noarch
Apr 22 10:35:01 boy-bona CROND[27921]: (root) CMD (/usr/lib64/sa/sa1 -S DISK 1 1)
Apr 22 10:37:32 boy-bona crontab[28951]: (root) LIST (root)
Apr 22 10:40:01 boy-bona CROND[30677]: (root) CMD (/usr/lib64/sa/sa1 -S DISK 1 1)
Apr 22 10:41:13 boy-bona crontab[32515]: (root) LIST (root)
""".strip()
def test_messages():
msg_info = JournalSinceBoot(context_wrap(MSGINFO))
bona_list = msg_info.get('(root) LIST (root)')
assert 2 == len(bona_list)
assert bona_list[0].get('timestamp') == "Apr 22 10:37:32"
assert bona_list[1].get('timestamp') == "Apr 22 10:41:13"
crond = msg_info.get('CROND')
assert 2 == len(crond)
assert crond[0].get('procname') == "CROND[27921]"
assert msg_info.get('jabberd/sm[11057]')[0].get('hostname') == "lxc-rhel68-sat56"
assert msg_info.get('Wrapper')[0].get('message') == "--> Wrapper Started as Daemon"
assert msg_info.get('Launching')[0].get('raw_message') == "May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM..."
assert 2 == len(msg_info.get('yum'))
|
apache-2.0
| 1,267,607,497,514,420,700 | 55.1 | 131 | 0.686275 | false |
TylerTemp/md-video
|
md_video.py
|
1
|
7634
|
"""
Video block for python-markdown
This aims to provide the best possible output even when the extension is not installed.
The title is case insensitive and is not actually used; the header can also be just
`[video]`. This is only to avoid the quick reference link syntax.
Format:
[Video: Title of the Video]

[download.mp4](http://link.to.video/file.mp4)
[download.ogg](http://link.to.video/file.ogv)
[download.webm](http://link.to.video/file.webm)
[subtitle.en-US.vtt](http://link.to.subtitle/en_us.vtt "English")
[subtitle.zh.vtt](http://link.to.subtitle/zh.vtt "Chinese")
Output:
<video controls="controls" poster="http://link.to.poster/link.png">
<source src="http://link.to.video/file.mp4" type="video/mp4" />
<source src="http://link.to.video/file.ogv" type="video/ogg" />
<source src="http://link.to.video/file.webm" type="video/webm" />
<track src="http://link.to.subtitle/en_us.vtt" kind="subtitles" srclang="en-US" label="English" default="default" />
<track src="http://link.to.subtitle/zh.vtt" kind="subtitles" srclang="zh" label="Chinese" />
Your browser does not support the <code>video</code> element
</video>
"""
import re
from markdown.util import etree
from markdown import Extension
from markdown.blockprocessors import BlockProcessor
import logging
__version__ = '0.0.3'
__author__ = 'TylerTemp<tylertempdev@gmail.com>'
logger = logging.getLogger('MARKDOWN.video')
class VideoProcessor(BlockProcessor):
HEADER_RE = re.compile(
r'\[video\]|\[video: .*\]',
re.IGNORECASE
)
LINK_RE = re.compile(
r'(?P<is_img>\!?)\[(?P<text>.*?)\]'
r'('
r'\('
r'(?P<href>.+)'
r'\)'
r'|'
r'\[(?P<ref>.*?)\]'
r')'
)
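    # Editor's note (illustrative, not part of the original): LINK_RE matches a
    # Markdown image or link, i.e. an optional leading "!", the bracketed text,
    # and then either an inline "(href [title])" target or a "[ref]" style
    # reference label, captured in the named groups used by parse_link() below.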
def __init__(self, *a, **k):
self._cross_origin = k.pop('cross_origin', None)
super(VideoProcessor, self).__init__(*a, **k)
def test(self, parent, block):
if not self.HEADER_RE.match(block):
logger.debug('not matched')
return False
result = self.result = self.parse(block)
if result is None:
logger.debug('not in format')
return False
return True
def run(self, parent, blocks):
result = getattr(self, 'result', None)
if result is None:
result = self.parser(blocks[0])
blocks.pop(0)
poster, sources, subtitles = result
video = etree.SubElement(parent, 'video')
video.set('controls', 'controls')
cross_origin = self._cross_origin
if cross_origin is not None:
video.set('crossorigin', cross_origin)
video.text = ('Your browser does not support the '
'<code>video</code> element')
if poster:
video.set('poster', poster)
for src, type_ in sources:
source = etree.SubElement(video, 'source')
source.set('src', src)
source.set('type', type_)
for index, (src, lang, label) in enumerate(subtitles):
track = etree.SubElement(video, 'track')
track.set('src', src)
track.set('kind', 'subtitles')
if lang:
track.set('srclang', lang)
if label:
track.set('label', label)
if index == 0:
track.set('default', 'default')
return True
def parse(self, block):
lines = block.splitlines()
lines.pop(0)
poster = None
sources = [] # src, type
subtitles = [] # src, lang, label
for line in lines:
result = self.parse_link(line)
if result is None:
logger.debug('line %r not in format', line)
return None
logger.debug(result)
name, link, title = result
if name == 'poster':
poster = link
elif name.startswith('subtitle'):
split_name = name.split('.')
if len(split_name) < 3:
logger.debug('subtitle %r not in format', line)
return None
lang_type = split_name[1]
subtitles.append((link, lang_type, title))
else:
split_type = name.split('.')
if len(split_type) < 2:
logger.debug('source %r not in format', line)
return None
type_ = 'video/%s' % split_type[-1]
sources.append((link, type_))
return poster, sources, subtitles
def parse_link(self, md_str):
match = self.LINK_RE.match(md_str)
if match is None:
logger.debug('%r not in format', md_str)
return None
group = match.groupdict()
text = group['text']
href_mix = group['href']
if href_mix:
if group['is_img']:
sep = href_mix.split(maxsplit=1)
if len(sep) == 1:
href = sep[0]
title = None
else:
href, title = sep
else:
sep = href_mix.rsplit(maxsplit=1)
if len(sep) == 1:
href = sep[0]
title = None
else:
href, title = sep
if not title or len(title) < 2:
href = href_mix
else:
if title[0] == title[-1] and title[0] in '\'"':
title = title[1:-1]
else:
href = href_mix
if href.startswith('<') and href.endswith('>'):
href = href[1:-1]
else:
ref = group['ref']
if ref is None or ref not in self.parser.markdown.references:
logger.debug('ref %r not found', ref)
return None
href, title = self.parser.markdown.references[ref]
return text, href, title
class VideoExtension(Extension):
""" Add definition lists to Markdown. """
def __init__(self, **configs):
self.config = {'crossorigin':
[configs.get('crossorigin', None), 'cross origin']}
super(VideoExtension, self).__init__(**configs)
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
cross_origin = self.getConfig('crossorigin', None)
md.parser.blockprocessors.add('video',
VideoProcessor(
md.parser,
cross_origin=cross_origin),
'>empty')
def makeExtension(**configs):
return VideoExtension(**configs)
if __name__ == '__main__':
import markdown
logging.basicConfig(
level=logging.DEBUG,
format='\033[32m%(levelname)1.1s\033[0m[%(lineno)3s]%(message)s')
md = """
[Video: Title of the Video]

[download.mp4](http://link.to.video/fil e.mp4)
[download.ogg](http://link.to.video/fil e.ogv)
[download.webm](http://link.to.video/fil e.webm)
[subtitle.en-US.vtt](http://link.to.sub title/en_us.vtt "English")
[subtitle.zh.vtt](http://link.to.subtit le/zh.vtt "Chinese")
"""
result = markdown.markdown(md,
extensions=[
makeExtension(crossorigin="anonymous")])
print(result)
|
mit
| -9,049,451,487,628,241,000 | 30.415638 | 122 | 0.517684 | false |
gencer/sentry
|
src/sentry/south_migrations/0362_auto__add_userip__add_unique_userip_user_ip_address.py
|
1
|
89359
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'UserIP'
db.create_table('sentry_userip', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'])),
('ip_address', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['UserIP'])
# Adding unique constraint on 'UserIP', fields ['user', 'ip_address']
db.create_unique('sentry_userip', ['user_id', 'ip_address'])
def backwards(self, orm):
# Removing unique constraint on 'UserIP', fields ['user', 'ip_address']
db.delete_unique('sentry_userip', ['user_id', 'ip_address'])
# Deleting model 'UserIP'
db.delete_table('sentry_userip')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'74dbc78c664240998acbbd9baddc317b326fdb065f2f4895a675cf41cb104b3e'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'0807705461024be5a6978b60a6afc1569f1b2c392dba4b34b0dd207bf308a3f7'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Informed Quail'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'989d172caffb49d8bedfe80bde06ce22'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 11, 1, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 12, 1, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'1bea8eb1814e4117a89012c707b9f38ac8228539c79c4881ab34634e1b031cab'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'48035c31fad242b9a919ebd3364d52d3d8518557c68746d0bd2b2b72729f0ce4'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 11, 8, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'b55a1a81833943858c9188cfa8a5492c'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'instance'),)", 'object_name': 'IdentityProvider'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.minidumpfile': {
'Meta': {'object_name': 'MinidumpFile'},
'event_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 12, 1, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'4e958a58bac44cf0a91078aa82a8aa26'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'qSIMkyqhf7xmjj8f4wDXwdqR3no9wO0X'", 'max_length': '32'})
},
'sentry.useridentity': {
'Meta': {'unique_together': "(('user', 'identity'),)", 'object_name': 'UserIdentity'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identity': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Identity']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
| 5,967,626,897,996,228,000 | 89.904374 | 233 | 0.579516 | false |
IECS/MansOS
|
tools/IDE/src/Settings.py
|
1
|
2586
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
class Settings:
configFile = "config.cfg"
@staticmethod
def __init__():
defaultSettings = {
"active_language" : "ENG",
"platform" : "telosb",
"blockly_location": "../../../seal-blockly/blockly/demos/seal/index.html",
"blockly_port" : '8090',
"blockly_host" : "localhost",
"recently_opened_count" : "10"
}
Settings.config = ConfigParser.SafeConfigParser(defaultSettings)
        Settings.config.read(Settings.configFile)
if not Settings.config.has_section("CURRENT"):
Settings.config.add_section("CURRENT")
@staticmethod
def saveConfig():
with open(Settings.configFile, 'wb') as file:
Settings.config.write(file)
@staticmethod
def get(name):
try:
return Settings.config.get("CURRENT", name)
except:
print "No config entry found: " + name
return ""
@staticmethod
def set(name, value):
try:
return Settings.config.set("CURRENT", name, str(value))
except:
print "Can't add config(" + name + " : " + value + ")"
|
mit
| -204,155,399,332,977,860 | 38.8 | 86 | 0.676334 | false |
openmotics/gateway
|
src/gateway/api/serializers/group_action.py
|
1
|
1889
|
# Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GroupAction (de)serializer
"""
from __future__ import absolute_import
from gateway.api.serializers.base import SerializerToolbox
from gateway.dto import GroupActionDTO
if False: # MYPY
from typing import Dict, Optional, List, Tuple
class GroupActionSerializer(object):
WORD_MAX = 2 ** 16 - 1
BYTE_MAX = 255
@staticmethod
def serialize(group_action_dto, fields): # type: (GroupActionDTO, Optional[List[str]]) -> Dict
data = {'id': group_action_dto.id,
'name': group_action_dto.name,
'actions': ','.join([str(action) for action in group_action_dto.actions]),
'internal': group_action_dto.internal}
return SerializerToolbox.filter_fields(data, fields)
@staticmethod
def deserialize(api_data): # type: (Dict) -> GroupActionDTO
group_action_dto = GroupActionDTO(api_data['id'])
SerializerToolbox.deserialize(
dto=group_action_dto, # Referenced
api_data=api_data,
mapping={'name': ('name', None),
'actions': ('actions', lambda s: [] if s == '' else [int(a) for a in s.split(',')])}
)
return group_action_dto
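# Round-trip sketch (illustrative only, not part of the upstream gateway code);
# the id/name/actions values below are made up:
#
#   dto = GroupActionSerializer.deserialize(
#       {'id': 1, 'name': 'All off', 'actions': '160,5,255'})
#   data = GroupActionSerializer.serialize(dto, fields=['id', 'name', 'actions'])
#   # data == {'id': 1, 'name': 'All off', 'actions': '160,5,255'}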
|
agpl-3.0
| -3,511,298,968,444,998,000 | 38.354167 | 105 | 0.670196 | false |
julienbaley/zhtools
|
test/test_zhtools/test_transliteration/test_transliteration.py
|
1
|
1506
|
from zhtools.transliteration import Transliteration
import unittest
class TestTransliteration(unittest.TestCase):
def test_split_syllables(self):
self.assertEqual(Transliteration("wo3bu4zhi1dao4").text,
["wo3", "bu4", "zhi1", "dao4"])
self.assertEqual(Transliteration("zhe4r").text, ["zhe4", "r"])
self.assertEqual(Transliteration("san1 C").text, ["san1", "C"])
self.assertEqual(Transliteration("A B zhi1").text, ["A", "B", "zhi1"])
def test_pinyin(self):
self.assertEqual(Transliteration("zhe4r").get_pinyin(), "zhèr")
self.assertEqual(Transliteration("lu:e4").get_pinyin(), "lüè")
self.assertEqual(Transliteration("yi1hui4r5").get_pinyin(), "yīhuìr")
def test_pinyin_capitals(self):
self.assertEqual(Transliteration("Mao2 Ze2 dong1").get_pinyin(),
"Máo Zédōng")
self.assertEqual(Transliteration("A B zhi1").get_pinyin(), "A B zhī")
def test_non_implemented_tranlisteration(self):
with self.assertRaises(AttributeError):
Transliteration("").get_non_existing_transliteration_method()
def test_attribute_error(self):
with self.assertRaises(AttributeError):
Transliteration("").nonono()
def test_comma_is_excluded(self):
expected = ["zhi4", "zhe3", "qian1", "lü4", ",",
"bi4", "you3", "yi1", "shi1"]
self.assertEqual(Transliteration("".join(expected)).text, expected)
|
gpl-3.0
| -3,367,421,109,927,228,000 | 41.742857 | 78 | 0.631016 | false |
thepaul/uftrace
|
tests/t098_dump_tid.py
|
1
|
2454
|
#!/usr/bin/env python
from runtest import TestBase
import subprocess as sp
TDIR='xxx'
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'fork', """
uftrace file header: magic = 4674726163652100
uftrace file header: version = 4
uftrace file header: header size = 40
uftrace file header: endian = 1 (little)
uftrace file header: class = 2 (64 bit)
uftrace file header: features = 0x363 (PLTHOOK | TASK_SESSION | SYM_REL_ADDR | MAX_STACK | PERF_EVENT | AUTO_ARGS)
uftrace file header: info = 0x3bff
reading 5186.dat
58071.916834908 5186: [entry] main(400590) depth: 0
58071.916835853 5186: [entry] fork(400580) depth: 1
58071.917056572 5186: [exit ] fork(400580) depth: 1
58071.917091028 5186: [entry] wait(400570) depth: 1
58071.918038822 5186: [exit ] wait(400570) depth: 1
58071.918040938 5186: [entry] a(400774) depth: 1
58071.918041182 5186: [entry] b(400741) depth: 2
58071.918041482 5186: [entry] c(400706) depth: 3
58071.918042306 5186: [entry] getpid(400530) depth: 4
58071.918045615 5186: [exit ] getpid(400530) depth: 4
58071.918048103 5186: [exit ] c(400706) depth: 3
58071.918048457 5186: [exit ] b(400741) depth: 2
58071.918048760 5186: [exit ] a(400774) depth: 1
58071.918049117 5186: [exit ] main(400590) depth: 0
reading 5188.dat
""", sort='dump')
def pre(self):
record_cmd = '%s record -d %s %s' % (TestBase.uftrace_cmd, TDIR, 't-' + self.name)
sp.call(record_cmd.split())
return TestBase.TEST_SUCCESS
def runcmd(self):
import os.path
t = 0
for ln in open(os.path.join(TDIR, 'task.txt')):
if not ln.startswith('TASK'):
continue
try:
t = int(ln.split()[2].split('=')[1])
except:
pass
if t == 0:
return 'FAILED TO FIND TID'
return '%s dump -d %s --tid %d' % (TestBase.uftrace_cmd, TDIR, t)
def post(self, ret):
sp.call(['rm', '-rf', TDIR])
return ret
def fixup(self, cflags, result):
import platform
if platform.architecture()[0] == '32bit':
result = result.replace("2 (64 bit)", "1 (32 bit)")
p = sp.Popen(['file', 't-' + self.name], stdout=sp.PIPE)
if 'BuildID' not in p.communicate()[0].decode(errors='ignore'):
result = result.replace("0xbff", "0xbfd")
return result
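# Assumed harness flow (inferred from the methods above; the runtest.TestBase
# contract itself is not shown in this file): pre() records a trace into TDIR,
# runcmd() builds the `uftrace dump --tid <tid>` command from TDIR/task.txt,
# fixup() adapts the expected header for 32-bit or non-BuildID builds, and
# post() removes TDIR afterwards.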
|
gpl-2.0
| 676,877,935,123,326,600 | 35.088235 | 119 | 0.603912 | false |
h4ck3rm1k3/orca-sonar
|
src/orca/generator.py
|
1
|
46495
|
# Orca
#
# Copyright 2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Superclass of classes used to generate presentations for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2009 Sun Microsystems Inc."
__license__ = "LGPL"
import sys
import time
import traceback
import pyatspi
from . import braille
from . import debug
from . import messages
from . import settings
from .orca_i18n import _ # for gettext support
import collections
def _formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.args
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]
# The prefix to use for the individual generator methods
#
METHOD_PREFIX = "_generate"
class Generator:
"""Takes accessible objects and generates a presentation for those
objects. See the generate method, which is the primary entry
point."""
# pylint: disable-msg=W0142
def __init__(self, script, mode):
# pylint: disable-msg=W0108
self._mode = mode
self._script = script
self._methodsDict = {}
for method in \
[z for z in [getattr(self, y).__get__(self, self.__class__) for y in [x for x in dir(self) if x.startswith(METHOD_PREFIX)]] if isinstance(z, collections.Callable)]:
name = method.__name__[len(METHOD_PREFIX):]
name = name[0].lower() + name[1:]
self._methodsDict[name] = method
self._verifyFormatting()
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
globalsDict['obj'] = None
globalsDict['role'] = None
globalsDict['pyatspi'] = pyatspi
def _verifyFormatting(self):
# Verify the formatting strings are OK. This is only
        # for verification and does not affect the function of
# Orca at all.
# Populate the entire globals with empty arrays
# for the results of all the legal method names.
#
globalsDict = {}
for key in list(self._methodsDict.keys()):
globalsDict[key] = []
self._addGlobals(globalsDict)
for roleKey in self._script.formatting[self._mode]:
for key in ["focused", "unfocused"]:
try:
evalString = \
self._script.formatting[self._mode][roleKey][key]
except:
continue
else:
if not evalString:
# It's legal to have an empty string.
#
continue
while True:
try:
eval(evalString, globalsDict)
break
except NameError:
info = _formatExceptionInfo()
arg = info[1][0]
arg = arg.replace("name '", "")
arg = arg.replace("' is not defined", "")
if arg not in self._methodsDict:
debug.printException(debug.LEVEL_SEVERE)
debug.println(
debug.LEVEL_SEVERE,
"Unable to find function for '%s'\n" % arg)
globalsDict[arg] = []
except:
debug.printException(debug.LEVEL_SEVERE)
debug.println(
debug.LEVEL_SEVERE,
"While processing '%s' '%s' '%s' '%s'" \
% (roleKey, key, evalString, globalsDict))
break
def _overrideRole(self, newRole, args):
"""Convenience method to allow you to temporarily override the role in
        the args dictionary. This changes the role in args and
returns the old role so you can pass it back to _restoreRole.
"""
oldRole = args.get('role', None)
args['role'] = newRole
return oldRole
def _restoreRole(self, oldRole, args):
"""Convenience method to restore the old role back in the args
dictionary. The oldRole should have been obtained from
_overrideRole. If oldRole is None, then the 'role' key/value
pair will be deleted from args.
"""
if oldRole:
args['role'] = oldRole
else:
del args['role']
def generate(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the complete presentation for the
        object. The presentation to be generated depends highly upon the
formatting strings in formatting.py.
args is a dictionary that may contain any of the following:
- alreadyFocused: if True, we're getting an object
that previously had focus
- priorObj: if set, represents the object that had focus before
this object
- includeContext: boolean (default=True) which says whether
the context for an object should be included as a prefix
and suffix
- role: a role to override the object's role
- formatType: the type of formatting, such as
'focused', 'basicWhereAmI', etc.
- forceMnemonic: boolean (default=False) which says if we
should ignore the settings.enableMnemonicSpeaking setting
- forceTutorial: boolean (default=False) which says if we
should force a tutorial to be spoken or not
"""
startTime = time.time()
result = []
globalsDict = {}
self._addGlobals(globalsDict)
globalsDict['obj'] = obj
try:
globalsDict['role'] = args.get('role', obj.getRole())
except:
msg = 'Cannot generate presentation for: %s. Aborting' % obj
debug.println(debug.LEVEL_FINEST, msg)
return result
try:
# We sometimes want to override the role. We'll keep the
# role in the args dictionary as a means to let us do so.
#
args['role'] = globalsDict['role']
# We loop through the format string, catching each error
# as we go. Each error should always be a NameError,
# where the name is the name of one of our generator
# functions. When we encounter this, we call the function
# and get its results, placing them in the globals for the
# the call to eval.
#
args['mode'] = self._mode
if not args.get('formatType', None):
if args.get('alreadyFocused', False):
args['formatType'] = 'focused'
else:
args['formatType'] = 'unfocused'
formatting = self._script.formatting.getFormat(**args)
# Add in the context if this is the first time
# we've been called.
#
if not args.get('recursing', False):
if args.get('includeContext', True):
prefix = self._script.formatting.getPrefix(**args)
suffix = self._script.formatting.getSuffix(**args)
formatting = '%s + %s + %s' % (prefix, formatting, suffix)
args['recursing'] = True
firstTimeCalled = True
else:
firstTimeCalled = False
details = debug.getAccessibleDetails(debug.LEVEL_ALL, obj)
duration = "%.4f" % (time.time() - startTime)
debug.println(debug.LEVEL_ALL, "\nPREPARATION TIME: %s" % duration)
debug.println(
debug.LEVEL_ALL,
"generate %s for %s %s (args=%s) using '%s'" \
% (self._mode,
args['formatType'],
details,
repr(args),
formatting))
assert(formatting)
while True:
currentTime = time.time()
try:
result = eval(formatting, globalsDict)
break
except NameError:
result = []
info = _formatExceptionInfo()
arg = info[1][0]
arg = arg.replace("name '", "")
arg = arg.replace("' is not defined", "")
if arg not in self._methodsDict:
debug.printException(debug.LEVEL_SEVERE)
debug.println(
debug.LEVEL_SEVERE,
"Unable to find function for '%s'\n" % arg)
break
globalsDict[arg] = self._methodsDict[arg](obj, **args)
duration = "%.4f" % (time.time() - currentTime)
debug.println(debug.LEVEL_ALL,
"GENERATION TIME: %s ----> %s=%s" \
% (duration, arg, repr(globalsDict[arg])))
except:
debug.printException(debug.LEVEL_SEVERE)
result = []
duration = "%.4f" % (time.time() - startTime)
debug.println(debug.LEVEL_ALL, "COMPLETION TIME: %s" % duration)
debug.println(debug.LEVEL_ALL, "generate %s results:" % self._mode)
for element in result:
debug.println(debug.LEVEL_ALL, " %s" % element)
return result
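    # Illustrative call sketch (an assumption -- the real callers are Orca's
    # speech and braille presenters, which are not shown here): a subclass
    # instance would typically be invoked as
    #   utterances = generator.generate(obj, formatType='unfocused')
    # with args such as alreadyFocused or includeContext tuning the output.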
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings, with
the exception that the pyatspi.ROLE_UNKNOWN role will yield an
empty array. Note that a 'role' attribute in args will
override the accessible role of the obj.
"""
# Subclasses must override this.
return []
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found. [[[WDW - I wonder if we should just
have _generateName, _generateDescription,
_generateDisplayedText, etc., that don't do any fallback.
Then, we can allow the formatting to do the fallback (e.g.,
'displayedText or name or description'). [[[JD to WDW - I
        needed a _generateDescription for whereAmI. :-) See below.]]]
"""
result = []
name = self._script.utilities.displayedText(obj)
if name:
result.append(name)
else:
try:
description = obj.description
except (LookupError, RuntimeError):
return result
if description:
result.append(description)
# To make the unlabeled icons in gnome-panel more accessible.
try:
role = args.get('role', obj.getRole())
except (LookupError, RuntimeError):
return result
        if not result and role == pyatspi.ROLE_ICON \
and obj.parent.getRole() == pyatspi.ROLE_PANEL:
return self._generateName(obj.parent)
return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
result = [x for x in obj.getAttributes() if x.startswith('placeholder-text:')]
return [x.replace('placeholder-text:', '') for x in result]
def _generateLabelAndName(self, obj, **args):
"""Returns the label and the name as an array of strings for speech
and braille. The name will only be present if the name is
different from the label.
"""
result = []
label = self._generateLabel(obj, **args)
name = self._generateName(obj, **args)
result.extend(label)
if not len(label):
result.extend(name)
elif len(name) and name[0] != label[0]:
result.extend(name)
return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if obj.description and not obj.description in [obj.name, label]:
result.append(obj.description)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if label:
result.append(label)
return result
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImageDescription(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represent the description of the image on the object, if it
exists. Otherwise, an empty array is returned.
"""
result = []
try:
image = obj.queryImage()
except NotImplementedError:
pass
else:
description = image.imageDescription
if description and len(description):
result.append(description)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateAvailability(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the grayed/sensitivity/availability state of the
object, but only if it is insensitive (i.e., grayed out and
inactive). Otherwise, and empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'insensitive'
if not obj.getState().contains(pyatspi.STATE_SENSITIVE):
result.append(self._script.formatting.getString(**args))
return result
def _generateRequired(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the required state of the object, but only if it is
required (i.e., it is in a dialog requesting input and the
user must give it a value). Otherwise, and empty array will
be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'required'
if obj.getState().contains(pyatspi.STATE_REQUIRED) \
or (obj.getRole() == pyatspi.ROLE_RADIO_BUTTON \
and obj.parent.getState().contains(pyatspi.STATE_REQUIRED)):
result.append(self._script.formatting.getString(**args))
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'readonly'
if settings.presentReadOnlyText \
and self._script.utilities.isReadOnlyTextArea(obj):
result.append(self._script.formatting.getString(**args))
return result
def _generateCellCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes that are in a table. An empty array will be
returned if this is not a checkable cell.
"""
result = []
try:
action = obj.queryAction()
except NotImplementedError:
action = None
if action:
for i in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(i) in ["toggle", _("toggle")]:
oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX,
args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_INDETERMINATE):
result.append(indicators[2])
elif state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
        for radio buttons. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'radiobutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateChildWidget(self, obj, **args):
widgetRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_TOGGLE_BUTTON]
isWidget = lambda x: x and x.getRole() in widgetRoles
# For GtkListBox, such as those found in the control center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
widget = pyatspi.findDescendant(obj, isWidget)
if widget:
return self.generate(widget, includeContext=False)
return []
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
        for toggle buttons. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'togglebutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED) \
or state.contains(pyatspi.STATE_PRESSED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateMenuItemCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the menu item, only if it is
checked. Otherwise, and empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
if obj.getState().contains(pyatspi.STATE_CHECKED):
# Translators: this represents the state of a checked menu item.
#
result.append(indicators[1])
return result
def _generateExpandableState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the expanded/collapsed state of an object, such as a
tree node. If the object is not expandable, an empty array
will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'expansion'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
def _generateRowHeader(self, obj, **args):
"""Returns an array of strings to be used in speech and braille that
represent the row header for an object that is in a table, if
it exists. Otherwise, an empty array is returned.
"""
result = []
# Do not return yourself as a header.
#
role = args.get('role', obj.getRole())
if role in [pyatspi.ROLE_ROW_HEADER,
pyatspi.ROLE_TABLE_ROW_HEADER]:
return result
if not args.get('mode', None):
args['mode'] = self._mode
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
try:
rowIndex = table.getRowAtIndex(index)
except:
rowIndex = -1
if rowIndex >= 0:
# Get the header information. In Java Swing, the
# information is not exposed via the description
# but is instead a header object, so we fall back
# to that if it exists.
#
# [[[TODO: WDW - the more correct thing to do, I
# think, is to look at the row header object.
# We've been looking at the description for so
# long, though, that we'll give the description
# preference for now.]]]
#
desc = table.getRowDescription(rowIndex)
if not desc:
header = table.getRowHeader(rowIndex)
if header:
desc = self._script.utilities.displayedText(header)
if desc and len(desc):
text = desc
if args['mode'] == 'speech':
if settings.speechVerbosityLevel \
== settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType', None) \
in ['basicWhereAmI', 'detailedWhereAmI']:
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_ROW_HEADER)
elif args['mode'] == 'braille':
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_ROW_HEADER)
result.append(text)
return result
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists. Otherwise, an empty array
is returned.
"""
result = []
# Do not return yourself as a header.
#
try:
role = args.get('role', obj.getRole())
except:
role = None
if role in [pyatspi.ROLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_COLUMN_HEADER]:
return result
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
columnIndex = table.getColumnAtIndex(index)
if columnIndex >= 0:
# Get the header information. In Java Swing, the
# information is not exposed via the description
# but is instead a header object, so we fall back
# to that if it exists.
#
# [[[TODO: WDW - the more correct thing to do, I
# think, is to look at the column header object.
# We've been looking at the description for so
# long, though, that we'll give the description
# preference for now.]]]
#
desc = table.getColumnDescription(columnIndex)
if not desc:
header = table.getColumnHeader(columnIndex)
if header:
desc = self._script.utilities.displayedText(header)
if desc and len(desc):
text = desc
if args['mode'] == 'speech':
if settings.speechVerbosityLevel \
== settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType', None) \
in ['basicWhereAmI', 'detailedWhereAmI']:
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_COLUMN_HEADER)
elif args['mode'] == 'braille':
text = desc + " " + self.getLocalizedRoleName(
obj, pyatspi.ROLE_COLUMN_HEADER)
result.append(text)
return result
def _generateTableCell2ChildLabel(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
label of a toggle in a table cell that has a special 2 child
pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
try:
action = child.queryAction()
except NotImplementedError:
continue
else:
for j in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(j) in ["toggle", _("toggle")]:
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if not hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateTableCell2ChildToggle(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
toggle value of a toggle in a table cell that has a special 2
child pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
try:
action = child.queryAction()
except NotImplementedError:
continue
else:
for j in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(j) in ["toggle", _("toggle")]:
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):
"""If this table cell has a "toggle" action, and doesn't have any
label associated with it then also speak the table column
header. See Orca bug #455230 for more details.
"""
# If we're reading just a single cell in speech, the new
# header portion is going to give us this information.
#
if args['mode'] == 'speech' and not args.get('readingRow', False):
return []
result = []
try:
parentTable = obj.parent.queryTable()
except:
return result
try:
action = obj.queryAction()
label = self._script.utilities.displayedText(
self._script.utilities.realActiveDescendant(obj))
except NotImplementedError:
action = None
label = None
if action and (label == None or len(label) == 0):
index = self._script.utilities.cellIndex(obj)
column = parentTable.getColumnAtIndex(index)
for j in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(j) in ["toggle",
_("toggle")]:
accHeader = \
parentTable.getColumnHeader(column)
result.append(accHeader.name)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to
complexity in the code. This method is used to return an
array of strings for use by speech and braille for a single
table cell itself. The string, 'blank', is added for empty
cells.
"""
result = []
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateTable(self, obj, **args):
"""Returns an array of strings for use by speech and braille to present
the size of a table."""
try:
table = obj.queryTable()
except:
return []
return [messages.tableSize(table.nRows, table.nColumns)]
def _generateTableCellRow(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to complexity in
the code. This method is used to return an array of strings
(and possibly voice and audio specifications) for an entire row
in a table if that's what the user has requested and if the row
has changed. Otherwise, it will return an array for just the
current cell.
"""
result = []
try:
parentTable = obj.parent.queryTable()
except:
parentTable = None
isDetailedWhereAmI = args.get('formatType', None) == 'detailedWhereAmI'
if (settings.readTableCellRow or isDetailedWhereAmI) and parentTable \
and (not self._script.utilities.isLayoutOnly(obj.parent)):
parent = obj.parent
index = self._script.utilities.cellIndex(obj)
row = parentTable.getRowAtIndex(index)
column = parentTable.getColumnAtIndex(index)
# This is an indication of whether we should speak all the
# table cells (the user has moved focus up or down a row),
# or just the current one (focus has moved left or right in
# the same row).
#
presentAll = True
if isDetailedWhereAmI:
if parentTable.nColumns <= 1:
return result
elif "lastRow" in self._script.pointOfReference \
and "lastColumn" in self._script.pointOfReference:
pointOfReference = self._script.pointOfReference
presentAll = \
(self._mode == 'braille') \
or \
((pointOfReference["lastRow"] != row) \
or ((row == 0 or row == parentTable.nRows-1) \
and pointOfReference["lastColumn"] == column))
if presentAll:
args['readingRow'] = True
if self._script.utilities.isTableRow(obj):
cells = [x for x in obj]
else:
cells = [parentTable.getAccessibleAt(row, i) \
for i in range(parentTable.nColumns)]
for cell in cells:
if not cell:
continue
state = cell.getState()
showing = state.contains(pyatspi.STATE_SHOWING)
if showing:
cellResult = self._generateRealTableCell(cell, **args)
if cellResult and result and self._mode == 'braille':
result.append(braille.Region(
settings.brailleTableCellDelimiter))
result.extend(cellResult)
else:
result.extend(self._generateRealTableCell(obj, **args))
else:
result.extend(self._generateRealTableCell(obj, **args))
return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateCurrentLineText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
[text, caretOffset, startOffset] = self._script.getTextLineAtCaret(obj)
return [text]
def _generateDisplayedText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represents all the text being displayed by the object.
"""
displayedText = self._script.utilities.displayedText(obj)
if not displayedText:
return []
return [displayedText]
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNodeLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the tree node level of the object, or an empty
array if the object is not a tree node.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nodelevel'
level = self._script.utilities.nodeLevel(obj)
if level >= 0:
result.append(self._script.formatting.getString(**args)\
% (level + 1))
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
def _generateValue(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the value of the object. This is typically the
numerical value, but may also be the text of the 'value'
attribute if it exists on the object. [[[WDW - we should
        consider returning an empty array if there is no value.]]]
"""
return [self._script.utilities.textForValue(obj)]
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateApplicationName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
        represents the name of the application for the object.
"""
result = []
try:
result.append(obj.getApplication().name)
except:
pass
return result
def _generateNestingLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the nesting level of an object in a list.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nestinglevel'
nestingLevel = self._script.utilities.nestingLevel(obj)
if nestingLevel:
result.append(self._script.formatting.getString(**args)\
% nestingLevel)
return result
def _generateRadioButtonGroup(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the radio button group label for the object, or an
empty array if the object has no such label.
"""
result = []
try:
role = obj.getRole()
except:
role = None
if role == pyatspi.ROLE_RADIO_BUTTON:
radioGroupLabel = None
relations = obj.getRelationSet()
for relation in relations:
if (not radioGroupLabel) \
and (relation.getRelationType() \
== pyatspi.RELATION_LABELLED_BY):
radioGroupLabel = relation.getTarget(0)
break
if radioGroupLabel:
result.append(self._script.utilities.\
displayedText(radioGroupLabel))
else:
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() in [pyatspi.ROLE_PANEL,
pyatspi.ROLE_FILLER]:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
def _generateRealActiveDescendantDisplayedText(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the text actually being painted in the cell, if it can be
found. Otherwise, an empty array is returned.
"""
result = []
text = self._script.utilities.displayedText(
self._script.utilities.realActiveDescendant(obj))
if text:
result = [text]
return result
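    # Illustrative note (added): for a table cell whose visible content is
    # painted by a child text object, realActiveDescendant(obj) resolves to
    # that child and its displayed text (say, 'Inbox') is returned; the cell
    # contents are an assumption used only as an example.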
    def _generateRealActiveDescendantRoleName(self, obj, **args):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the role of the object actually being painted in the cell.
"""
rad = self._script.utilities.realActiveDescendant(obj)
args['role'] = rad.getRole()
return self._generateRoleName(rad, **args)
def _generateNamedContainingPanel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the nearest ancestor of an object which is a named panel.
"""
result = []
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_PANEL:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
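
    # --- Added illustrative sketch (not part of the original module) --------
    # A minimal example of how several of the generator methods above could be
    # stitched into a single utterance.  The methods called here all exist in
    # this class; joining the arrays with spaces and the name of this helper
    # are assumptions made only for illustration.
    def _exampleBuildUtterance(self, obj, **args):
        """Illustrative only: combine a few generated string arrays."""
        utterance = []
        utterance.extend(self._generateLabelAndName(obj, **args))
        utterance.extend(self._generateValue(obj, **args))
        utterance.extend(self._generateNodeLevel(obj, **args))
        return ' '.join(utterance)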
|
lgpl-2.1
| -3,497,708,915,490,167,000 | 41.734375 | 176 | 0.521841 | false |
SeedScientific/polio
|
source_data/migrations/0095_auto__del_unique_sourceregion_region_string__del_unique_sourceregion_r.py
|
1
|
75619
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
# Removing unique constraint on 'SourceRegion', fields ['region_string', 'document', 'region_type', 'country']
db.delete_unique('source_region', ['region_string', 'document_id', 'region_type', 'country'])
# Adding unique constraint on 'SourceRegion', fields ['region_code', 'document']
db.create_unique('source_region', ['region_code', 'document_id'])
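        # Illustrative note (added): on most backends the create_unique call
        # above amounts to SQL along the lines of
        #   ALTER TABLE source_region
        #       ADD CONSTRAINT <south_generated_name>
        #       UNIQUE (region_code, document_id);
        # the constraint name is generated by South, so only a placeholder is
        # shown here.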

    def backwards(self, orm):
# Removing unique constraint on 'SourceRegion', fields ['region_code', 'document']
db.delete_unique('source_region', ['region_code', 'document_id'])
# Adding unique constraint on 'SourceRegion', fields ['region_string', 'document', 'region_type', 'country']
db.create_unique('source_region', ['region_string', 'document_id', 'region_type', 'country'])
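    # Illustrative note (added): with South installed, this migration is
    # applied or rolled back from the command line, e.g.
    #   python manage.py migrate source_data 0095
    #   python manage.py migrate source_data 0094   # step back before this one
    # The app label comes from this file's location; 0094 being the
    # immediately preceding migration is an assumption.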

    models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
'campaign_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CampaignType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'get_full_name'", 'unique_with': '()'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'datapoints.campaigntype': {
'Meta': {'object_name': 'CampaignType', 'db_table': "'campaign_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.indicator': {
'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'unique_together': "(('name', 'region_type', 'office'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'region_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.RegionType']"}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '55', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
},
u'datapoints.regiontype': {
'Meta': {'object_name': 'RegionType', 'db_table': "'region_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
'source_data.activityreport': {
'Meta': {'object_name': 'ActivityReport'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.campaignmap': {
'Meta': {'object_name': 'CampaignMap', 'db_table': "'campaign_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'source_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceCampaign']", 'unique': 'True'})
},
'source_data.clustersupervisor': {
'Meta': {'object_name': 'ClusterSupervisor'},
'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_endorsed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_lgac': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.document': {
'Meta': {'ordering': "('-id',)", 'unique_together': "(('docfile', 'doc_text'),)", 'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'doc_text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'master_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'source_data.documentmeta': {
'Meta': {'unique_together': "(('document', 'source_string', 'model_type'),)", 'object_name': 'DocumentMeta', 'db_table': "'document_meta'"},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'master_object_id': ('django.db.models.fields.IntegerField', [], {}),
'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source_datapoint_count': ('django.db.models.fields.IntegerField', [], {}),
'source_object_id': ('django.db.models.fields.IntegerField', [], {}),
'source_string': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.etljob': {
'Meta': {'ordering': "('-date_attempted',)", 'object_name': 'EtlJob'},
'cron_guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'date_attempted': ('django.db.models.fields.DateTimeField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'success_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
'source_data.healthcamp': {
'Meta': {'object_name': 'HealthCamp'},
'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.indicatormap': {
'Meta': {'object_name': 'IndicatorMap', 'db_table': "'indicator_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'source_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceIndicator']", 'unique': 'True'})
},
'source_data.knowthepeople': {
'Meta': {'object_name': 'KnowThePeople'},
'brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'citiesvisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofpax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prefferedcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.paxlistreporttraining': {
'Meta': {'object_name': 'PaxListReportTraining'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'emailaddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofparticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.phoneinventory': {
'Meta': {'object_name': 'PhoneInventory'},
'asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmbirthrecord': {
'Meta': {'object_name': 'PracticeVCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsettcoordinates': {
'Meta': {'object_name': 'PracticeVCMSettCoordinates'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsummary': {
'Meta': {'object_name': 'PracticeVCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'source_data.regionmap': {
'Meta': {'object_name': 'RegionMap', 'db_table': "'region_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
u'source_data.sourcecampaign': {
'Meta': {'object_name': 'SourceCampaign', 'db_table': "'source_campaign'"},
'campaign_string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint', 'db_table': "'source_datapoint'"},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceindicator': {
'Meta': {'object_name': 'SourceIndicator', 'db_table': "'source_indicator'"},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregion': {
'Meta': {'unique_together': "(('region_code', 'document'),)", 'object_name': 'SourceRegion', 'db_table': "'source_region'"},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregionpolygon': {
'Meta': {'object_name': 'SourceRegionPolygon', 'db_table': "'source_region_polygon'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polygon': ('django.db.models.fields.TextField', [], {}),
'shape_area': ('django.db.models.fields.FloatField', [], {}),
'shape_len': ('django.db.models.fields.FloatField', [], {}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
'source_data.vcmbirthrecord': {
'Meta': {'object_name': 'VCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsettlement': {
'Meta': {'object_name': 'VCMSettlement'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummary': {
'Meta': {'object_name': 'VCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummarynew': {
'Meta': {'object_name': 'VCMSummaryNew'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax6': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax7': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax8': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax9': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_display_msd3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_tot_missed_check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_12_59months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_2_11months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_census': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_missed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_newborns': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax12_59mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax2_11mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vaxnewborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vwsregister': {
'Meta': {'object_name': 'VWSRegister'},
'acceptphoneresponsibility': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 13, 0, 0)'}),
'datephonecollected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'personal_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wardcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['source_data']
|
agpl-3.0
| -8,181,385,182,158,380,000 | 90.993917 | 195 | 0.567886 | false |
OpenTransitTools/utils
|
ott/utils/parse/cmdline/db_cmdline.py
|
1
|
1334
|
from .base_cmdline import *
def is_spatial(parser):
parser.add_argument(
'--is_geospatial',
'-geo',
'-g',
action="store_true",
help="add geometry columns"
)
def db_parser(prog_name='bin/loader', tables=['Could be (Declarative) Base.metadata.sorted_tables'], url_required=True, do_parse=False, add_misc=False):
"""
    create a generic database command-line argument parser (returns the parsed args instead when do_parse=True)
"""
from .base_cmdline import blank_parser
parser = blank_parser(prog_name, add_misc)
parser.add_argument(
'--database_url',
'-d',
required=url_required,
help="(geo) database url ala dialect+driver://user:password@host/dbname[?key=value..]"
)
parser.add_argument(
'--schema',
'-schema',
'-s',
help="database schema"
)
parser.add_argument(
'--user',
'-user',
'-u',
help="database user"
)
parser.add_argument(
'--tables',
choices=tables, default=None, nargs='*',
help="Limited list of TABLES to load, if blank, load all tables"
)
create_option(parser)
clear_option(parser)
is_spatial(parser)
# return either parser or args
if do_parse:
ret_val = parser.parse_args()
else:
ret_val = parser
return ret_val
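# Example usage (a sketch; the database URL and schema below are hypothetical,
# and blank_parser may add further arguments of its own):
#   parser = db_parser(prog_name='bin/loader', url_required=False)
#   args = parser.parse_args(['-d', 'postgresql://u:pw@localhost/ott', '-s', 'current', '-g'])
#   # args.database_url, args.schema and args.is_geospatial are then populated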
|
mpl-2.0
| -1,641,518,630,436,433,000 | 24.169811 | 151 | 0.572714 | false |
torotil/dbuild.py
|
drupy/objects.py
|
1
|
17114
|
from copy import deepcopy, copy
import os.path
import urllib.parse
import urllib.request
import hashlib
import json
import setuptools.archive_util
import shutil
import collections
import re
from functools import partial
from glob import glob
def addDefaults(config, defaults):
queue = [(config, defaults)]
while len(queue) > 0:
c, d = queue.pop(0)
for k in d.keys():
if k in c:
if isinstance(c[k], dict) and isinstance(d[k], dict):
queue.append((c[k], d[k]))
else:
c[k] = deepcopy(d[k])
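# Minimal illustration (hypothetical values): keys already present in the config
# are kept, missing ones are deep-copied in from the defaults (in place).
#   cfg = {'core': {'project': 'drupal'}}
#   addDefaults(cfg, {'core': {'project': None, 'profiles': {}}, 'projects': {}})
#   # cfg -> {'core': {'project': 'drupal', 'profiles': {}}, 'projects': {}}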
parsers = {
'.json': partial(json.load, object_pairs_hook=collections.OrderedDict)
}
# Optionally load support for yaml config files.
try:
from ruamel import yaml
parsers['.yaml'] = partial(yaml.load, Loader=yaml.RoundTripLoader)
except ImportError:
pass
def get_parser(path):
_, ext = os.path.splitext(path)
return parsers[ext]
class Config:
defaults = {}
def __init__(self, runner, path):
self.runner = runner
self.path = path
self.config = self.readConfig()
def readConfig(self):
o = self.runner.options
files = [(None, self.path)]
data = collections.OrderedDict()
while (len(files) > 0):
relTo, path = files.pop(0)
path = self.runner.getDownloader({'url': path}) \
.download(relTo, o.downloadDir).localpath()
new_data = self.readFile(path)
if 'includes' in new_data:
includes = new_data['includes']
del new_data['includes']
relTo = os.path.dirname(path)
for inc in includes:
files.append((relTo, inc))
addDefaults(data, new_data)
addDefaults(data, self.defaults)
return data
def readFile(self, path):
parser = get_parser(path)
with open(path) as configfile:
try:
return parser(configfile)
except ValueError as e:
raise ValueError('Error while parsing %s: %s' % (path, str(e)))
class Tree(Config):
defaults = {
'documentRoot': 'htdocs',
'projectsDir': 'projects',
'downloadDir': 'downloads',
'core': {
'project': None,
'profiles': {},
'protected': []
},
'projects': {},
}
def __init__(self, runner, path):
Config.__init__(self, runner, path)
self.projects = collections.OrderedDict()
for dirname, config in self.config['projects'].items():
config['dirname'] = dirname
self.projects[dirname] = runner.getProject(config)
self.sites = {}
for configpath in glob(os.path.dirname(path) + '/*.site.*'):
basename = os.path.basename(configpath)
site = basename[:basename.find('.')]
if '.' not in site:
self.sites[site] = Site(self.runner, site, configpath)
@property
def defined_projects(self):
return frozenset(self.projects.keys())
@property
def installed_projects(self):
o = self.runner.options
return frozenset(os.listdir(os.path.join(o.installDir, o.projectsDir)))
@property
def used_projects(self):
used_projects = set()
for s in self.sites.values():
used_projects.update(s.projects())
used_projects.add(self.config['core']['project'])
return used_projects
class Site(Config):
defaults = {
'profile': 'standard',
'db-url': None,
'site-mail': None,
'site-name': None,
'account-mail': None,
'links': {}
}
def __init__(self, runner, name, path):
Config.__init__(self, runner, path)
self.site = name
if not self.config['db-url']:
self.config['db-url'] = 'dpl:dplpw@localhost/' + name
def project_from_symlink_path(self, path):
project = path
# The symlink might point to a sub-directory of the project.
if '/' in project:
project = project[:project.find('/')]
return project
def projects(self):
q = [self.config['links']]
while q:
d = q.pop(0)
for alias, project_or_dir in d.items():
if isinstance(project_or_dir, dict):
q.append(project_or_dir)
else:
yield self.project_from_symlink_path(project_or_dir)
profile = self.profile()
if profile:
path = self.runner.config.config['core']['profiles'][profile]
yield self.project_from_symlink_path(path)
def profile(self):
profile = self.config['profile']
if profile not in ('minimal', 'standard', 'testing'):
return profile
class TypedFactory:
def __init__(self, runner, name, types):
self.runner, self.name, self.types = runner, name, types
def produce(self, config):
for t in self.types:
try:
obj = t(self.runner, config)
if obj.isValid():
return obj
except ValueError as e:
""" Implementations can err out of non-applicable configs. """
if self.runner.options.verbose:
print('Not a %s: %s' % (t.__name__, e))
raise Exception("No matching %s for input: %s" % (self.name, config))
class Downloader:
def __init__(self, runner, config):
self.runner = runner
self.url = config['url']
self.hash = None
if self.url.find('#') != -1:
self.url, self.hash = self.url.split('#', 1)
self.scheme = urllib.parse.urlparse(self.url).scheme
def download(self, relTo, store):
return self
def localpath(self):
return self.url
def isValid(self):
return True
def convertToMake(self, pfx, patchShortHand=False):
if patchShortHand:
print("%s = %s" % (pfx, self.url))
else:
print("%s[type] = file" % (pfx))
print("%s[url] = %s" % (pfx, self.url))
class ScmNoopDownloader(Downloader):
def __init__(self, runner, config):
hasScmType = 'type' in config and config['type'] in ['git']
hasRevisionOrBranch = 'revision' in config or 'branch' in config
if not hasScmType and not hasRevisionOrBranch:
raise ValueError('This is not a SCM ressource')
Downloader.__init__(self, runner, config)
self.scmType = 'git'
self.branch = config['branch'] if 'branch' in config else False
self.revision = config['revision'] if 'revision' in config else False
def convertToMake(self, pfx, patchShortHand=False):
print(pfx + '[type] = ' + self.scmType)
print(pfx + '[url] = ' + self.url)
if self.branch:
print(pfx + '[branch] = ' + self.branch)
if self.revision:
print(pfx + '[revision] = ' + self.revision)
class LocalDownloader(Downloader):
def download(self, relTo, store):
if not relTo or os.path.isabs(self.url):
self.path = self.url
else:
self.path = os.path.join(relTo, self.url)
return self
def localpath(self):
return self.path
def isValid(self):
return not self.scheme
class UrllibDownloader(Downloader):
def __init__(self, runner, config):
Downloader.__init__(self, runner, config)
def download(self, relTo, store):
filename = self.url.replace('/', '-').replace(':', '-')
self.path = os.path.join(store, filename)
if os.path.exists(self.path):
if not self.hash or self.getHash() == self.hash:
                return self  # cached download with matching hash
else:
os.unlink(self.path)
if self.runner.options.verbose:
print("Downloading %s" % self.url)
try:
f = urllib.request.urlopen(self.url)
except urllib.error.HTTPError as e:
msg = 'Error during download of {}: {}'
raise Exception(msg.format(self.url, str(e)))
with open(self.path, 'wb') as target:
target.write(f.read())
if self.hash:
actual_hash = self.getHash()
if self.hash != actual_hash:
msg = 'Hash of file downloaded from {} wrong: {} instead of {}'
raise Exception(msg.format(self.url, actual_hash, self.hash))
return self
def getHash(self):
with open(self.path, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def localpath(self):
return self.path
def isValid(self):
schemes = ['http', 'https', 'ftp']
return self.scheme in schemes and not self.url.endswith('.git')
class Ressource:
def __init__(self, runner, config):
self.runner = runner
self.config = deepcopy(config)
if type(self.config) is str:
self.config = {'url': config}
addDefaults(self.config, dict(devel=None))
def download(self):
o = self.runner.options
downloader = self.runner.getDownloader(self.config)
downloader.download(o.sourceDir, o.downloadDir)
self.config['localpath'] = downloader.localpath()
def applyTo(self, target):
devel = self.config['devel']
if devel is not None and devel != self.runner.options.devel:
"Don't apply ressources that are production or devel only"
return
applier = self.runner.getApplier(self.config)
applier.applyTo(target)
def convertToMake(self, pfx, patchShortHand=False):
if 'purpose' in self.config:
comment = '; ' + self.config['purpose']
if 'link' in self.config:
comment += ' - ' + self.config['link']
print(comment)
downloader = self.runner.getDownloader(self.config)
downloader.convertToMake(pfx, patchShortHand)
class Applier:
def __init__(self, runner, config):
self.runner = runner
self.path = config['localpath']
self.type = config['type'] if 'type' in config else None
self.config = config
class TarballExtract(Applier):
    exts = ['.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tar.xz', '.tar', '.zip']
def applyTo(self, target):
unpack = setuptools.archive_util.unpack_archive
# Dry run to find longest prefix.
paths = []
def recordPaths(name, destination):
paths.append(name)
return False
unpack(self.path, target, progress_filter=recordPaths)
prefix = len(os.path.commonprefix(paths))
        # Actual unpacking.
def extractFilter(name, destination):
if len(name) <= prefix:
return False
name = name[prefix:]
if name.startswith('/'):
name = name[1:]
return target + '/' + name
unpack(self.path, target, progress_filter=extractFilter)
def isValid(self):
if self.type == 'tarball':
return True
for ext in self.exts:
if self.path.endswith(ext):
return True
return False
class PatchApplier(Applier):
def applyTo(self, target):
cmd = 'patch --no-backup-if-mismatch -p1 -d {} < {}'
self.runner.command(cmd.format(target, self.path), shell=True)
def isValid(self):
p = self.path
return p.endswith('.patch') or p.endswith('.diff') \
or self.type == 'patch'
class CopyFileApplier(Applier):
def __init__(self, runner, config):
Applier.__init__(self, runner, config)
addDefaults(config, dict(filepath=os.path.basename(config['url'])))
self.filepath = config['filepath']
def applyTo(self, target):
shutil.copyfile(self.path, os.path.join(target, self.filepath))
def isValid(self):
return os.path.isfile(self.path)
class GitRepoApplier(Applier):
def __init__(self, runner, config):
Applier.__init__(self, runner, config)
self.url = config['url']
self.shallow = config.get('shallow', True)
def applyTo(self, target):
call = ['git', 'clone', self.url]
if 'branch' in self.config:
call += ['-b', self.config['branch']]
has_revision = 'revision' in self.config and self.config['revision']
if self.shallow and not has_revision:
call += ['--depth', '1']
call.append(target)
self.runner.command(call)
if has_revision:
wd = os.getcwd()
os.chdir(target)
self.runner.command(['git', 'checkout', self.config['revision']])
os.chdir(wd)
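    # For instance, a config such as {'url': 'https://example.org/x.git',
    # 'branch': '8.x-1.x'} (hypothetical URL/branch) results in roughly
    #   git clone https://example.org/x.git -b 8.x-1.x --depth 1 <target>
    # while a pinned 'revision' disables the shallow clone and checks the
    # revision out after cloning.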
def isValid(self):
return self.type == 'git' or 'branch' in self.config \
or 'revision' in self.config
class DirectoryApplier(Applier):
def applyTo(self, target):
self.runner.ensureDir(target)
self.runner.command(['rsync', '-rlt', self.path+'/', target+'/'])
def isValid(self):
return os.path.isdir(self.path)
class Project:
def __init__(self, runner, config):
addDefaults(config, {
'type': None,
'symlinks': None,
'build': [],
'protected': False,
})
self.runner = runner
self.config = config
self.hash = self.hashDict(self.config)
self.dirname = config['dirname']
self.pipeline = deepcopy(config['build'])
self.git = False
self.type = config['type']
self.protected = config['protected']
def isValid(self):
return True
def build(self, target):
self.runner.ensureDir(target)
for config in self.pipeline:
ressource = Ressource(self.runner, config)
ressource.download()
ressource.applyTo(target)
def hashDict(self, the_dict):
jsonDump = json.dumps(the_dict, sort_keys=True)
return hashlib.sha1(jsonDump.encode('utf-8')).hexdigest()
def convertToMake(self):
parts = self.dirname.split('-', 2)
pkey = "projects[%s]" % parts[0]
pipeline = copy(self.pipeline)
first = Ressource(self.runner, pipeline.pop(0))
first.convertToMake(pkey + '[download]')
for config in pipeline:
ressource = Ressource(self.runner, config)
ressource.convertToMake(pkey + '[patch][]', True)
print()
class DrupalOrgProject(Project):
    package_pattern = re.compile('([a-z0-9_]+)-(\\d+\\.x)-(\\d+\\.x-dev|\\d+\\.\\d+(-(alpha|beta|rc)\\d+)?)')
url_pattern = 'https://ftp.drupal.org/files/projects/{}-{}-{}.tar.gz'
def __init__(self, runner, config):
"""
Split dirname to see if this is a valid drupal.org package spec.
- Automatically prepends downloading the drupal.org package to the build
queue.
        - Packages with a valid spec are detected as drupal.org packages even
if they don't declare config['type'] = 'drupal.org' explicitly.
"""
Project.__init__(self, runner, config)
try:
project, core, version, patches = self.split_project(self.dirname)
# Prepend drupal.org package download if there is not already
# another non-patch build item in the pipeline.
if not self.pipeline or self.is_patch(self.pipeline[0]):
build = dict(url=self.url_pattern.format(project, core, version))
if 'hash' in self.config:
build['hash'] = self.config['hash']
self.pipeline.insert(0, build)
if self.type is None:
self.type = 'drupal.org'
except ValueError:
pass
def is_patch(self, config):
""" Check whether pipeline items resolves to a patch. """
ressource = Ressource(self.runner, config)
u = ressource.config['url']
return u.endswith('.diff') or u.endswith('.patch') or \
ressource.config.get('type') == 'patch'
@classmethod
def split_project(cls, name):
"""
Split a directory name into project, core-version, version and patches.
Patches should be separated from the main project string and one another
using a '+'.
"""
p = name.split('+')
name, extras = p[0], tuple(p[1:])
match = cls.package_pattern.fullmatch(name)
if match:
return match.group(1), match.group(2), match.group(3), extras
raise ValueError('Not a valid package string: "{}"'.format(name))
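    # e.g. split_project('views-7.x-3.14+fix-foo.patch') returns
    # ('views', '7.x', '3.14', ('fix-foo.patch',)), while a name that does not
    # match package_pattern raises ValueError.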
def isValid(self):
return self.type == 'drupal.org' and len(self.pipeline) >= 1
def convertToMake(self):
pkey = "projects[%s]" % self.project
print("%s[version] = %s" % (pkey, self.version))
pipeline = copy(self.pipeline)
pipeline.pop(0)
for config in pipeline:
ressource = Ressource(self.runner, config)
ressource.convertToMake(pkey + '[patch][]', True)
print()
|
gpl-3.0
| 1,028,903,512,370,132,700 | 31.412879 | 108 | 0.567372 | false |
MikeCurrington/LedPinky
|
main.py
|
1
|
4174
|
import xml.etree.ElementTree as ET
import ConfigParser
import SimpleHTTPServer
import SocketServer
import os
from LedWiz import LedWiz
from LedHttp import LedHttp
from gpio import ArcadeGpio
from GameData import GameData
from PinMap import PinMap
from DeviceManager import DeviceManager
from Sequencer import Sequencer
from SequenceLightChase import SequenceLightChase
from SequenceFlicker import SequenceFlicker
from SequenceLightSingle import SequenceLightSingle
from SequenceFadeUp import SequenceFadeUp
from SequenceGroup import SequenceGroup
pinMapping = PinMap('PinMap.xml')
ledwiz = LedWiz( )
ledwiz.Connect()
gpio = ArcadeGpio( pinMapping.GetAllPinsOfDevice('GPIO') )
devices = DeviceManager()
devices.Add( "LEDWIZ", ledwiz )
devices.Add( "GPIO", gpio )
devices.ClearPins()
gamedata = GameData( 'ColorsDefault.ini', 'Colors.ini', 'controls.xml' )
gamedata.run()
marqueeBrightness = 100
fanspeed = 0
def LoadMameOutputsIni( iniFilename ):
mappings = {}
ini = ConfigParser.RawConfigParser()
ini.optionxform = str # make case sensitive
ini.read(iniFilename)
if not ini.has_section("default"):
raise Exception('Need default section in mame outputs ini')
for game in ini.sections():
print game
outputs = ini.items(game)
mappings[game] = outputs
for pin,out in outputs:
print out
return mappings
#sequenceDemo = SequenceLightSingle( pinMapping.GetAllPinsOfGroupInOrder('PANEL') )
sequenceLeftDemoCircle = SequenceLightSingle( pinMapping.GetAllPinsOfGroupInOrder('LEFTSTICK')[::-1] )
sequenceLeftDemoCircle.SetDelay(0.2)
sequenceRightDemoCircle = SequenceLightSingle( pinMapping.GetAllPinsOfGroupInOrder('RIGHTSTICK') )
sequenceRightDemoCircle.SetDelay(0.1)
sequenceDemo = SequenceGroup()
sequenceDemo.Add(sequenceLeftDemoCircle)
sequenceDemo.Add(sequenceRightDemoCircle)
sequenceDemo.SetDelay(0.1)
marqueeOn = SequenceFlicker( pinMapping.GetAllPinsOfGroup('MARQUEE') )
marqueeFade = SequenceFadeUp( pinMapping.GetAllPinsOfGroup('MARQUEE') )
sequenceGame = SequenceFadeUp( pinMapping.GetAllPinsOfGroup('PANEL') )
sequenceGame.SetTarget(1.0)
sequenceFan = SequenceFadeUp( pinMapping.GetAllPinsOfGroup('FAN') )
sequencer = Sequencer( devices )
sequencer.Add( sequenceDemo )
marqueeFade.SetTarget(1.0)
sequencer.Add( marqueeFade )
#sequencer.Add( marqueeOn )
sequencer.start()
ledwiz.ClearPins(False)
class HttpHandler:
def __init__(self):
self.ledhttp = LedHttp(self)
def StartServer(self):
self.ledhttp.StartServer()
def SetGame(self, gamename):
portsAndColors = gamedata.FindGamePortsAndColors( gamename )
portSettings = pinMapping.TranslatePortsAndColorsToPins( portsAndColors )
print portSettings
sequencer.Remove( sequenceDemo )
sequenceGame.SetOnPins(portSettings)
sequencer.Add( sequenceGame )
def SetMarqueeBrightness( self, brightness ):
#gpio.marqueeBrightness( brightness )
#marqueeBrightness = brightness
sequencer.Remove( marqueeOn )
sequencer.Remove( marqueeFade )
marqueeFade.SetTarget( float(brightness)/100.0 )
sequencer.Add( marqueeFade ) # this will put the fade to the head of the list - overriding the marqueeOn sequence
def SetFanSpeed( self, speed ):
sequencer.Remove( sequenceFan )
sequenceFan.SetTarget( float(speed)/100.0 )
sequencer.Add( sequenceFan )
def SetSleep( self, sleep ):
sequencer.Remove( sequenceFan )
if sleep==True:
sequencer.Remove( sequenceDemo )
sequencer.Remove( marqueeOn )
sequencer.Remove( sequenceGame )
marqueeFade.SetTarget( 0.0 )
sequenceFan.SetTarget( 0.0 )
#gpio.fanSpeed(0)
else:
sequencer.Add( sequenceDemo )
sequencer.Add( marqueeOn )
marqueeFade.SetTarget( 1.0 )
sequenceFan.SetTarget( 1.0 )
sequencer.Add( sequenceFan )
sequencer.Add( marqueeFade )
def SetDemo( self ):
sequencer.Remove( sequenceGame )
sequencer.Add( sequenceDemo )
ledhttp = HttpHandler()
ledhttp.StartServer()
mameOutputsFilename = '/tmp/sdlmame_out'
os.mkfifo(mameOutputsFilename)
mameOutputsFile = open(mameOutputsFilename, "r")
for nextfetch in mameOutputsFile:
print nextfetch
|
mit
| 5,146,564,350,527,761,000 | 26.826667 | 117 | 0.758265 | false |
projectshift/shift-boiler
|
boiler/routes/lazy_views.py
|
1
|
1700
|
from werkzeug.utils import import_string, cached_property
class LazyView:
"""
Lazy view
Callable class that provides loading views on-demand as soon as they
are hit. This reduces startup times and improves general performance.
See flask docs for more:
http://flask.pocoo.org/docs/0.10/patterns/lazyloading/
"""
def __init__(self, import_name):
self.import_name = import_name
        self.__module__, self.__name__ = import_name.rsplit('.', 1)
def __call__(self, *args, **kwargs):
""" Import and create instance of view """
# important issue ahead
# @see: https://github.com/projectshift/shift-boiler/issues/11
try:
result = self.view(*args, **kwargs)
return result
except ImportError:
err = 'Failed to import {}. If it exists, check that it does not '
err += 'import something non-existent itself! '
err += 'Try to manually import it to debug.'
raise ImportError(err.format(self.import_name))
@cached_property
def view(self):
result = import_string(self.import_name)
# do we have restfulness?
try:
from flask_restful import Resource
from boiler.feature.api import api
restful = True
except ImportError:
restful = False
# is classy?
if isinstance(result, type):
# and also restful?
is_restful = restful and Resource in result.__bases__
if is_restful:
result = api.output(result)
else:
result = result.as_view(self.import_name)
return result
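# Example registration (hypothetical module path), following the Flask
# lazy-loading pattern referenced in the class docstring:
#   app.add_url_rule('/users', view_func=LazyView('myapp.views.users'))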
|
mit
| -3,940,850,781,304,468,000 | 29.357143 | 78 | 0.583529 | false |
SEMAFORInformatik/femagtools
|
femagtools/forcedens.py
|
1
|
6880
|
# -*- coding: utf-8 -*-
"""
femagtools.forcedens
~~~~~~~~~~~~~~~~~~~~
Read Force Density Plot Files
"""
import os
import re
import glob
import numpy as np
import logging
logger = logging.getLogger('femagtools.forcedens')
filename_pat = re.compile(r'^(\w+)_(\d{3}).PLT(\d+)')
num_pat = re.compile(r'([+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?)\s*')
pos_pat = re.compile(r'^\s*POSITION\s*\[(\w+)\]')
unit_pat = re.compile(r'\[([^\]]+)')
def _readSections(f):
"""return list of PLT sections
sections are surrounded by lines starting with '[***'
or 2d arrays with 7 columns
Args:
param f (file) PLT file to be read
Returns:
list of sections
"""
section = []
for line in f:
if line.startswith('[****') or pos_pat.match(line):
if section:
if len(section) > 2 and section[1].startswith('Date'):
yield section[1:]
else:
yield section
if line.startswith('[****'):
section = []
else:
section = [line.strip()]
else:
section.append(line.strip())
yield section
class ForceDensity(object):
def __init__(self):
self.type = ''
self.positions = []
pass
def __read_version(self, content):
rec = content[0].split(' ')
if len(rec) > 3:
self.version = rec[3]
else:
self.version = rec[-1]
def __read_project_filename(self, content):
self.project = content[1].strip()
def __read_nodes_and_mesh(self, content):
self.nodes, self.elements, self.quality = \
[float(r[0]) for r in [num_pat.findall(l)
for l in content[:3]]]
for l in content[3:]:
m = re.match(r'\*+([^\*]+)\*+', l)
if m:
self.type = m.group(1).strip()
return
def __read_date_and_title(self, content):
d = content[0].split(':')[1].strip().split()
dd, MM, yy = d[0].split('.')
hh, mm = ''.join(d[1:-1]).split('.')
self.date = '{}-{}-{}T{:02}:{:02}'.format(
yy, MM, dd, int(hh), int(mm))
if len(content) > 6:
self.title = content[2].strip() + ', ' + content[6].strip()
else:
self.title = content[2].strip()
self.current = float(num_pat.findall(content[4])[0])
def __read_filename(self, content):
self.filename = content[0].split(':')[1].strip()
def __read_position(self, content):
d = dict(position=float(num_pat.findall(content[0])[-1]),
unit=unit_pat.findall(content[0].split()[1])[0])
cols = content[2].split()
labels = cols[::2] # either X, FN, FT, B_N, B_T
# or X FX FY B_X B_Y
d['column_units'] = {k: u for k, u in zip(labels,
[unit_pat.findall(u)[0]
for u in cols[1::2]])}
m = []
for l in content[4:]:
rec = l.split()[1:]
if len(rec) == len(labels):
m.append([float(x) for x in rec])
d.update({k: v for k, v in zip(labels, list(zip(*m)))})
self.positions.append(d)
def read(self, filename):
with open(filename) as f:
for s in _readSections(f.readlines()):
logger.debug('Section %s' % s[0:2])
if s[0].startswith('FEMAG'):
self.__read_version(s)
elif s[0].startswith('Project'):
self.__read_project_filename(s)
elif s[0].startswith('Number'):
self.__read_nodes_and_mesh(s)
elif s[0].startswith('File'):
self.__read_filename(s)
elif s[0].startswith('Date'):
self.__read_date_and_title(s)
elif s[0].startswith('POSITION'):
self.__read_position(s)
def fft(self):
"""return FFT of FN"""
import scipy.fftpack
try:
ntiles = int(360/self.positions[0]['X'][-1])
FN = np.tile(
np.array([p['FN'][:-1] for p in self.positions[:-1]]),
(ntiles, ntiles))
except AttributeError:
return []
N = FN.shape[0]
fdn = scipy.fftpack.fft2(FN)
dim = N//ntiles//2
return np.abs(fdn)[1:dim, 1:dim]/N
def items(self):
return [(k, getattr(self, k)) for k in ('version',
'type',
'title',
'current',
'filename',
'date',
'positions')]
def __str__(self):
"return string format of this object"
if self.type:
return "\n".join([
'FEMAG {}: {}'.format(self.version, self.type),
'File: {} {}'.format(self.filename, self.date)] +
['{}: {}'.format(k, v)
for k, v in self.items()])
return "{}"
def read(filename):
f = ForceDensity()
f.read(filename)
return f
def readall(workdir='.'):
"""collect all recent PLT files
returns list of ForceDensity objects
"""
plt = dict()
pltfiles = sorted(glob.glob(os.path.join(workdir, '*_*.PLT*')))
base = os.path.basename(pltfiles[-1])
lastserie = filename_pat.match(base).groups()[1]
for p in pltfiles:
base = os.path.basename(p)
m = filename_pat.match(base)
if m and lastserie == m.groups()[1]:
model, i, k = m.groups()
fdens = ForceDensity()
fdens.read(p)
logging.info("%s: %s", p, fdens.title)
if model in plt:
plt[model].append(fdens)
else:
plt[model] = [fdens]
return plt
if __name__ == "__main__":
import matplotlib.pyplot as pl
import femagtools.plot
import sys
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = sys.stdin.readline().strip()
fdens = read(filename)
# Show the results
title = '{}, Rotor position {}'.format(
fdens.title, fdens.positions[0]['position'])
pos = fdens.positions[0]['X']
FT_FN = (fdens.positions[0]['FT'],
fdens.positions[0]['FN'])
femagtools.plot.forcedens(title, pos, FT_FN)
pl.show()
title = 'Force Density Harmonics'
femagtools.plot.forcedens_fft(title, fdens)
pl.show()
|
bsd-2-clause
| -4,452,825,694,495,662,000 | 29.990991 | 73 | 0.459738 | false |
xzturn/caffe2
|
caffe2/python/core_gradients_test.py
|
1
|
34409
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import bytes_to_native_str
from hypothesis import given
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util
from caffe2.python.core import CreateOperator, GradientRegistry
from caffe2.python import workspace
import numpy as np
# First, we will set up a few gradient registry entries so that we can manually
# construct some test cases.
def NeedAll(op, g_output):
"""A sanity check to make sure that all the gradient are given."""
for name, g in zip(op.output, g_output):
if g is None:
raise RuntimeError(
'Need gradient for "%s" but it is not provided.' % name)
return g_output
def GIS(op):
"""A test util function to generate the gradient name for input."""
return [s + '_grad' for s in op.input]
def CopyDeviceOption(op, src_op):
if src_op.HasField('device_option'):
op.device_option.CopyFrom(src_op.device_option)
return op
# First gradient: (in -> out) leading to (out_grad -> in_grad)
@GradientRegistry.RegisterGradient('Direct')
def AddDirectGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator('DirectGradient', NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
# Second gradient: (in -> out) leading to (out, out_grad -> in_grad)
@GradientRegistry.RegisterGradient('UseOutput')
def AddUseOutputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseOutputGradient',
list(op.output) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('UseInput')
def AddUseInputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseInputGradient',
list(op.input) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, g_output):
return (
[],
[None for s in op.input]
)
class TestGradientCalculation(test_util.TestCase):
def assertOperatorListEqual(self, operatorDefList1, operatorDefList2):
for op in operatorDefList1:
op.debug_info = ""
for op in operatorDefList2:
op.debug_info = ""
self.assertEqual(operatorDefList1, operatorDefList2)
@given(device_option=st.sampled_from([
None,
core.DeviceOption(caffe2_pb2.CUDA, 1)]))
def testDirect(self, device_option):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDirectImplicitGradientSource(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator(
"ConstantFill", 'out', "out_autogen_grad", value=1.0),
CreateOperator(
'DirectGradient', 'out_autogen_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
for op in desired_grad_operators:
op.debug_info = ""
gradients, _ = GradientRegistry.GetBackwardPass(
operators, ['out'])
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDoesNotGenerateUnnecessaryGradients(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
for op in desired_grad_operators:
op.debug_info = ""
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'hidden': 'hidden_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDirectButNoOutputGradientGiven(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {})
self.assertOperatorListEqual(gradients, [])
def testDirectInPlace(self):
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'in_grad'),
CreateOperator('DirectGradient', 'in_grad', 'in_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testVersionMismatch(self):
operators = [
CreateOperator('Direct', 'x', 'x'),
CreateOperator('Direct', 'y', 'x'),
CreateOperator('Direct', 'x', 'y'),
]
try:
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'y': 'y_grad'})
            self.assertFalse(True, "Should raise exception for incorrect version")
except RuntimeError as e:
print(e)
self.assertTrue("version" in str(e))
pass
def testUseOutput(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'UseOutputGradient',
['hidden', 'hidden_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseOutputInPlace(self):
operators = [
CreateOperator('UseOutput', 'in', 'in'),
CreateOperator('UseOutput', 'in', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'in_grad'
),
CreateOperator(
'UseOutputGradient',
['in', 'in_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseOutputButOutputHasBeenChanged(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
# Note here: we overwrite hidden, but hidden will be needed by the
# gradient calculation of the first operator, so the gradient
# registry should return an error.
CreateOperator('Direct', 'hidden', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
def testUseInput(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('UseInput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseInputGradient',
['hidden', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'DirectGradient',
'hidden_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseInputButInputHasBeenChanged(self):
"""Test gradient for the following case:
in -> out, with UseInput
in -> in
        Since we overwrite 'in' in op#1, and 'in' is still needed by the
        gradient calculation of op#0, the gradient registry should raise an error.
"""
operators = [
CreateOperator('UseInput', 'in', 'out'),
CreateOperator('Direct', 'in', 'in'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
@given(device_option=st.sampled_from([
None,
core.DeviceOption(caffe2_pb2.CUDA, 1)]))
def testMultiUseInput(self, device_option):
"""Test gradient for the following case:
in -> hidden1
in -> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'], 'in_grad'
),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {"out": "out_grad"})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputButWithNoGradient(self):
"""Test gradient for the following case:
in -> hidden1
in -(no gradient)-> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Nogradient', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden1_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputAndMultipleVersions(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'], 'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputAndMultipleVersionsBig(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> in
in -> hidden3, hidden4, hidden5
hidden3, hidden4, hidden5 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'in'),
CreateOperator('Direct', 'in', 'hidden3'),
CreateOperator('Direct', 'in', 'hidden4'),
CreateOperator('Direct', 'in', 'hidden5'),
CreateOperator('Direct', ['hidden3', 'hidden4', 'hidden5'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden3_grad', 'hidden4_grad', 'hidden5_grad']
),
CreateOperator(
'DirectGradient',
'hidden5_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden4_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden3_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0',
'_in_grad_autosplit_1'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
for s in gradients:
print(str(s))
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testGradientMappingUsingSumOp(self):
"""Since Sum is used in accumulating gradients, we will test if
it is OK to also explicitly use it in the graph."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Sum', 'fc', 'agg'),
CreateOperator('AveragedLoss', 'agg', 'loss'),
]
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
def testGradientCalculationWithPrint(self):
"""Test a common use case where we have Print in the forward pass."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Print', 'fc', []),
CreateOperator('AveragedLoss', 'fc', 'loss'),
]
desired_grad_operators = [
CreateOperator('AveragedLossGradient',
['fc', 'loss_grad'], 'fc_grad'),
CreateOperator('FCGradient', ['in', 'w', 'fc_grad'],
['w_grad', 'b_grad', 'in_grad']),
]
for g in desired_grad_operators:
g.is_gradient_op = 1
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
self.assertOperatorListEqual(gradient_ops, desired_grad_operators)
def testStopGradient(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden2'),
CreateOperator('Direct', 'hidden2', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden2_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testStopGradientOrphan(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'auto_blobx'),
CreateOperator('Direct', 'hidden', 'out'),
]
with self.assertRaises(ValueError):
# This should complain about incorrect use of StopGradient
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
def testStopGradientInplace(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
self.assertEqual(grad_map, {'out': 'out_grad'})
def testStopGradientWithMultiUseOperators(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'hidden2'),
CreateOperator('StopGradient', 'hidden', 'hidden3'),
CreateOperator('Direct', ['hidden2', 'hidden3'], 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad',
['hidden2_grad', 'hidden3_grad']),
CreateOperator('DirectGradient', 'hidden2_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
self.assertEqual(
grad_map, {'out': 'out_grad', 'hidden2': 'hidden2_grad',
'hidden3': 'hidden3_grad', 'hidden': 'hidden_grad',
'in': 'in_grad'})
def test_zero_gradient(self):
net = core.Net("zero_grad_test")
hidden_prev, cell, gates, seq_lengths, timestep =\
net.AddExternalInput("h", "c", "g", "s", "t")
hidden, cell = net.LSTMUnit(
[hidden_prev, cell, gates, seq_lengths, timestep],
["hidden_t", "cell_t"])
with self.assertRaises(Exception):
net.AddGradientOperators([hidden])
net.ZeroGradient(cell, [])
net.AddGradientOperators([hidden])
def test_two_grads(self):
net = core.Net("test_two_grads")
input, two, three = net.AddExternalInput("input", "two", "three")
m1 = net.Mul([input, two], "mul_1")
m2 = net.Mul([m1, three], "mul_2")
grad_map = net.AddGradientOperators([m2, m1])
workspace.ResetWorkspace()
workspace.blobs[input] = np.array([1]).astype(np.float32)
workspace.blobs[two] = np.array([2]).astype(np.float32)
workspace.blobs[three] = np.array([3]).astype(np.float32)
workspace.RunNetOnce(net)
print(net.Proto())
for blob in workspace.blobs:
print(blob, workspace.blobs[blob])
print("Input grad: ", workspace.blobs[grad_map[str(input)]])
assert workspace.blobs[grad_map[str(input)]] == 8.0
# Skip if sparse operators are not available
@unittest.skipIf(not core.IsOperator('SparseFunHash'),
'Sparse operators not available')
class TestSparseGradientsAccumulation(test_util.TestCase):
def testSparseAccumulationWithValues(self):
# The gradient for "Gather" only computes values. indices are directly
# passed from the input
#
# x1-->Gather-->x4-->
# | |
# x2-----+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "x3")
self.assertEqual(sum_op_i.input[1], "x1")
self.assertEqual(sum_op_i.output[0], "x2_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "x5_grad")
self.assertEqual(sum_op_v.input[1], "x4_grad")
self.assertEqual(sum_op_v.output[0], "x2_grad_values_concat")
def testSparseGradientToDense(self):
#
# x1-->Gather-->x4-->
# | |
# x0, w, b-->FC-->x2-->EnsureDenseGradient-->x2---+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.FC(["x0", "w", "b"], "x2")
net.EnsureDense(["x2"], "x2")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
ensure_dense_op = net.Proto().op[-2]
self.assertEqual(ensure_dense_op.input[0], "x2_grad_indices_concat")
self.assertEqual(ensure_dense_op.input[1], "x2_grad_values_concat")
self.assertEqual(ensure_dense_op.output[0], "x2_grad")
def testSparseAccumulationWithIndicesAndValues(self):
# The gradient for "SparseFunHash" computes both indices and values
#
# x1-------->
# |
# x2----> |
# | |
# x3---SparseFunHash-->x8
# / \
# x4---+ DotProduct-->x10
# \ /
# x5---SparseFunHash-->x9
# | |
# x6----> |
# |
# x7-------->
net = core.Net("test_net")
net.SparseFunHash(["x1", "x2", "x3", "x4"], "x8")
net.SparseFunHash(["x5", "x6", "x7", "x4"], "x9")
net.DotProduct(["x8", "x9"], "x10")
net.AddGradientOperators(["x10"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "_x4_grad_indices_autosplit_0")
self.assertEqual(sum_op_i.input[1], "_x4_grad_indices_autosplit_1")
self.assertEqual(sum_op_i.output[0], "x4_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "_x4_grad_values_autosplit_0")
self.assertEqual(sum_op_v.input[1], "_x4_grad_values_autosplit_1")
self.assertEqual(sum_op_v.output[0], "x4_grad_values_concat")
class TestGradientsAccumulationWithNoGradientOps(test_util.TestCase):
def testNormalAccumulation(self):
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
def testAccumulationWithNoGradientBranch(self):
# -->PRINT
# |
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Print("x2", [])
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
class TestGradientsAccumulationWithPassThroughGradients(test_util.TestCase):
def testAddOpInMiddle(self):
# x1-->Relu--x2----------------->Add-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<---------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Add(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "x4_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndDynamicConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill(["x2"], ["x3"])
net.Add(["x2", "x3"], "x4")
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
for op in net.Proto().op:
self.assertFalse(op.type == 'Sum')
self.assertTrue("x4" in input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndStaticConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill([], ["x3"], shape=[1])
net.Add(["x2", "x3"], "x4", broadcast=1)
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
print(input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testSubOpInMiddle(self):
# x1-->Relu--x2----------------->Sub-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<-----------------------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG<--x3_g<--neg
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Sub(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
print(str(net.Proto()))
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "x4_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddOpAtLeaf(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<--x4_g<--DotProductG<--x6_g
# | | |
# <---x5_g<-------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x5_grad")
self.assertEqual(sum_op.input[1], "x4_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x4_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x5_grad")
def testSubOpAtLeaf(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<-------Sum<--x2_g_split_0<--neg<--x4_g<--DotProductG<--x6_g
# | |
# x3_g<--neg<--<--x5_g<--------------------------------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "x5_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x4_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testMultiLayerAddOps(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->Add-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.Add(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x6_grad")
self.assertEqual(sum_op.input[1], "x6_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x6_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x6_grad")
def testMultiLayerSubOps(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->Sub-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.Sub(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "x5_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x6_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testAccumulationRuns(self):
net = core.Net("test_net")
input, one, two, three = net.AddExternalInput(
"input", "one", "two", "three")
m1 = net.Mul([input, two], "mul_1")
m2 = net.Mul([input, three], "mul_2")
sub = net.Sub([m1, one])
grad_map = net.AddGradientOperators([m2, sub])
workspace.ResetWorkspace()
workspace.blobs[one] = np.array([1]).astype(np.float32)
workspace.blobs[input] = np.array([1]).astype(np.float32)
workspace.blobs[two] = np.array([2]).astype(np.float32)
workspace.blobs[three] = np.array([3]).astype(np.float32)
workspace.RunNetOnce(net)
print("Input grad: ", workspace.blobs[grad_map[str(input)]])
assert workspace.blobs[grad_map[str(input)]] == 5.0
def testIncorrectOperator(self):
net = core.Net("test_net")
a, b, one = net.AddExternalInput("a", "b", "one")
m1 = net.Mul(a, b) # does not have second output
sub = net.Sub([m1, one])
try:
net.AddGradientOperators([sub])
self.assertFalse(True, "Did not throw exception")
except Exception as e:
self.assertTrue("schema" in str(e))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -3,141,119,914,985,864,700 | 36.523446 | 81 | 0.513703 | false |
Towellang/Fabric
|
towel.py
|
1
|
3116
|
#!/usr/bin/env python2
import sys
import random
from string import join
STACK = []
LOOPS = [] # Loops are stacks too
PNT = 0
BREAK = False # Break mode
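# ENV maps each Towel word to a zero-argument lambda that manipulates the global interpreter state.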
ENV = {
"[": lambda: LOOPS.append(PNT),
"]": lambda: loop(),
"drop": lambda: STACK.pop(),
"dup": lambda: push(str(STACK[-1])) if STACK != [] else panic("Tried dupping on an empty stack"),
"rand": lambda: push(str(random.randint(0, 99))), # Converted to string so push doesn't lose its shit
"dump": lambda: sys.stdout.write(str(STACK)),
"rev": lambda: STACK.reverse(),
"raise": lambda: STACK.insert(0, STACK.pop()),
"lower": lambda: STACK.insert(len(STACK), STACK.pop(0)),
"add": lambda: push(STACK.pop() + STACK.pop()),
"+": lambda: push(STACK.pop() + STACK.pop()),
"sub": lambda: push(-1 * STACK.pop() + STACK.pop()),
"-": lambda: push(-1 * STACK.pop() + STACK.pop()),
"/": lambda: push(STACK.pop() / STACK.pop()),
"put": lambda: sys.stdout.write( str( STACK.pop() ) ),
"chr": lambda: sys.stdout.write( str( chr( STACK.pop() ) ) ),
"=": lambda: logic(True) if STACK.pop() == STACK.pop() else logic(False),
">": lambda: logic(True) if STACK.pop() > STACK.pop() else logic(False),
">=": lambda: logic(True) if STACK.pop() >= STACK.pop() else logic(False),
"<": lambda: logic(True) if STACK.pop() < STACK.pop() else logic(False),
"<=": lambda: logic(True) if STACK.pop() <= STACK.pop() else logic(False),
"swap": lambda: swap(),
"clone": lambda: STACK.extend(STACK),
"grab": lambda: push(raw_input("")),
"break": lambda: breakmode(),
"end": lambda: leave()
}
def push(value): # ord & chr
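# Numeric literals are pushed as ints; anything else is treated as a quoted string and pushed one character code (ord) at a time.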
try: int(value)
except:
word = value[1:-1]
for k,v in enumerate(word):
STACK.append(ord(word[k]))
else:
STACK.append(int(value))
def loop():
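# Closing "]" jumps back to the matching "[" unless the stack is empty or its top is zero.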
global STACK, LOOPS, PNT
if STACK == [] or STACK[-1] == 0:
LOOPS.pop()
else:
PNT = LOOPS[-1]
def breakmode():
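# "break" makes the interpreter skip everything until the next "]" (see the BREAK flag in interpret()).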
global BREAK
BREAK = True
def swap():
STACK[-1], STACK[-2] = STACK[-2], STACK[-1]
def logic(bvar):
global PNT
if bvar == False:
PNT += 1
def prep(rawcode):
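# Drop shebang/comment lines, then flatten the source into a list of whitespace-separated words.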
for k,v in enumerate(rawcode):
if v.startswith("#!") or v.startswith("//"):
rawcode.pop(k)
rawcode = " ".join(rawcode)
return rawcode.split(" ")
def interpret(code):
global STACK, LOOPS, PNT, BREAK
while PNT < len(code):
word = code[PNT]
if BREAK:
if word == "]":
BREAK = False
else:
if word == "":
PNT += 1
continue
if word.startswith("\"") and word.endswith("\""):
push(word)
elif word in ENV.keys():
ENV[word]()
elif word == "_SLEN":
push(len(STACK))
else:
try: int(word)
except: panic("Unknown command or broken value")
else: push(word)
PNT += 1
def repl():
print("Welcome to the REPL")
code = []
while True:
code += prep(raw_input(">>> ").split(" "))
interpret(prep(code))
def panic(msg):
print("ERROR: " + msg + " on instruction #" + str(PNT + 1) + " (" + code[PNT] + ")")
sys.exit(1)
def leave():
print # Trailing newline to fix command prompt
sys.exit(0)
if __name__ == "__main__":
try:
sys.argv[1]
except:
repl()
else:
code = prep(open(sys.argv[1]).read().split("\n"))
interpret(code)
leave()
|
gpl-3.0
| -3,585,480,334,938,244,600 | 23.155039 | 102 | 0.603338 | false |
mrunge/openstack_horizon
|
openstack_horizon/dashboards/project/data_processing/job_executions/tables.py
|
1
|
4946
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils import http
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import tables
from openstack_horizon.api import sahara as saharaclient
from openstack_horizon.dashboards.project.data_processing. \
jobs import tables as j_t
LOG = logging.getLogger(__name__)
class DeleteJobExecution(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Job execution",
u"Delete Job executions",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Job execution",
u"Deleted Job executions",
count
)
name = "delete"
classes = ('btn-danger', 'btn-terminate')
def action(self, request, obj_id):
saharaclient.job_execution_delete(request, obj_id)
class ReLaunchJobExistingCluster(j_t.ChoosePlugin):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Launch Job",
u"Launch Jobs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Launched Job",
u"Launched Jobs",
count
)
name = "relaunch-job-existing"
verbose_name = _("Relaunch On Existing Cluster")
url = "horizon:project:data_processing.jobs:launch-job"
classes = ('ajax-modal', 'btn-launch')
def get_link_url(self, datum):
base_url = reverse(self.url)
params = http.urlencode({'job_id': datum.job_id,
'job_execution_id': datum.id})
return "?".join([base_url, params])
class ReLaunchJobNewCluster(ReLaunchJobExistingCluster):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Launch Job",
u"Launch Jobs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Launched Job",
u"Launched Jobs",
count
)
name = "relaunch-job-new"
verbose_name = _("Relaunch On New Cluster")
url = "horizon:project:data_processing.jobs:choose-plugin"
classes = ('ajax-modal', 'btn-launch')
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, job_execution_id):
job_execution = saharaclient.job_execution_get(request,
job_execution_id)
return job_execution
def get_job_link(job_execution):
return reverse("horizon:project:data_processing.jobs:details",
args=(http.urlquote(job_execution.job_id),))
def get_cluster_link(job_execution):
return reverse("horizon:project:data_processing.clusters:details",
args=(http.urlquote(job_execution.cluster_id),))
class JobExecutionsTable(tables.DataTable):
class StatusColumn(tables.Column):
def get_raw_data(self, datum):
return datum.info['status']
STATUS_CHOICES = (
("DONEWITHERROR", False),
("FAILED", False),
("KILLED", False),
("SUCCEEDED", True),
)
name = tables.Column("id",
verbose_name=_("ID"),
display_choices=(("id", "ID"), ("name", "Name")),
link=("horizon:project:data_processing."
"job_executions:details"))
job_name = tables.Column(
"job_name",
verbose_name=_("Job"),
link=get_job_link)
cluster_name = tables.Column(
"cluster_name",
verbose_name=_("Cluster"),
link=get_cluster_link)
status = StatusColumn("info",
status=True,
status_choices=STATUS_CHOICES,
verbose_name=_("Status"))
def get_object_display(self, datum):
return datum.id
class Meta:
name = "job_executions"
row_class = UpdateRow
status_columns = ["status"]
verbose_name = _("Job Executions")
table_actions = [DeleteJobExecution]
row_actions = [DeleteJobExecution,
ReLaunchJobExistingCluster,
ReLaunchJobNewCluster]
|
apache-2.0
| 407,357,235,437,798,500 | 28.795181 | 74 | 0.598868 | false |
Diti24/python-ivi
|
ivi/agilent/agilent8156A.py
|
1
|
8762
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
class agilent8156A(ivi.Driver):
"Agilent 8156A optical attenuator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilent8156A, self).__init__(*args, **kwargs)
self._identity_description = "Agilent 8156A optical attenuator driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['8156A']
self._attenuation = 0.0
self._offset = 0.0
self._wavelength = 1300.0
self._disable = False
self._add_property('attenuation',
self._get_attenuation,
self._set_attenuation,
None,
ivi.Doc("""
Specifies the attenuation of the optical path. The units are dB.
"""))
self._add_property('offset',
self._get_offset,
self._set_offset,
None,
ivi.Doc("""
Specifies the offset level for the attenuation setting. The units are dB.
"""))
self._add_property('wavelength',
self._get_wavelength,
self._set_wavelength,
None,
ivi.Doc("""
Specifies the wavelength of light used for accurate attenuation. The
units are meters.
"""))
self._add_property('disable',
self._get_disable,
self._set_disable,
None,
ivi.Doc("""
Controls a shutter in the optical path. Shutter is closed when disable is
set to True.
"""))
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent8156A, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
error_code, error_message = self._ask(":system:error?").split(',')
error_code = int(error_code)
error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
code = int(self._ask("*TST?"))
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _get_attenuation(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("input:attenuation?")
self._attenuation = float(resp)
self._set_cache_valid()
return self._attenuation
def _set_attenuation(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("input:attenuation %e" % (value))
self._attenuation = value
self._set_cache_valid()
def _get_offset(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("input:offset?")
self._offset = float(resp)
self._set_cache_valid()
return self._offset
def _set_offset(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("input:offset %e" % (value))
self._offset = value
self._set_cache_valid()
self._set_cache_valid(False, 'attenuation')
def _get_wavelength(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("input:wavelength?")
self._wavelength = float(resp)
self._set_cache_valid()
return self._wavelength
def _set_wavelength(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("input:wavelength %e" % (value))
self._wavelength = value
self._set_cache_valid()
def _get_disable(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("output:state?")
self._disable = not bool(int(resp))
self._set_cache_valid()
return self._disable
def _set_disable(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("output:state %d" % (int(not value)))
self._disable = value
self._set_cache_valid()
|
mit
| -8,041,004,866,283,276,000 | 36.767241 | 99 | 0.589249 | false |
cwgreene/Nanostructure-Simulator
|
kdtree_c.py
|
1
|
3883
|
import ctypes
import time #test only
#init
kdtree = ctypes.cdll.LoadLibrary("c_optimized/kdtree.so")
kdtree3 = ctypes.cdll.LoadLibrary("c_optimized/kdtree3.so")
kdtree.call_this()
#types
vector2 = ctypes.c_double*2
vector2p = ctypes.c_double*3
vector3 = ctypes.c_double*3
vector3p = ctypes.c_double*4
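# The "*p" variants reserve one extra slot holding the point's index (id) so lookups can map back to the original list.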
kdtree_p = ctypes.pointer
#functions
kdtree.new_kdtree.restype = ctypes.POINTER(ctypes.c_int)
kdtree.find_point_r.restype = ctypes.POINTER(vector2)
kdtree.kdtree_find_point.restype = ctypes.POINTER(vector2)
def longest_chain(kd, depth, maxdepth):
return kdtree.longest_chain(kd, depth, maxdepth) # "kd" is the tree pointer; "kdtree" is the loaded C library
def new_kdtree(points):
points_copy = []
for point in points:
points_copy.append(list(point))
for id in xrange(len(points_copy)):
points_copy[id].append(id*1.)
vec_points = vector2p*len(points_copy)
vecs = []
for point in points_copy:
#print point
vecs.append(vector2p(*point))
vec_points = vec_points(*vecs)
kd_p = kdtree.new_kdtree(vec_points,len(points),0)
#print type(kd_p)
return kd_p
def new_kdtree3(points):
points_copy = []
for point in points:
points_copy.append(list(point))
for id in xrange(len(points_copy)):
points_copy[id].append(id*1.)
vec_points = vector3p*len(points_copy)
vecs = []
for point in points_copy:
#print point
vecs.append(vector3p(*point))
vec_points = vec_points(*vecs)
kd_p = kdtree3.new_kdtree3(vec_points,len(points),0)
#print type(kd_p)
return kd_p
acc = 0.
def find_point(kd,point):
global acc
best = vector2(100000,1000000)
x = vector2(*point)
bdist = ctypes.pointer(ctypes.c_double(1000000))
#res = kdtree.kdtree_find_point(kd,x)
#res = kdtree.find_point_r(x,kd,best,bdist)
# start = time.time()
res = kdtree.kdtree_find_point(kd,x)
# acc += time.time()-start
#print type(res)
return res
def find_point_id(kd,point):
global acc
best = vector2(100000,1000000)
x = vector2(*point)
bdist = ctypes.pointer(ctypes.c_double(1000000))
#res = kdtree.kdtree_find_point(kd,x)
#res = kdtree.find_point_r(x,kd,best,bdist)
id = kdtree.kdtree_find_point_id(kd,x)
#print type(res)
return id
def find_point3_id(kd3,point):
global acc
best = vector3(100000,1000000,1000000)
x = vector3(*point)
bdist = ctypes.pointer(ctypes.c_double(1000000))
#res = kdtree.kdtree_find_point(kd,x)
#res = kdtree.find_point_r(x,kd,best,bdist)
id = kdtree3.kdtree_find_point3_id(kd3,x)
#print type(res)
return id
def test():
import time
import random
global acc
points = [[random.random()*10,random.random()*10] for x in range(10000)]
start = time.time()
kd = new_kdtree(points)
print "construct:",time.time()-start
start = time.time()
for point in points:
res =find_point(kd,point).contents
if res[0] == point[0] and res[1] == point[1]:
pass
#print "success!"
else:
print "Unhappiness!"
break
print "exact:",time.time()-start
points2 = []
start = time.time()
for point in points:
point2 = [0,0]
point2[0] = point[0]+.00000001*random.random()
point2[1] = point[1]+.00000001*random.random()
points2.append(point2)
print "near_points construct:",time.time()-start
start = time.time()
for point2,point in zip(points2,points):
res = find_point(kd,point2).contents
if res[0]==point[0] and res[1] == point[1]:
pass
else:
print "unhappiness 2"
print "near_points:",time.time()-start
start = time.time()
for index in xrange(len(points)):
id = find_point_id(kd,points2[index])
if id != index:
print "Unhappiness 3"
print "index:",time.time()-start
z=[(random.random(),random.random()) for x in range(200000)]
start = time.time()
acc = 0
for point in z:
id = find_point_id(kd,point)
print "random_lookup:",time.time()-start,acc
start = time.time()
acc = 0
for point in z:
kdtree.do_nothing(3)
kdtree.do_nothing(3)
print "do_nothing:",time.time()-start,acc
print "all done"
if __name__ == "__main__":
print "Testing"
test()
|
mit
| -5,357,662,318,989,151,000 | 24.546053 | 73 | 0.692506 | false |
libvirt/autotest
|
client/virt/tests/nfs_corrupt.py
|
1
|
8531
|
import logging, os, re
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils, os_dep
from autotest_lib.client.virt import virt_utils
from autotest_lib.client.virt import virt_env_process
class NFSCorruptConfig(object):
"""
This class sets up nfs_corrupt test environment.
"""
def __init__(self, test, params):
self.nfs_dir = os.path.join(test.tmpdir, "nfs_dir")
self.mnt_dir = os.path.join(test.tmpdir, "mnt_dir")
self.chk_re = params.get("nfs_stat_chk_re", "running")
cmd_list = self._get_service_cmds()
self.start_cmd = cmd_list[0]
self.stop_cmd = cmd_list[1]
self.restart_cmd = cmd_list[2]
self.status_cmd = cmd_list[3]
@error.context_aware
def _get_service_cmds(self):
"""
Figure out the commands used to control the NFS service.
"""
error.context("Finding out appropriate commands to handle NFS service")
service = os_dep.command("service")
try:
systemctl = os_dep.command("systemctl")
except ValueError:
systemctl = None
if systemctl is not None:
init_script = "/etc/init.d/nfs"
service_file = "/lib/systemd/system/nfs-server.service"
if os.path.isfile(init_script):
service_name = "nfs"
elif os.path.isfile(service_file):
service_name = "nfs-server"
else:
raise error.TestError("Files %s and %s absent, don't know "
"how to set up NFS for this host" %
(init_script, service_file))
start_cmd = "%s start %s.service" % (systemctl, service_name)
stop_cmd = "%s stop %s.service" % (systemctl, service_name)
restart_cmd = "%s restart %s.service" % (systemctl, service_name)
status_cmd = "%s status %s.service" % (systemctl, service_name)
else:
start_cmd = "%s nfs start" % service
stop_cmd = "%s nfs stop" % service
restart_cmd = "%s nfs restart" % service
status_cmd = "%s nfs status" % service
return [start_cmd, stop_cmd, restart_cmd, status_cmd]
@error.context_aware
def setup(self, force_start=False):
"""
Setup test NFS share.
@param force_start: Whether to make NFS service start anyway.
"""
error.context("Setting up test NFS share")
for d in [self.nfs_dir, self.mnt_dir]:
try:
os.makedirs(d)
except OSError:
pass
if force_start:
self.start_service()
else:
if not self.is_service_active():
self.start_service()
utils.run("exportfs localhost:%s -o rw,no_root_squash" % self.nfs_dir)
utils.run("mount localhost:%s %s -o rw,soft,timeo=1,retrans=1,vers=3" %
(self.nfs_dir, self.mnt_dir))
@error.context_aware
def cleanup(self, force_stop=False):
error.context("Cleaning up test NFS share")
utils.run("umount %s" % self.mnt_dir)
utils.run("exportfs -u localhost:%s" % self.nfs_dir)
if force_stop:
self.stop_service()
def start_service(self):
"""
Starts the NFS server.
"""
utils.run(self.start_cmd)
def stop_service(self):
"""
Stops the NFS server.
"""
utils.run(self.stop_cmd)
def restart_service(self):
"""
Restarts the NFS server.
"""
utils.run(self.restart_cmd)
def is_service_active(self):
"""
Verifies whether the NFS server is running or not.
Matches self.chk_re (regular expression) against the service status
output to decide whether NFS is running.
"""
status = utils.system_output(self.status_cmd, ignore_status=True)
if re.findall(self.chk_re, status):
return True
else:
return False
@error.context_aware
def run_nfs_corrupt(test, params, env):
"""
Test that the VM is paused when the NFS share backing its image goes down.
The drive option 'werror' should be 'stop' and 'cache' should be 'none'.
1) Setup NFS service on host
2) Boot up a VM using another disk on NFS server and write the disk by dd
3) Check if VM status is 'running'
4) Reject NFS connection on host
5) Check if VM status is 'paused'
6) Accept NFS connection on host and continue VM by monitor command
7) Check if VM status is 'running'
@param test: kvm test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
def get_nfs_devname(params, session):
"""
Get the possible device name of the NFS storage disk in the guest.
@param params: Test params dictionary.
@param session: An SSH session object.
"""
image1_type = params.object_params("image1").get("drive_format")
stg_type = params.object_params("stg").get("drive_format")
cmd = ""
# Seems we can get correct 'stg' devname even if the 'stg' image
# has a different type from main image (we call it 'image1' in
# config file) with these 'if' sentences.
if image1_type == stg_type:
cmd = "ls /dev/[hsv]d[a-z]"
elif stg_type == "virtio":
cmd = "ls /dev/vd[a-z]"
else:
cmd = "ls /dev/[sh]d[a-z]"
cmd += " | tail -n 1"
return session.cmd_output(cmd)
def check_vm_status(vm, status):
"""
Check if VM has the given status or not.
@param vm: VM object.
@param status: String with desired status.
@return: True if VM status matches our desired status.
@return: False if VM status does not match our desired status.
"""
try:
vm.verify_status(status)
except:
return False
else:
return True
config = NFSCorruptConfig(test, params)
config.setup()
params["image_name_stg"] = os.path.join(config.mnt_dir, 'nfs_corrupt')
params["force_create_image_stg"] = "yes"
params["create_image_stg"] = "yes"
stg_params = params.object_params("stg")
virt_env_process.preprocess_image(test, stg_params)
vm = env.get_vm(params["main_vm"])
vm.create(params=params)
session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
nfs_devname = get_nfs_devname(params, session)
# Write disk on NFS server
write_disk_cmd = "dd if=/dev/urandom of=%s" % nfs_devname
logging.info("Write disk on NFS server, cmd: %s" % write_disk_cmd)
session.sendline(write_disk_cmd)
try:
# Read some command output, it will timeout
session.read_up_to_prompt(timeout=30)
except:
pass
try:
error.context("Make sure guest is running before test")
vm.resume()
vm.verify_status("running")
try:
cmd = "iptables"
cmd += " -t filter"
cmd += " -A INPUT"
cmd += " -s localhost"
cmd += " -m state"
cmd += " --state NEW"
cmd += " -p tcp"
cmd += " --dport 2049"
cmd += " -j REJECT"
error.context("Reject NFS connection on host")
utils.system(cmd)
error.context("Check if VM status is 'paused'")
if not virt_utils.wait_for(
lambda: check_vm_status(vm, "paused"),
int(params.get('wait_paused_timeout', 120))):
raise error.TestError("Guest is not paused after stop NFS")
finally:
error.context("Accept NFS connection on host")
cmd = "iptables"
cmd += " -t filter"
cmd += " -D INPUT"
cmd += " -s localhost"
cmd += " -m state"
cmd += " --state NEW"
cmd += " -p tcp"
cmd += " --dport 2049"
cmd += " -j REJECT"
utils.system(cmd)
error.context("Continue guest")
vm.resume()
error.context("Check if VM status is 'running'")
if not virt_utils.wait_for(lambda: check_vm_status(vm, "running"), 20):
raise error.TestError("Guest does not restore to 'running' status")
finally:
session.close()
vm.destroy(gracefully=True)
config.cleanup()
|
gpl-2.0
| 939,982,799,174,097,400 | 32.454902 | 79 | 0.558668 | false |
GEMScienceTools/oq-subduction
|
openquake/sub/grid3d.py
|
1
|
6450
|
"""
:module:
"""
import rtree
import numpy as np
from pyproj import Proj
from scipy.interpolate import griddata
from shapely.geometry import Point
from openquake.sub.misc.alpha_shape import alpha_shape
def generator_function(mesh):
"""
Generator function for quick loading of a 3D spatial index
:param mesh:
An instance of :class:`~openquake.hazardlib.geo.mesh.Mesh`
"""
#
lo = mesh.lons.flatten()
la = mesh.lats.flatten()
de = mesh.depths.flatten()
#
idxs = np.nonzero(np.isfinite(de))
#
for i in idxs[0]:
if i:
yield (i, (lo[i], la[i], lo[i], la[i]), None)
class Grid3d():
"""
:param minlo:
:param maxlo:
:param minla:
:param maxla:
:param minde:
:param maxde:
:param hspa:
:param vspa:
"""
def __init__(self, minlo, minla, minde, maxlo, maxla, maxde, hspa, vspa):
"""
"""
minlo = minlo+360 if minlo<0 else minlo
maxlo = maxlo+360 if maxlo<0 else maxlo
self.minlo = minlo
self.minla = minla
self.minde = minde
self.maxlo = maxlo
self.maxla = maxla
self.maxde = maxde
self.hspa = hspa
self.vspa = vspa
#
# set projection
clon = (self.minlo+self.maxlo)/2.
self.p = Proj('+proj=lcc +lon_0={:f}'.format(clon))
#
# initialise the grid
self._create_equally_spaced_grid()
def _create_equally_spaced_grid(self):
"""
"""
#
# compute the projected coordinates of the limits of the grid
minx, miny = self.p(self.minlo, self.minla)
minx = np.floor(minx/self.hspa/1e3)*self.hspa
miny = np.ceil(miny/self.hspa/1e3)*self.hspa
#
maxx, maxy = self.p(self.maxlo, self.maxla)
maxx = np.floor(maxx/self.hspa/1e3)*self.hspa
maxy = np.ceil(maxy/self.hspa/1e3)*self.hspa
#
minz = np.floor(self.minde/self.vspa)*self.vspa
maxz = np.ceil(self.maxde/self.vspa)*self.vspa
#
xs = np.arange(minx, maxx, self.hspa)
ys = np.arange(miny, maxy, self.hspa)
zs = np.arange(minz, maxz, self.vspa)
#
#
self.gridx, self.gridy, self.gridz = np.meshgrid(xs, ys, zs)
shp = self.gridx.shape
#
#
tlo, tla = self.p(self.gridx.flatten()*1e3, self.gridy.flatten()*1e3,
inverse=True)
self.gridlo = np.reshape(tlo, shp)
self.gridla = np.reshape(tla, shp)
def get_coordinates_vectors(self):
"""
This returns three vectors containing the coordinates for all the nodes
of the 3D grid
"""
return (self.gridlo.flatten(), self.gridla.flatten(),
self.gridz.flatten())
def select_nodes_within_two_meshesa(self, meshup, meshlo):
"""
:param meshup:
:param meshlo:
"""
idxs = np.isfinite(meshup.depths)
#
# spatial index for top and bottom slabs
siup = rtree.index.Index(generator_function(meshup))
silo = rtree.index.Index(generator_function(meshlo))
#
# compute the concave hull for the top and bottom slab
lonsup = meshup.lons[idxs].flatten()
lonsup = ([x+360 if x<0 else x for x in lonsup])
lonslo = meshlo.lons[idxs].flatten()
lonslo = ([x+360 if x<0 else x for x in lonslo])
ch_up, _ = alpha_shape(lonsup, meshup.lats[idxs].flatten(), 1.0)
ch_lo, _ = alpha_shape(lonslo, meshlo.lats[idxs].flatten(), 1.0)
#
#
mupde = meshup.depths.flatten()
mlode = meshlo.depths.flatten()
#
# find the points within the top and bottom
pin = []
for idx, (lo, la, de) in enumerate(zip(self.gridlo.flatten(),
self.gridla.flatten(),
self.gridz.flatten())):
if ch_up.contains(Point(lo, la)) and ch_lo.contains(Point(lo, la)):
iup = list(siup.nearest((lo, la, lo, la), 1))
ilo = list(silo.nearest((lo, la, lo, la), 2))
if (de - mupde[iup[0]] > 0. and de - mlode[ilo[0]] < 0.):
pin.append(idx)
return self.gridlo.flatten()[pin], self.gridla.flatten()[pin], \
self.gridz.flatten()[pin]
def select_nodes_within_two_meshes(self, meshup, meshlo):
"""
This method selects the points within the slab
:parameter :class:`openquake.hazardlib.geo.mesh.Mesh` meshup:
The upper mesh
:parameter :class:`openquake.hazardlib.geo.mesh.Mesh` meshlo:
The lower mesh
"""
#
# mesh projected x and y
i = np.isfinite(meshup.lons)
mux, muy = self.p(meshup.lons[i].flatten(), meshup.lats[i].flatten())
mlx, mly = self.p(meshlo.lons[i].flatten(), meshlo.lats[i].flatten())
mux /= 1e3
muy /= 1e3
mlx /= 1e3
mly /= 1e3
#
# upper depths for all the points
coos = np.stack((mux, muy)).T
upd = griddata(coos, meshup.depths[i].flatten(),
(self.gridx[:, :, :], self.gridy[:, :, :]),
method='linear')
upd = np.squeeze(upd)
#
# lower depths for all the points
coos = np.stack((mlx, mly)).T
lod = griddata(coos, meshlo.depths[i].flatten(),
(self.gridx[:, :, :], self.gridy[:, :, :]),
method='linear')
lod = np.squeeze(lod)
#
# creating the 3d grid with the upper depths and selecting nodes
# below it
# upd = np.expand_dims(upd, axis=2)
# lod = np.expand_dims(lod, axis=2)
ug = np.tile(upd, (1, 1, self.gridz.shape[2]))
lg = np.tile(lod, (1, 1, self.gridz.shape[2]))
ug = upd
lg = lod
#
# select the nodes
iii = np.nonzero((np.isfinite(ug)) & (np.isfinite(lg)) &
(self.gridz <= ug) & (self.gridz >= lg))
iii = np.nonzero((self.gridz <= lg) & (self.gridz >= ug))
#
# back to geographic coordinates
lo, la = self.p(self.gridx[iii[0], iii[1], iii[2]]*1e3,
self.gridy[iii[0], iii[1], iii[2]]*1e3, inverse=True)
#
return (lo, la, self.gridz[iii[0], iii[1], iii[2]])
|
agpl-3.0
| -156,947,146,470,911,140 | 32.076923 | 79 | 0.528527 | false |
Laurawly/tvm-1
|
python/tvm/relay/param_dict.py
|
2
|
2213
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility to save parameter dicts."""
import tvm.runtime
def save_param_dict(params):
"""Save parameter dictionary to binary bytes.
The result binary bytes can be loaded by the
GraphModule with API "load_params".
.. deprecated:: 0.9.0
Use :py:func:`tvm.runtime.save_param_dict` instead.
Parameters
----------
params : dict of str to NDArray
The parameter dictionary.
Returns
-------
param_bytes: bytearray
Serialized parameters.
Examples
--------
.. code-block:: python
# set up the parameter dict
params = {"param0": arr0, "param1": arr1}
# save the parameters as byte array
param_bytes = tvm.runtime.save_param_dict(params)
# We can serialize the param_bytes and load it back later.
# Pass in byte array to module to directly set parameters
tvm.runtime.load_param_dict(param_bytes)
"""
return tvm.runtime.save_param_dict(params)
def load_param_dict(param_bytes):
"""Load parameter dictionary to binary bytes.
.. deprecated:: 0.9.0
Use :py:func:`tvm.runtime.load_param_dict` instead.
Parameters
----------
param_bytes: bytearray
Serialized parameters.
Returns
-------
params : dict of str to NDArray
The parameter dictionary.
"""
return tvm.runtime.load_param_dict(param_bytes)
|
apache-2.0
| -119,685,848,773,672,770 | 29.736111 | 65 | 0.680976 | false |
tremblerz/breach-detection-system
|
dashboard/bds/views.py
|
1
|
2233
|
from django.shortcuts import render,redirect
import random,json
from django.http import HttpResponse
from django.contrib.auth import authenticate,logout,login
from bds.models import Packet
# Create your views here.
"""
def send_data(request):
"""
def login_v(request):
message = ""
if request.method == "POST":
user_n = request.POST["usern"]
passw = request.POST["passw"]
user = authenticate(username=user_n,password=passw)
if user is not None:
login(request,user)
return redirect("index")
else:
message="Wromg Credentials"
return render(request,'bds/login_v.html',{'message':message})
def logout_v(request):
logout(request)
return redirect('login')
def index(request):
val=[]
count={'nor' : 0, 'sus' : 0, 'mal' : 0}
k=0
if request.user.is_authenticated:
name = request.user
all_obj = Packet.objects.all()[:20]
for i in all_obj:
if i.breach_confidence<20:
count["nor"] += 1
elif 20<=i.breach_confidence<80:
count["sus"] += 1
else:
count["mal"] += 1
val.append([k+1,i.timestamp,i.source,i.destination,i.mac])
k+=1
else:
return redirect('login')
return render(request,'bds/index.html',{'name':name,'val':val,'count':count})
def confirmed(request):
return render(request, 'bds/confirmed.html')
def suspicious(request):
return render(request, 'bds/suspicious.html')
def TopThreats(request):
return render(request, 'bds/TopThreats.html')
def valueret(request):
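# Aggregates packet counts per month into normal / suspicious / malicious buckets and returns them as JSON for the dashboard charts.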
all_obj = Packet.objects.all()
val = []
dic = {}
for i in all_obj:
if i.timestamp.strftime("%b") in dic:
if i.breach_confidence<30:
dic[i.timestamp.strftime("%b")]["nor"] += 1
elif 30<=i.breach_confidence<60:
dic[i.timestamp.strftime("%b")]["sus"] += 1
else:
dic[i.timestamp.strftime("%b")]["mal"] += 1
else:
dic[i.timestamp.strftime("%b")] = {"nor":0,"sus":0,"mal":0}
if i.breach_confidence<30:
dic[i.timestamp.strftime("%b")]["nor"] += 1
elif 30<=i.breach_confidence<60:
dic[i.timestamp.strftime("%b")]["sus"] += 1
else:
dic[i.timestamp.strftime("%b")]["mal"] += 1
for i in dic:
val.append({"year":i,"nor":dic[i]["nor"],"sus":dic[i]["sus"],"mal":dic[i]["mal"]})
val=json.dumps(val)
return HttpResponse(val,content_type='application/json')
|
gpl-3.0
| 7,901,635,335,165,355,000 | 26.580247 | 84 | 0.659651 | false |