| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool 1 class |
maxmalysh/congenial-octo-adventure | report2/task2_compact.py | copies: 1 | size: 2980
import numpy as np
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0., 3., -1., 8.]])
A2 = np.array(
[[ 77., -23., -32., 0., 0.],
[-23., 53., 0., 0., -18.],
[-32., 0., 90., -5., 0.],
[ 0., 0., -5., 49., -15.],
[ 0., -18., 0., -15., 89.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
b2 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
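# The iterative solvers below split A into diagonal and off-diagonal parts.
# Jacobi updates every component from the previous iterate only:
#
#     x_i^(k+1) = (b_i - sum_{j != i} a_ij * x_j^(k)) / a_ii
#
# For strictly diagonally dominant matrices such as A and A2 above, the
# iteration is guaranteed to converge.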
def jacobi_method(A: np.ndarray, b: np.ndarray):
    x = np.zeros_like(b)
    for it_count in range(ITERATION_LIMIT):
        print("%2d. Current solution: %s" % (it_count, x))
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_new[i] = (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, atol=1e-8):
            break
        x = x_new
    return x
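# Gauss-Seidel differs from Jacobi only in s1: components already updated in
# the current sweep (x_new[:i]) are used immediately, which typically
# converges in roughly half as many iterations on such systems.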
def gauss_seidel(A: np.ndarray, b: np.ndarray):
    x = np.zeros_like(b)
    for it_count in range(ITERATION_LIMIT):
        print("%2d. Current solution: %s" % (it_count, x))
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_new[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_new[i] = (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
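# Successive over-relaxation (SOR) blends the previous iterate with the
# Gauss-Seidel update via a relaxation factor w:
#
#     x_i^(k+1) = (1 - w) * x_i^(k) + w * (b_i - s1 - s2) / a_ii
#
# w = 1 reduces to Gauss-Seidel; choosing 1 < w < 2 can speed up convergence.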
def sor_method(A: np.ndarray, b: np.ndarray, w=1.0):
    x = np.zeros_like(b)
    for it_count in range(ITERATION_LIMIT):
        print("%2d. Current solution: %s" % (it_count, x))
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_new[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_new[i] = (1.0 - w)*x[i] + w * (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
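# Symmetric SOR (SSOR) runs one forward SOR sweep to a half-step iterate and
# then one backward sweep (rows in reverse order) starting from it, making
# the overall iteration symmetric for symmetric A.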
def ssor_method(A: np.ndarray, b: np.ndarray, w=1.0):
    x = np.zeros_like(b)
    for it_count in range(ITERATION_LIMIT):
        print("%2d. Current solution: %s" % (it_count, x))
        # Forward sweep: standard SOR, producing the half-step iterate.
        x_half = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_half[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_half[i] = (1.0 - w)*x[i] + w * (b[i] - s1 - s2) / A[i, i]
        # Backward sweep: SOR with the rows in reverse order, starting from
        # the half-step iterate.
        x_new = np.copy(x_half)
        for i in reversed(range(A.shape[0])):
            s1 = np.dot(A[i, :i], x_half[:i])
            s2 = np.dot(A[i, i + 1:], x_new[i + 1:])
            x_new[i] = (1.0 - w)*x_half[i] + w * (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
#A, b = A2, b2
x = sor_method(A, b)
print("Final solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error)
license: unlicense | hash: 4,159,153,501,352,646,700 | line_mean: 25.616071 | line_max: 72 | alpha_frac: 0.437584 | autogenerated: false
alsoicode/django-maintenancemode | maintenancemode/middleware.py | copies: 1 | size: 2247
import re
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db.utils import DatabaseError
import django.conf.urls as urls
from maintenancemode.models import Maintenance, IgnoredURL
urls.handler503 = 'maintenancemode.views.defaults.temporary_unavailable'
urls.__all__.append('handler503')
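# Note: registering 'handler503' here mirrors Django's built-in handler404 /
# handler500 hooks, so a project URLconf can override the default maintenance
# view by assigning its own handler503.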
class MaintenanceModeMiddleware(object):
    def process_request(self, request):
        """
        Get the maintenance mode from the database.
        If a Maintenance value doesn't already exist in the database, we'll create one.
        "has_add_permission" and "has_delete_permission" are overridden in admin
        to prevent the user from adding or deleting a record, as we only need one
        to affect multiple sites managed from one instance of Django admin.
        """
        site = Site.objects.get_current()
        try:
            maintenance = Maintenance.objects.get(site=site)
        except (Maintenance.DoesNotExist, DatabaseError):
            for site in Site.objects.all():
                maintenance = Maintenance.objects.create(site=site, is_being_performed=False)
        # Allow access if maintenance is not being performed
        if not maintenance.is_being_performed:
            return None
        # Allow access if remote ip is in INTERNAL_IPS
        if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
            return None
        # Allow access if the user doing the request is logged in and a
        # staff member.
        if hasattr(request, 'user') and request.user.is_staff:
            return None
        # Check if a path is explicitly excluded from maintenance mode
        urls_to_ignore = IgnoredURL.objects.filter(maintenance=maintenance)
        ignore_urls = tuple(re.compile(r'%s' % str(url.pattern)) for url in urls_to_ignore)
        for url in ignore_urls:
            if url.match(request.path_info):
                return None
        # Otherwise show the user the 503 page
        resolver = urlresolvers.get_resolver(None)
        callback, param_dict = resolver._resolve_special('503')
        return callback(request, **param_dict)
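# Usage sketch (assumed project settings, not part of this module): the
# middleware is enabled by adding its dotted path to MIDDLEWARE_CLASSES.
#
#     MIDDLEWARE_CLASSES += (
#         'maintenancemode.middleware.MaintenanceModeMiddleware',
#     )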
license: bsd-3-clause | hash: 3,394,594,052,935,418,400 | line_mean: 38.125 | line_max: 93 | alpha_frac: 0.659991 | autogenerated: false
abetkin/app-units | examples/dj/dj/settings.py | copies: 1 | size: 2088
"""
Django settings for dj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!xli3!^7cpsmwe#n*x@7+1(rpbrs7nm*mq2gnl_l5laof#4#7#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dj.urls'
WSGI_APPLICATION = 'dj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
license: mit | hash: -5,127,360,724,257,778,000 | line_mean: 23.27907 | line_max: 71 | alpha_frac: 0.719349 | autogenerated: false
Ttl/scikit-rf | skrf/io/general.py | copies: 3 | size: 22567
'''
.. module:: skrf.io.general
========================================
general (:mod:`skrf.io.general`)
========================================
General io functions for reading and writing skrf objects
.. autosummary::
:toctree: generated/
read
read_all
read_all_networks
write
write_all
save_sesh
Writing output to spreadsheet
.. autosummary::
:toctree: generated/
network_2_spreadsheet
networkset_2_spreadsheet
'''
import sys
import six.moves.cPickle as pickle
from six.moves.cPickle import UnpicklingError
import inspect
import os
import zipfile
import warnings
import numpy as npy
from ..util import get_extn, get_fid
from ..network import Network
from ..frequency import Frequency
from ..media import Media
from ..networkSet import NetworkSet
from ..calibration.calibration import Calibration
from copy import copy
dir_ = copy(dir)
# delayed import: from pandas import DataFrame, Series for ntwk_2_spreadsheet
# file extension conventions for skrf objects.
global OBJ_EXTN
OBJ_EXTN = [
[Frequency, 'freq'],
[Network, 'ntwk'],
[NetworkSet, 'ns'],
[Calibration, 'cal'],
[Media, 'med'],
[object, 'p'],
]
def read(file, *args, **kwargs):
'''
Read skrf object[s] from a pickle file
Reads a skrf object that is written with :func:`write`, which uses
the :mod:`pickle` module.
Parameters
------------
file : str or file-object
name of file, or a file-object
\*args, \*\*kwargs : arguments and keyword arguments
passed through to pickle.load
Examples
-------------
>>> n = rf.Network(f=[1,2,3],s=[1,1,1],z0=50)
>>> n.write('my_ntwk.ntwk')
>>> n_2 = rf.read('my_ntwk.ntwk')
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Notes
-------
if `file` is a file-object it is left open, if it is a filename then
a file-object is opened and closed. If file is a file-object
and reading fails, then the position is reset back to 0 using seek
if possible.
'''
fid = get_fid(file, mode='rb')
try:
obj = pickle.load(fid, *args, **kwargs)
except (UnpicklingError, UnicodeDecodeError) as e:
# if fid is seekable then reset to beginning of file
fid.seek(0)
if isinstance(file, str):
# we created the fid so close it
fid.close()
raise
if isinstance(file, str):
# we created the fid so close it
fid.close()
return obj
def write(file, obj, overwrite = True):
'''
Write skrf object[s] to a file
This uses the :mod:`pickle` module to write skrf objects to a file.
    Note that you can write any picklable python object. For example,
you can write a list or dictionary of :class:`~skrf.network.Network`
objects
or :class:`~skrf.calibration.calibration.Calibration` objects. This
will write out a single file. If you would like to write out a
    separate file for each object, use :func:`write_all`.
Parameters
------------
file : file or string
File or filename to which the data is saved. If file is a
file-object, then the filename is unchanged. If file is a
string, an appropriate extension will be appended to the file
name if it does not already have an extension.
obj : an object, or list/dict of objects
object or list/dict of objects to write to disk
overwrite : Boolean
if file exists, should it be overwritten?
Notes
-------
    If `file` is a str but doesn't contain a suffix, one is chosen
automatically. Here are the extensions
==================================================== ===============
skrf object extension
==================================================== ===============
:class:`~skrf.frequency.Frequency` '.freq'
:class:`~skrf.network.Network` '.ntwk'
:class:`~skrf.networkSet.NetworkSet` '.ns'
:class:`~skrf.calibration.calibration.Calibration` '.cal'
:class:`~skrf.media.media.Media` '.med'
other '.p'
==================================================== ===============
To make the file written by this method cross-platform, the pickling
protocol 2 is used. See :mod:`pickle` for more info.
Examples
-------------
Convert a touchstone file to a pickled Network,
>>> n = rf.Network('my_ntwk.s2p')
>>> rf.write('my_ntwk',n)
>>> n_red = rf.read('my_ntwk.ntwk')
Writing a list of different objects
>>> n = rf.Network('my_ntwk.s2p')
>>> ns = rf.NetworkSet([n,n,n])
>>> rf.write('out',[n,ns])
>>> n_red = rf.read('out.p')
See Also
------------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
skrf.network.Network.write : write method of Network
skrf.calibration.calibration.Calibration.write : write method of Calibration
'''
if isinstance(file, str):
extn = get_extn(file)
if extn is None:
# if there is not extension add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
file = file + '.' + extn
if os.path.exists(file):
if not overwrite:
warnings.warn('file exists, and overwrite option is False. Not writing.')
return
with open(file, 'wb') as fid:
pickle.dump(obj, fid, protocol=2)
else:
fid = file
pickle.dump(obj, fid, protocol=2)
fid.close()
def read_all(dir='.', contains = None, f_unit = None, obj_type=None):
'''
Read all skrf objects in a directory
Attempts to load all files in `dir`, using :func:`read`. Any file
that is not readable by skrf is skipped. Optionally, simple filtering
can be achieved through the use of `contains` argument.
Parameters
--------------
dir : str, optional
the directory to load from, default \'.\'
contains : str, optional
if not None, only files containing this substring will be loaded
f_unit : ['hz','khz','mhz','ghz','thz']
for all :class:`~skrf.network.Network` objects, set their
frequencies's :attr:`~skrf.frequency.Frequency.f_unit`
obj_type : str
Name of skrf object types to read (ie 'Network')
Returns
---------
out : dictionary
dictionary containing all loaded skrf objects. keys are the
filenames without extensions, and the values are the objects
Examples
----------
>>> rf.read_all('skrf/data/')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
'one_port': one port Calibration: 'one_port', 500-750 GHz, 201 pts, 4-ideals/4-measured,
...
>>> rf.read_all('skrf/data/', obj_type = 'Network')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
...
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
'''
out={}
for filename in os.listdir(dir):
if contains is not None and contains not in filename:
continue
fullname = os.path.join(dir,filename)
keyname = os.path.splitext(filename)[0]
try:
out[keyname] = read(fullname)
continue
except:
pass
try:
out[keyname] = Network(fullname)
continue
except:
pass
if f_unit is not None:
for keyname in out:
try:
out[keyname].frequency.unit = f_unit
except:
pass
if obj_type is not None:
out = dict([(k, out[k]) for k in out if
isinstance(out[k],sys.modules[__name__].__dict__[obj_type])])
return out
def read_all_networks(*args, **kwargs):
'''
Read all networks in a directory.
This is a convenience function. It just calls::
read_all(*args,obj_type='Network', **kwargs)
See Also
----------
read_all
'''
if 'f_unit' not in kwargs:
kwargs.update({'f_unit':'ghz'})
return read_all(*args,obj_type='Network', **kwargs)
ran = read_all_networks
def write_all(dict_objs, dir='.', *args, **kwargs):
'''
Write a dictionary of skrf objects individual files in `dir`.
Each object is written to its own file. The filename used for each
object is taken from its key in the dictionary. If no extension
exists in the key, then one is added. See :func:`write` for a list
of extensions. If you would like to write the dictionary to a single
output file use :func:`write`.
Notes
-------
    Any object in dict_objs that is picklable will be written.
Parameters
------------
dict_objs : dict
dictionary of skrf objects
dir : str
directory to save skrf objects into
\*args, \*\*kwargs :
passed through to :func:`~skrf.io.general.write`. `overwrite`
option may be of use.
See Also
-----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
----------
    Writing a dictionary of different skrf objects
>>> from skrf.data import line, short
>>> d = {'ring_slot':ring_slot, 'one_port_cal':one_port_cal}
>>> rf.write_all(d)
'''
    if not os.path.exists(dir):
        raise OSError('No such directory: %s' % dir)
for k in dict_objs:
filename = k
obj = dict_objs[k]
extn = get_extn(filename)
if extn is None:
# if there is not extension add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
filename = filename + '.' + extn
try:
with open(os.path.join(dir+'/', filename), 'wb') as fid:
write(fid, obj,*args, **kwargs)
except Exception as inst:
print(inst)
            warnings.warn("couldn't write %s: %s" % (k, str(inst)))
pass
def save_sesh(dict_objs, file='skrfSesh.p', module='skrf', exclude_prefix='_'):
'''
Save all `skrf` objects in the local namespace.
This is used to save current workspace in a hurry, by passing it the
output of :func:`locals` (see Examples). Note this can be
used for other modules as well by passing a different `module` name.
Parameters
------------
dict_objs : dict
dictionary containing `skrf` objects. See the Example.
file : str or file-object, optional
the file to save all objects to
module : str, optional
the module name to grep for.
exclude_prefix: str, optional
        don't save objects which have this as a prefix.
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
---------
Write out all skrf objects in current namespace.
>>> rf.write_all(locals(), 'mysesh.p')
'''
objects = {}
print('pickling: ')
for k in dict_objs:
try:
if module in inspect.getmodule(dict_objs[k]).__name__:
try:
pickle.dumps(dict_objs[k])
if k[0] != '_':
objects[k] = dict_objs[k]
print(k+', ')
finally:
pass
except(AttributeError, TypeError):
pass
    if len(objects) == 0:
print('nothing')
write(file, objects)
def load_all_touchstones(dir = '.', contains=None, f_unit=None):
'''
    Loads all touchstone files in a given dir into a dictionary.
Notes
-------
Alternatively you can use the :func:`read_all` function.
Parameters
-----------
dir : string
the path
contains : string
a string the filenames must contain to be loaded.
f_unit : ['hz','mhz','ghz']
the frequency unit to assign all loaded networks. see
:attr:`frequency.Frequency.unit`.
Returns
---------
    ntwkDict : a dictionary with keys equal to the file name (without
a suffix), and values equal to the corresponding ntwk types
Examples
----------
>>> ntwk_dict = rf.load_all_touchstones('.', contains ='20v')
See Also
-----------
read_all
'''
ntwkDict = {}
for f in os.listdir (dir):
if contains is not None and contains not in f:
continue
fullname = os.path.join(dir,f)
keyname,extn = os.path.splitext(f)
extn = extn.lower()
try:
            if extn[1] == 's' and extn[-1] == 'p':
                ntwkDict[keyname] = Network(fullname)
if f_unit is not None: ntwkDict[keyname].frequency.unit=f_unit
except:
pass
return ntwkDict
def write_dict_of_networks(ntwkDict, dir='.'):
'''
Saves a dictionary of networks touchstone files in a given directory
The filenames assigned to the touchstone files are taken from
the keys of the dictionary.
Parameters
-----------
ntwkDict : dictionary
dictionary of :class:`Network` objects
dir : string
directory to write touchstone file to
'''
warnings.warn('Deprecated. use write_all.', DeprecationWarning)
for ntwkKey in ntwkDict:
ntwkDict[ntwkKey].write_touchstone(filename = dir+'/'+ntwkKey)
def read_csv(filename):
'''
Read a 2-port s-parameter data from a csv file.
    Specifically, this reads a two-port csv file saved from a Rohde Schwarz
    ZVA-40, and possibly other network analyzers. It returns a
    :class:`Network` object.
Parameters
------------
filename : str
name of file
Returns
--------
ntwk : :class:`Network` object
the network representing data in the csv file
'''
ntwk = Network(name=filename[:-4])
try:
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(9))
s11 = data[:,1] +1j*data[:,2]
s21 = data[:,3] +1j*data[:,4]
s12 = data[:,5] +1j*data[:,6]
s22 = data[:,7] +1j*data[:,8]
ntwk.s = npy.array([[s11, s21],[s12,s22]]).transpose().reshape(-1,2,2)
except(IndexError):
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(3))
ntwk.s = data[:,1] +1j*data[:,2]
ntwk.frequency.f = data[:,0]
ntwk.frequency.unit='ghz'
return ntwk
## file conversion
def statistical_2_touchstone(file_name, new_file_name=None,\
header_string='# GHz S RI R 50.0'):
'''
Converts Statistical file to a touchstone file.
Converts the file format used by Statistical and other Dylan Williams
software to standard touchstone format.
Parameters
------------
file_name : string
name of file to convert
new_file_name : string
name of new file to write out (including extension)
header_string : string
touchstone header written to first beginning of file
'''
    remove_tmp_file = False
    if new_file_name is None:
        new_file_name = 'tmp-' + file_name
        remove_tmp_file = True
    # The multi-context `with` below breaks compatibility with python 2.6
    # and older; `open` is used instead of the python-2-only `file` builtin.
    with open(file_name, 'r') as old_file, open(new_file_name, 'w') as new_file:
new_file.write('%s\n'%header_string)
for line in old_file:
new_file.write(line)
if remove_tmp_file is True:
os.rename(new_file_name,file_name)
def network_2_spreadsheet(ntwk, file_name =None, file_type= 'excel', form='db',
*args, **kwargs):
'''
Write a Network object to a spreadsheet, for your boss
Write the s-parameters of a network to a spreadsheet, in a variety
    of forms. This function makes use of the pandas module, which in
    turn makes use of the xlrd module. These are imported during this
    function call. For more details about the file-writing functions
    see the pandas.DataFrame.to_??? functions.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwk : :class:`~skrf.network.Network` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
networkset_2_spreadsheet : writes a spreadsheet for many networks
'''
from pandas import DataFrame, Series # delayed because its not a requirement
file_extns = {'csv':'csv','excel':'xls','html':'html'}
form = form.lower()
if form not in ['db','ri','ma']:
raise ValueError('`form` must be either `db`,`ma`,`ri`')
file_type = file_type.lower()
if file_type not in file_extns.keys():
raise ValueError('file_type must be `csv`,`html`,`excel` ')
if ntwk.name is None and file_name is None:
raise ValueError('Either ntwk must have name or give a file_name')
if file_name is None and 'excel_writer' not in kwargs.keys():
file_name = ntwk.name + '.'+file_extns[file_type]
d = {}
index =ntwk.frequency.f_scaled
if form =='db':
for m,n in ntwk.port_tuples:
d['S%i%i Log Mag(dB)'%(m+1,n+1)] = \
Series(ntwk.s_db[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ma':
for m,n in ntwk.port_tuples:
d['S%i%i Mag(lin)'%(m+1,n+1)] = \
Series(ntwk.s_mag[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ri':
for m,n in ntwk.port_tuples:
d['S%i%i Real'%(m+1,n+1)] = \
Series(ntwk.s_re[:,m,n], index = index)
d[u'S%i%i Imag'%(m+1,n+1)] = \
Series(ntwk.s_im[:,m,n], index = index)
df = DataFrame(d)
df.__getattribute__('to_%s'%file_type)(file_name,
index_label='Freq(%s)'%ntwk.frequency.unit, *args, **kwargs)
def network_2_dataframe(ntwk, attrs=['s_db'], ports = None):
'''
Convert one or more attributes of a network to a pandas DataFrame
Parameters
--------------
ntwk : :class:`~skrf.network.Network` object
the network to write
attrs : list Network attributes
like ['s_db','s_deg']
ports : list of tuples
list of port pairs to write. defaults to ntwk.port_tuples
(like [[0,0]])
Returns
----------
df : pandas DataFrame Object
'''
from pandas import DataFrame, Series # delayed because its not a requirement
d = {}
index =ntwk.frequency.f_scaled
if ports is None:
ports = ntwk.port_tuples
for attr in attrs:
for m,n in ports:
d['%s %i%i'%(attr, m+1,n+1)] = \
Series(ntwk.__getattribute__(attr)[:,m,n], index = index)
return DataFrame(d)
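# Usage sketch (assumed objects): collect dB magnitude and phase of S11 into
# one frequency-indexed DataFrame.
#
#     df = network_2_dataframe(ntwk, attrs=['s_db', 's_deg'], ports=[(0, 0)])
#     print(df.head())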
def networkset_2_spreadsheet(ntwkset, file_name=None, file_type= 'excel',
*args, **kwargs):
'''
Write a NetworkSet object to a spreadsheet, for your boss
    Write the s-parameters of each network in the networkset to a
    spreadsheet. If the `excel` file_type is used, then each network
    is written to its own sheet, with the sheetname taken from the
    network `name` attribute.
    This function makes use of the pandas module, which in turn makes
    use of the xlrd module. These are imported during this function call.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwkset : :class:`~skrf.networkSet.NetworkSet` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
networkset_2_spreadsheet : writes a spreadsheet for many networks
'''
from pandas import DataFrame, Series, ExcelWriter # delayed because its not a requirement
if ntwkset.name is None and file_name is None:
        raise ValueError('Either ntwkset must have name or give a file_name')
if file_type == 'excel':
writer = ExcelWriter(file_name)
[network_2_spreadsheet(k, writer, sheet_name =k.name, *args, **kwargs) for k in ntwkset]
writer.save()
else:
[network_2_spreadsheet(k,*args, **kwargs) for k in ntwkset]
# Provide a StringBuffer that lets me work with Python2 strings and Python3 unicode strings without thinking
if sys.version_info < (3, 0):
import StringIO
class StringBuffer(StringIO.StringIO):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
else:
import io
StringBuffer = io.StringIO
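# Usage sketch: either implementation supports the context-manager protocol,
# so the same code runs under python 2 and 3.
#
#     with StringBuffer() as buf:
#         buf.write('hello')
#         contents = buf.getvalue()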
license: bsd-3-clause | hash: 6,781,361,543,470,911,000 | line_mean: 29.089333 | line_max: 109 | alpha_frac: 0.577347 | autogenerated: false
owlabs/incubator-airflow | airflow/www_rbac/app.py | copies: 1 | size: 12500
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import logging
import socket
from datetime import timedelta
from typing import Any
# flask, flask_login and datetime are used by the request hooks registered
# further down in create_app()
import flask
import flask_login
import six
import pendulum
from flask import Flask, session as flask_session
from flask_appbuilder import AppBuilder, SQLA
from flask_caching import Cache
from flask_wtf.csrf import CSRFProtect
from six.moves.urllib.parse import urlparse
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from airflow import settings, version
from airflow.configuration import conf
from airflow.logging_config import configure_logging
from airflow.www_rbac.static_config import configure_manifest_files
app = None # type: Any
appbuilder = None
csrf = CSRFProtect()
log = logging.getLogger(__name__)
def create_app(config=None, session=None, testing=False, app_name="Airflow"):
global app, appbuilder
app = Flask(__name__)
if conf.getboolean('webserver', 'ENABLE_PROXY_FIX'):
app.wsgi_app = ProxyFix(
app.wsgi_app,
num_proxies=conf.get("webserver", "PROXY_FIX_NUM_PROXIES", fallback=None),
x_for=conf.getint("webserver", "PROXY_FIX_X_FOR", fallback=1),
x_proto=conf.getint("webserver", "PROXY_FIX_X_PROTO", fallback=1),
x_host=conf.getint("webserver", "PROXY_FIX_X_HOST", fallback=1),
x_port=conf.getint("webserver", "PROXY_FIX_X_PORT", fallback=1),
x_prefix=conf.getint("webserver", "PROXY_FIX_X_PREFIX", fallback=1)
)
app.secret_key = conf.get('webserver', 'SECRET_KEY')
session_lifetime_days = conf.getint('webserver', 'SESSION_LIFETIME_DAYS', fallback=30)
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=session_lifetime_days)
app.config.from_pyfile(settings.WEBSERVER_CONFIG, silent=True)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['APP_NAME'] = app_name
app.config['TESTING'] = testing
app.config['SESSION_COOKIE_HTTPONLY'] = True
app.config['SESSION_COOKIE_SECURE'] = conf.getboolean('webserver', 'COOKIE_SECURE')
app.config['SESSION_COOKIE_SAMESITE'] = conf.get('webserver', 'COOKIE_SAMESITE')
if config:
app.config.from_mapping(config)
csrf.init_app(app)
db = SQLA(app)
from airflow import api
api.load_auth()
api.API_AUTH.api_auth.init_app(app)
# flake8: noqa: F841
cache = Cache(app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
from airflow.www_rbac.blueprints import routes
app.register_blueprint(routes)
configure_logging()
configure_manifest_files(app)
with app.app_context():
from airflow.www_rbac.security import AirflowSecurityManager
security_manager_class = app.config.get('SECURITY_MANAGER_CLASS') or \
AirflowSecurityManager
if not issubclass(security_manager_class, AirflowSecurityManager):
raise Exception(
"""Your CUSTOM_SECURITY_MANAGER must now extend AirflowSecurityManager,
not FAB's security manager.""")
appbuilder = AppBuilder(
app,
db.session if not session else session,
security_manager_class=security_manager_class,
base_template='airflow/master.html',
update_perms=conf.getboolean('webserver', 'UPDATE_FAB_PERMS'))
def init_views(appbuilder):
from airflow.www_rbac import views
# Remove the session from scoped_session registry to avoid
# reusing a session with a disconnected connection
appbuilder.session.remove()
appbuilder.add_view_no_menu(views.Airflow())
appbuilder.add_view_no_menu(views.DagModelView())
appbuilder.add_view(views.DagRunModelView,
"DAG Runs",
category="Browse",
category_icon="fa-globe")
appbuilder.add_view(views.JobModelView,
"Jobs",
category="Browse")
appbuilder.add_view(views.LogModelView,
"Logs",
category="Browse")
appbuilder.add_view(views.SlaMissModelView,
"SLA Misses",
category="Browse")
appbuilder.add_view(views.TaskInstanceModelView,
"Task Instances",
category="Browse")
appbuilder.add_view(views.ConfigurationView,
"Configurations",
category="Admin",
category_icon="fa-user")
appbuilder.add_view(views.ConnectionModelView,
"Connections",
category="Admin")
appbuilder.add_view(views.PoolModelView,
"Pools",
category="Admin")
appbuilder.add_view(views.VariableModelView,
"Variables",
category="Admin")
appbuilder.add_view(views.XComModelView,
"XComs",
category="Admin")
if "dev" in version.version:
airflow_doc_site = "https://airflow.readthedocs.io/en/latest"
else:
airflow_doc_site = 'https://airflow.apache.org/docs/{}'.format(version.version)
appbuilder.add_link("Documentation",
href=airflow_doc_site,
category="Docs",
category_icon="fa-cube")
appbuilder.add_link("GitHub",
href='https://github.com/apache/airflow',
category="Docs")
appbuilder.add_view(views.VersionView,
'Version',
category='About',
category_icon='fa-th')
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
flask_appbuilder_views, flask_appbuilder_menu_links
)
for v in flask_appbuilder_views:
log.debug("Adding view %s", v["name"])
appbuilder.add_view(v["view"],
v["name"],
category=v["category"])
for ml in sorted(flask_appbuilder_menu_links, key=lambda x: x["name"]):
log.debug("Adding menu link %s", ml["name"])
appbuilder.add_link(ml["name"],
href=ml["href"],
category=ml["category"],
category_icon=ml["category_icon"])
integrate_plugins()
# Garbage collect old permissions/views after they have been modified.
# Otherwise, when the name of a view or menu is changed, the framework
# will add the new Views and Menus names to the backend, but will not
# delete the old ones.
def init_plugin_blueprints(app):
from airflow.plugins_manager import flask_blueprints
for bp in flask_blueprints:
log.debug("Adding blueprint %s:%s", bp["name"], bp["blueprint"].import_name)
app.register_blueprint(bp["blueprint"])
init_views(appbuilder)
init_plugin_blueprints(app)
if conf.getboolean('webserver', 'UPDATE_FAB_PERMS'):
security_manager = appbuilder.sm
security_manager.sync_roles()
from airflow.www_rbac.api.experimental import endpoints as e
# required for testing purposes otherwise the module retains
# a link to the default_auth
if app.config['TESTING']:
if six.PY2:
reload(e) # noqa
else:
import importlib
importlib.reload(e)
app.register_blueprint(e.api_experimental, url_prefix='/api/experimental')
server_timezone = conf.get('core', 'default_timezone')
if server_timezone == "system":
server_timezone = pendulum.local_timezone().name
elif server_timezone == "utc":
server_timezone = "UTC"
default_ui_timezone = conf.get('webserver', 'default_ui_timezone')
if default_ui_timezone == "system":
default_ui_timezone = pendulum.local_timezone().name
elif default_ui_timezone == "utc":
default_ui_timezone = "UTC"
if not default_ui_timezone:
default_ui_timezone = server_timezone
@app.context_processor
def jinja_globals(): # pylint: disable=unused-variable
globals = {
'server_timezone': server_timezone,
'default_ui_timezone': default_ui_timezone,
'hostname': socket.getfqdn() if conf.getboolean(
'webserver', 'EXPOSE_HOSTNAME', fallback=True) else 'redact',
'navbar_color': conf.get('webserver', 'NAVBAR_COLOR'),
'log_fetch_delay_sec': conf.getint(
'webserver', 'log_fetch_delay_sec', fallback=2),
'log_auto_tailing_offset': conf.getint(
'webserver', 'log_auto_tailing_offset', fallback=30),
'log_animation_speed': conf.getint(
'webserver', 'log_animation_speed', fallback=1000)
}
if 'analytics_tool' in conf.getsection('webserver'):
globals.update({
'analytics_tool': conf.get('webserver', 'ANALYTICS_TOOL'),
'analytics_id': conf.get('webserver', 'ANALYTICS_ID')
})
return globals
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
@app.before_request
def before_request():
_force_log_out_after = conf.getint('webserver', 'FORCE_LOG_OUT_AFTER', fallback=0)
if _force_log_out_after > 0:
flask.session.permanent = True
app.permanent_session_lifetime = datetime.timedelta(minutes=_force_log_out_after)
flask.session.modified = True
flask.g.user = flask_login.current_user
@app.after_request
def apply_caching(response):
_x_frame_enabled = conf.getboolean('webserver', 'X_FRAME_ENABLED', fallback=True)
if not _x_frame_enabled:
response.headers["X-Frame-Options"] = "DENY"
return response
@app.before_request
def make_session_permanent():
flask_session.permanent = True
return app, appbuilder
def root_app(env, resp):
resp('404 Not Found', [('Content-Type', 'text/plain')])
return [b'Apache Airflow is not at this location']
def cached_app(config=None, session=None, testing=False):
global app, appbuilder
if not app or not appbuilder:
base_url = urlparse(conf.get('webserver', 'base_url'))[2]
if not base_url or base_url == '/':
base_url = ""
app, _ = create_app(config, session, testing)
app = DispatcherMiddleware(root_app, {base_url: app})
return app
def cached_appbuilder(config=None, testing=False):
global appbuilder
cached_app(config=config, testing=testing)
return appbuilder
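# Usage sketch (assumed deployment, not part of this module): a WSGI server
# such as gunicorn can serve the lazily-constructed application:
#
#     gunicorn -w 4 -b 0.0.0.0:8080 'airflow.www_rbac.app:cached_app()'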
license: apache-2.0 | hash: -2,300,563,895,249,644,500 | line_mean: 40.254125 | line_max: 97 | alpha_frac: 0.57592 | autogenerated: false
vvtam/virtualenv | virtualenv.py | copies: 1 | size: 100200
#!/usr/bin/env python
"""Create a "virtual" Python installation"""
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import zlib
import errno
import glob
import distutils.sysconfig
import struct
import subprocess
from distutils.util import strtobool
__version__ = "13.2.0.dev0"
virtualenv_version = __version__ # legacy
if sys.version_info < (2, 6):
    print('ERROR: this script requires Python 2.6 or greater.')
    sys.exit(101)
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if is_win:
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
# Return a mapping of version -> Python executable
# Only provided for Windows, where the information in the registry is used
if not is_win:
def get_installed_pythons():
return {}
else:
try:
import winreg
except ImportError:
import _winreg as winreg
def get_installed_pythons():
try:
python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
"Software\\Python\\PythonCore")
except WindowsError:
# No registered Python installations
return {}
i = 0
versions = []
while True:
try:
versions.append(winreg.EnumKey(python_core, i))
i = i + 1
except WindowsError:
break
exes = dict()
for ver in versions:
try:
path = winreg.QueryValue(python_core, "%s\\InstallPath" % ver)
except WindowsError:
continue
exes[ver] = join(path, "python.exe")
winreg.CloseKey(python_core)
# Add the major versions
# Sort the keys, then repeatedly update the major version entry
# Last executable (i.e., highest version) wins with this approach
for ver in sorted(exes):
exes[ver[0]] = exes[ver]
return exes
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver >= 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if minver >= 4:
REQUIRED_MODULES.extend([
'operator',
'_collections_abc',
'_bootlocale',
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest, symlink=True):
if os.path.isdir(src):
shutil.copytree(src, dest, symlink)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s', os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest, symlink)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest, symlink)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content.encode("utf-8"):
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in reversed(dirs):
files = glob.glob(os.path.join(dir, filename))
if files and os.path.isfile(files[0]):
return True, files[0]
return False, filename
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = [here, join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(
os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
Custom option parser which updates its defaults by checking the
configuration files and environmental variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action == 'store_false':
val = not strtobool(val)
elif option.action in ('store_true', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occurred during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
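    # Example of the precedence implemented above: a value set in the
    # [virtualenv] section of the config file, e.g.
    #
    #     [virtualenv]
    #     extra-search-dir = /opt/wheels
    #
    # is overridden by the corresponding VIRTUALENV_EXTRA_SEARCH_DIR
    # environment variable, because the environ values are applied second.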
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator with all environmental vars with prefix VIRTUALENV
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
        Overriding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity.")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity.')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch.")
parser.set_defaults(system_site_packages=False)
parser.add_option(
'--no-site-packages',
dest='system_site_packages',
action='store_false',
help="DEPRECATED. Retained only for backward compatibility. "
"Not having access to global site-packages is now the default behavior.")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give the virtual environment access to the global site-packages.")
parser.add_option(
'--always-copy',
dest='symlink',
action='store_false',
default=True,
help="Always copy files rather than symlinking.")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools when installing it.")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative.')
parser.add_option(
'--no-setuptools',
dest='no_setuptools',
action='store_true',
help='Do not install setuptools (or pip) in the new virtualenv.')
parser.add_option(
'--no-pip',
dest='no_pip',
action='store_true',
help='Do not install pip in the new virtualenv.')
parser.add_option(
'--no-wheel',
dest='no_wheel',
action='store_true',
help='Do not install wheel in the new virtualenv.')
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
metavar='DIR',
default=default_search_dirs,
help="Directory to look for setuptools/pip distributions in. "
"This option can be used multiple times.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
default=True,
help="DEPRECATED. Retained only for backward compatibility. This option has no effect. "
"Virtualenv never downloads pip or setuptools.")
parser.add_option(
'--prompt',
dest='prompt',
help='Provides an alternative prompt prefix for this environment.')
parser.add_option(
'--setuptools',
dest='setuptools',
action='store_true',
help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")
parser.add_option(
'--distribute',
dest='distribute',
action='store_true',
help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
if not options.never_download:
logger.warn('The --never-download option is for backward compatibility only.')
logger.warn('Setting it to false is no longer supported, and will be ignored.')
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=True,
no_setuptools=options.no_setuptools,
no_pip=options.no_pip,
no_wheel=options.no_wheel,
symlink=options.symlink)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
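# Illustrative sketch (defined but never called): how call_subprocess is
# typically driven elsewhere in this script -- the command given as a list,
# output captured and logged line by line through a filter. The extra_env
# value is a made-up example, not something virtualenv itself sets here.
def _example_call_subprocess():
    call_subprocess(
        [sys.executable, '-c', 'print("hello from the child process")'],
        show_stdout=False,                    # capture output instead of streaming
        filter_stdout=filter_install_output,  # defined just below; demotes noise to DEBUG
        extra_env={'EXAMPLE_FLAG': '1'})      # merged into a copy of os.environ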
def filter_install_output(line):
if line.strip().startswith('running'):
return Logger.INFO
return Logger.DEBUG
def find_wheels(projects, search_dirs):
"""Find wheels from which we can import PROJECTS.
Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
a list of the first wheel found for each PROJECT
"""
wheels = []
# Look through SEARCH_DIRS for the first suitable wheel. Don't bother
# about version checking here, as this is simply to get something we can
# then use to install the correct version.
for project in projects:
for dirname in search_dirs:
# This relies on only having "universal" wheels available.
# The pattern could be tightened to require -py2.py3-none-any.whl.
files = glob.glob(os.path.join(dirname, project + '-*.whl'))
if files:
wheels.append(os.path.abspath(files[0]))
break
else:
# We're out of luck, so quit with a suitable error
logger.fatal('Cannot find a wheel for %s' % (project,))
return wheels
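# Illustrative sketch (defined but never called): with a support directory of
# universal wheels, find_wheels returns one absolute wheel path per project,
# in order. The directory name used below is hypothetical.
def _example_find_wheels():
    support_dir = join(os.path.dirname(os.path.abspath(__file__)),
                       'virtualenv_support')
    # e.g. ['/.../virtualenv_support/setuptools-2.2-py2.py3-none-any.whl',
    #       '/.../virtualenv_support/pip-1.5.6-py2.py3-none-any.whl']
    return find_wheels(['setuptools', 'pip'], [support_dir])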
def install_wheel(project_names, py_executable, search_dirs=None):
if search_dirs is None:
search_dirs = file_search_dirs()
wheels = find_wheels(['setuptools', 'pip'], search_dirs)
pythonpath = os.pathsep.join(wheels)
findlinks = ' '.join(search_dirs)
cmd = [
py_executable, '-c',
'import sys, pip; sys.exit(pip.main(["install", "--ignore-installed"] + sys.argv[1:]))',
] + project_names
logger.start_progress('Installing %s...' % (', '.join(project_names)))
logger.indent += 2
try:
call_subprocess(cmd, show_stdout=False,
extra_env = {
'PYTHONPATH': pythonpath,
'JYTHONPATH': pythonpath, # for Jython < 3.x
'PIP_FIND_LINKS': findlinks,
'PIP_USE_WHEEL': '1',
'PIP_PRE': '1',
'PIP_NO_INDEX': '1'
}
)
finally:
logger.indent -= 2
logger.end_progress()
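# Note on the PYTHONPATH trick above: a wheel is a plain zip archive, and
# CPython can import pure-Python code directly from zip files on sys.path.
# That is what lets the child interpreter "import pip" before pip has been
# installed into the new environment. Hypothetical demonstration:
#
#   >>> sys.path.insert(0, '/path/to/pip-1.5.6-py2.py3-none-any.whl')
#   >>> import pip   # imported straight out of the wheel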
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False,
prompt=None, search_dirs=None, never_download=False,
no_setuptools=False, no_pip=False, no_wheel=False,
symlink=True):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear, symlink=symlink))
install_distutils(home_dir)
if not no_setuptools:
to_install = ['setuptools']
if not no_pip:
to_install.append('pip')
if not no_wheel:
to_install.append('wheel')
install_wheel(to_install, py_executable, search_dirs)
install_activate(home_dir, bin_dir, prompt)
install_python_config(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
home_dir = os.path.abspath(home_dir)
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if is_win:
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
import ctypes
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
size = max(len(home_dir)+1, 256)
buf = ctypes.create_unicode_buffer(size)
try:
u = unicode
except NameError:
u = str
ret = GetShortPathName(u(home_dir), buf, size)
if not ret:
print('Error: the path "%s" has a space in it' % home_dir)
print('We could not determine the short pathname for it.')
print('Exiting.')
sys.exit(3)
home_dir = str(buf.value)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
if is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
elif not is_win:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
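# Illustrative sketch (defined but never called): on POSIX with CPython 2.7
# this returns something like
#   ('/tmp/venv', '/tmp/venv/lib/python2.7',
#    '/tmp/venv/include/python2.7', '/tmp/venv/bin')
# while on Windows the Lib/, Include/ and Scripts/ layout is used instead.
# The destination path is hypothetical.
def _example_path_locations():
    return path_locations('/tmp/venv')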
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if is_darwin:
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
# Python 2.6 no-frameworks
os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
# System Python 2.7 on OSX Mountain Lion
os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
if hasattr(sys, 'base_prefix'):
prefixes.append(sys.base_prefix)
prefixes = list(map(os.path.expanduser, prefixes))
prefixes = list(map(os.path.abspath, prefixes))
# Check longer prefixes first so we don't split in the middle of a filename
prefixes = sorted(prefixes, key=len, reverse=True)
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
if src_prefix != os.sep: # sys.prefix == "/"
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
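# Illustrative sketch (defined but never called): change_prefix maps a file
# living under one of the known source prefixes into the new environment,
# e.g. '/usr/lib/python2.7/os.py' -> '/tmp/venv/lib/python2.7/os.py' when
# sys.prefix is '/usr'. A path outside every known prefix trips the assert.
def _example_change_prefix():
    src = os.__file__
    if src.endswith('.pyc') or src.endswith('.pyo'):
        src = src[:-1]
    return change_prefix(src, '/tmp/venv')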
def copy_required_modules(dst_prefix, symlink):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
# special-case custom readline.so on OS X, but not for pypy:
if modname == 'readline' and sys.platform == 'darwin' and not (
is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))):
dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
elif modname == 'readline' and sys.platform == 'win32':
# special-case for Windows, where readline is not a
# standard module, though it may have been installed in
# site-packages by a third-party package
                    continue  # nothing to copy; move on to the next module
else:
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename, symlink)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1], symlink)
finally:
sys.path = _prev_sys_path
def subst_path(prefix_path, prefix, home_dir):
prefix_path = os.path.normpath(prefix_path)
prefix = os.path.normpath(prefix)
home_dir = os.path.normpath(home_dir)
if not prefix_path.startswith(prefix):
logger.warn('Path not in prefix %r %r', prefix_path, prefix)
return
return prefix_path.replace(prefix, home_dir, 1)
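# Illustrative sketch of subst_path (hypothetical paths): with prefix '/usr'
# and home_dir '/tmp/venv', the path '/usr/include/python2.7' becomes
# '/tmp/venv/include/python2.7'; a path outside the prefix logs a warning
# and returns None.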
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
elif hasattr(sys, 'base_prefix'):
logger.notify('Using base prefix %r' % sys.base_prefix)
prefix = sys.base_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir, symlink)
stdlib_dirs = [os.path.dirname(os.__file__)]
if is_win:
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif is_darwin:
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn), symlink)
# ...and modules
copy_required_modules(home_dir, symlink)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc') or site_filename.endswith('.pyo'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir, symlink)
else:
logger.debug('No include dir %s' % stdinc_dir)
platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
if platinc_dir != stdinc_dir:
platinc_dest = distutils.sysconfig.get_python_inc(
plat_specific=1, prefix=home_dir)
if platinc_dir == platinc_dest:
# Do platinc_dest manually due to a CPython bug;
# not http://bugs.python.org/issue3386 but a close cousin
platinc_dest = subst_path(platinc_dir, prefix, home_dir)
if platinc_dest:
# PyPy's stdinc_dir and prefix are relative to the original binary
# (traversing virtualenvs), whereas the platinc_dir is relative to
# the inner virtualenv and ignores the prefix argument.
# This seems more evolved than designed.
copyfile(platinc_dir, platinc_dest, symlink)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if is_win:
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn), symlink)
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name), symlink)
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
# OS X framework builds cause validation to break
# https://github.com/pypa/virtualenv/issues/322
if os.environ.get('__PYVENV_LAUNCHER__'):
del os.environ["__PYVENV_LAUNCHER__"]
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if is_win or is_cygwin:
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
            # We need to copy the DLL to ensure that Windows loads the correct
            # one; the DLL may not exist if we are running under Cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
if sys.platform in ('win32', 'cygwin'):
python_executable += '.exe'
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable, symlink)
if is_win:
for name in ['libexpat.dll', 'libpypy.dll', 'libpypy-c.dll',
'libeay32.dll', 'ssleay32.dll', 'sqlite3.dll',
'tcl85.dll', 'tk85.dll']:
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(bin_dir, name), symlink)
for d in sys.path:
if d.endswith('lib_pypy'):
break
else:
logger.fatal('Could not find lib_pypy in sys.path')
raise SystemExit(3)
logger.info('Copying lib_pypy')
copyfile(d, os.path.join(home_dir, 'lib_pypy'), symlink)
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext.lower() == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
# Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib,
symlink)
# And then change the install_name of the copied python executable
try:
mach_o_change(py_executable,
os.path.join(prefix, 'Python'),
'@executable_path/../.Python')
except:
e = sys.exc_info()[1]
logger.warn("Could not call mach_o_change: %s. "
"Trying to call install_name_tool instead." % e)
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal("Could not call install_name_tool -- you must "
"have Apple's development tools installed")
raise
if not is_win:
# Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
py_exe_version_major = 'python%s' % sys.version_info[0]
py_exe_version_major_minor = 'python%s.%s' % (
sys.version_info[0], sys.version_info[1])
py_exe_no_version = 'python'
required_symlinks = [ py_exe_no_version, py_exe_version_major,
py_exe_version_major_minor ]
py_executable_base = os.path.basename(py_executable)
if py_executable_base in required_symlinks:
# Don't try to symlink to yourself.
required_symlinks.remove(py_executable_base)
for pth in required_symlinks:
full_pth = join(bin_dir, pth)
if os.path.exists(full_pth):
os.unlink(full_pth)
if symlink:
os.symlink(py_executable_base, full_pth)
else:
copyfile(py_executable, full_pth, symlink)
if is_win and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
# NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if is_win:
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir, symlink)
if site_packages:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
    if is_win or (is_jython and os._name == 'nt'):
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
        # supplying activate.fish in addition to, not instead of, the
        # bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
install_files(home_dir, bin_dir, prompt, files)
def install_files(home_dir, bin_dir, prompt, files):
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
def install_python_config(home_dir, bin_dir, prompt=None):
    if sys.platform == 'win32' or (is_jython and os._name == 'nt'):
files = {}
else:
files = {'python-config': PYTHON_CONFIG}
install_files(home_dir, bin_dir, prompt, files)
for name, content in files.items():
make_exe(os.path.join(bin_dir, name))
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir, symlink=True):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name), symlink)
def fix_lib64(lib_dir, symlink=True):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
# PyPy's library path scheme is not affected by this.
# Return early or we will die on the following assert.
if is_pypy:
logger.debug('PyPy detected, skipping lib64 symlinking')
return
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
top_level = os.path.dirname(lib_parent)
lib_dir = os.path.join(top_level, 'lib')
lib64_link = os.path.join(top_level, 'lib64')
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
if os.path.lexists(lib64_link):
return
if symlink:
os.symlink('lib', lib64_link)
else:
copyfile('lib', lib64_link)
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
# If the "executable" is a version number, get the installed executable for
# that version
python_versions = get_installed_pythons()
if exe in python_versions:
exe = python_versions[exe]
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
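# Illustrative sketch: resolve_interpreter accepts a bare version key, a
# command name, or an absolute path. All of the calls below depend on what
# happens to be installed on the machine and are purely hypothetical:
#
#   resolve_interpreter('2.7')              # looked up via get_installed_pythons()
#   resolve_interpreter('python2.7')        # searched for on $PATH
#   resolve_interpreter('/usr/bin/python')  # used as-is after the checks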
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir, bin_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py',
'activate.fish', 'activate.csh']
def fixup_scripts(home_dir, bin_dir):
if is_win:
new_shebang_args = (
'%s /c' % os.path.normcase(os.environ.get('COMSPEC', 'cmd.exe')),
'', '.exe')
else:
new_shebang_args = ('/usr/bin/env', sys.version[:3], '')
# This is what we expect at the top of scripts:
shebang = '#!%s' % os.path.normcase(os.path.join(
os.path.abspath(bin_dir), 'python%s' % new_shebang_args[2]))
# This is what we'll put:
new_shebang = '#!%s python%s%s' % new_shebang_args
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
old_shebang = lines[0].strip()
old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:])
if not old_shebang.startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
script = relative_script([new_shebang] + lines[1:])
f = open(filename, 'wb')
f.write('\n'.join(script).encode('utf-8'))
f.close()
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:]
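# Illustrative sketch of the combined effect of fixup_scripts and
# relative_script on a console script (contents hypothetical). Input:
#
#   #!/tmp/venv/bin/python
#   from __future__ import print_function
#   print('hi')
#
# becomes, with the activation line inserted *after* the __future__ import
# because Python requires future statements to precede all other code:
#
#   #!/usr/bin/env python2.7
#   from __future__ import print_function
#
#   import os; activate_this=...; exec(...); del os, activate_this
#
#   print('hi')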
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
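# Illustrative sketch (hypothetical paths): for a .pth file at
# /tmp/venv/lib/python2.7/site-packages/mypkg.pth, the absolute entry
# '/tmp/venv/src/mypkg' is rewritten to '../../../src/mypkg', while blank
# lines, comments, 'import ...' lines and already-relative entries pass
# through unchanged.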
def fixup_egg_link(filename):
f = open(filename)
link = f.readline().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.5'`` then the
script will start with ``#!/usr/bin/env python2.5`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = codecs.open(filename, 'r', encoding='utf-8')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
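    # The placeholder below is spelled as two adjacent string literals so
    # that this line itself is not found and replaced in the generated script.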
return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
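# Illustrative sketch: each embedded file below is stored as zlib-compressed,
# base64-encoded text, which convert() reverses:
#
#   >>> blob = base64.b64encode(zlib.compress(u'hello'.encode('utf-8')))
#   >>> convert(blob.decode('ascii'))  # -> 'hello' (u'hello' on Python 2)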
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqToZTKdOJ0e3tO3RsncVrfuYm3yc7m1vXoKAmyWFMkS5C2tTd3f/u9DwAE
+CHb2+6cphNLJPDw8PC+8PAeOhqNTopCZkuxyZd1KoWScblYiyKu1kqs8lJU66Rc7hdxWW3h6eIm
vpZKVLlQWxVhqygInv/GT/BcfF4nyqAA3+K6yjdxlSziNN2KZFPkZSWXYlmXSXYtkiypkjhN/g4t
8iwSz387BsFZJmDmaSJLcStLBXCVyFfiYlut80yM6wLn/DL6Y/xqMhVqUSZFBQ1KjTNQZB1XQSbl
EtCElrUCUiaV3FeFXCSrZGEb3uV1uhRFGi+k+K//4qlR0zAMVL6Rd2tZSpEBMgBTAqwC8YCvSSkW
+VJGQryRixgH4OcNsQKGNsU1U0jGLBdpnl3DnDK5kErF5VaM53VFgAhlscwBpwQwqJI0De7y8kZN
YElpPe7gkYiZPfzJMHvAPHH8LucAjh+z4C9Zcj9l2MA9CK5aM9uUcpXcixjBwk95Lxcz/WycrMQy
Wa2ABlk1wSYBI6BEmswPClqOb/UKfXdAWFmujGEMiShzY35JPaLgrBJxqoBt6wJppAjzd3KexBlQ
I7uF4QAikDToG2eZqMqOQ7MTOQAocR0rkJKNEuNNnGTArD/GC0L7r0m2zO/UhCgAq6XEL7Wq3PmP
ewgArR0CTANcLLOadZYmNzLdTgCBz4B9KVWdVigQy6SUiyovE6kIAKC2FfIekJ6KuJSahMyZRm6n
RH+iSZLhwqKAocDjSyTJKrmuS5IwsUqAc4Er3n/8Sbw7fXN28kHzmAHGMnu9AZwBCi20gxMMIA5q
VR6kOQh0FJzjHxEvlyhk1zg+4NU0OHhwpYMxzL2I2n2cBQey68XVw8AcK1AmNFZA/f4bukzVGujz
Pw+sdxCcDFGFJs7f7tY5yGQWb6RYx8xfyBnBtxrOd1FRrV8DNyiEUwGpFC4OIpggPCCJS7NxnklR
AIulSSYnAVBoTm39VQRW+JBn+7TWLU4ACGWQwUvn2YRGzCRMtAvrNeoL03hLM9NNArvOm7wkxQH8
ny1IF6VxdkM4KmIo/jaX10mWIULIC0G4F9LA6iYBTlxG4pxakV4wjUTI2otbokjUwEvIdMCT8j7e
FKmcsviibt2tRmgwWQmz1ilzHLSsSL3SqjVT7eW9w+hLi+sIzWpdSgBezz2hW+X5VMxBZxM2Rbxh
8arucuKcoEeeqBPyBLWEvvgdKHqiVL2R9iXyCmgWYqhgladpfgckOwoCIfawkTHKPnPCW3gH/wJc
/DeV1WIdBM5IFrAGhcgPgUIgYBJkprlaI+Fxm2bltpJJMtYUebmUJQ31OGIfMOKPbIxzDT7klTZq
PF1c5XyTVKiS5tpkJmzxsrBi/fia5w3TAMutiGamaUOnDU4vLdbxXBqXZC5XKAl6kV7bZYcxg54x
yRZXYsNWBt4BWWTCFqRfsaDSWVWSnACAwcIXZ0lRp9RIIYOJGAbaFAR/E6NJz7WzBOzNZjlAhcTm
ewH2B3D7O4jR3ToB+iwAAmgY1FKwfPOkKtFBaPRR4Bt905/HB049W2nbxEOu4iTVVj7OgjN6eFqW
JL4LWWCvqSaGghlmFbp21xnQEcV8NBoFgXGHtsp8zVVQldsjYAVhxpnN5nWChm82Q1Ovf6iARxHO
wF43287CAw1hOn0AKjldVmW+wdd2bp9AmcBY2CPYExekZSQ7yB4nvkbyuSq9ME3RdjvsLFAPBRc/
nb4/+3L6SRyLy0alTdv67ArGPM1iYGuyCMBUrWEbXQYtUfElqPvEezDvxBRgz6g3ia+Mqxp4F1D/
XNb0Gqax8F4Gpx9O3pyfzv7y6fSn2aezz6eAINgZGezRlNE81uAwqgiEA7hyqSJtX4NOD3rw5uST
fRDMEjX75mtgN3gyvpYVMHE5hhlPRbiJ7xUwaDilphPEsdMALHg4mYjvxOHz568OCVqxLbYADMyu
0xQfzrRFnyXZKg8n1PgXdumPWUlp/+3y6OsrcXwswl/i2zgMwIdqmjJL/Eji9HlbSOhawZ9xriZB
sJQrEL0biQI6fk5+8YQ7wJJAy1zb6V/yJDPvmSvdIUh/jKkH4DCbLdJYKWw8m4VABOrQ84EOETvX
KHVj6Fhs3a4TjQp+SgkLm2GXKf7Tg2I8p36IBqPodjGNQFw3i1hJbkXTh36zGeqs2WysBwRhJokB
h4vVUChME9RZZQJ+LXEe6rC5ylP8ifBRC5AA4tYKtSQukt46RbdxWks1diYFRByPW2RERZso4kdw
UcZgiZulm0za1DQ8A82AfGkOWrRsUQ4/e+DvgLoymzjc6PHei2mGmP477zQIB3A5Q1T3SrWgsHYU
F6cX4tWLw310Z2DPubTU8ZqjhU6yWtqHK1gtIw+MMPcy8uLSZYV6Fp8e7Ya5iezKdFlhpZe4lJv8
Vi4BW2RgZ5XFT/QGduYwj0UMqwh6nfwBVqHGb4xxH8qzB2lB3wGotyEoZv3N0u9xMEBmChQRb6yJ
1HrXz6awKPPbBJ2N+Va/BFsJyhItpnFsAmfhPCZDkwgaArzgDCl1J0NQh2XNDivhjSDRXiwbxRoR
uHPU1Ff09SbL77IZ74SPUemOJ5Z1UbA082KDZgn2xHuwQoBkDhu7hmgMBVx+gbK1D8jD9GG6QFna
WwAgMPSKtmsOLLPVoynyrhGHRRiT14KEt5ToL9yaIWirZYjhQKK3kX1gtARCgslZBWdVg2YylDXT
DAZ2SOJz3XnEW1AfQIuKEZjNsYbGjQz9Lo9AOYtzVyk5/dAif/nyhdlGrSm+gojNcdLoQqzIWEbF
FgxrAjrBeGQcrSE2uAPnFsDUSrOm2P8k8oK9MVjPCy3b4AfA7q6qiqODg7u7u0hHF/Ly+kCtDv74
p2+++dML1onLJfEPTMeRFh1qiw7oHXq00bfGAn1nVq7Fj0nmcyPBGkvyysgVRfy+r5NlLo72J1Z/
Ihc3Zhr/Na4MKJCZGZSpDLQdNRg9U/vPoldqJJ6RdbZtxxP2S7RJtVbMt7rQo8rBEwC/ZZHXaKob
TlDiK7BusENfynl9HdrBPRtpfsBUUU7Hlgf2X14hBj5nGL4ypniGWoLYAi2+Q/qfmG1i8o60hkDy
oonq7J63/VrMEHf5eHm3vqYjNGaGiULuQInwmzxaAG3jruTgR7u2aPcc19Z8PENgLH1gmFc7lmMU
HMIF12LqSp3D1ejxgjTdsWoGBeOqRlDQ4CTOmdoaHNnIEEGid2M2+7ywugXQqRU5NPEBswrQwh2n
Y+3arOB4QsgDx+IlPZHgIh913r3gpa3TlAI6LR71qMKAvYVGO50DX44NgKkYlX8ZcUuzTfnYWhRe
gx5gOceAkMFWHWbCN64PONob9bBTx+oP9WYa94HARRpzLOpR0AnlYx6hVCBNxdjvOcTilrjdwXZa
HGIqs0wk0mpAuNrKo1eodhqmVZKh7nUWKVqkOXjFVisSIzXvfWeB9kH4uM+YaQnUZGjI4TQ6Jm/P
E8BQt8Pw2XWNgQY3DoMYbRJF1g3JtIZ/wK2g+AYFo4CWBM2CeayU+RP7HWTOzld/GWAPS2hkCLfp
kBvSsRgajnm/J5CMOhoDUpABCbvCSK4jq4MUOMxZIE+44bUclG6CESmQM8eCkJoB3Omlt8HBJxGe
gJCEIuT7SslCfCVGsHxtUX2c7v5dudQEIcZOA3IVdPTi2I1sOFGN41aUw2doP75BZyVFDhw8B5fH
DfS7bG6Y1gZdwFn3FbdFCjQyxWFGExfVK0MYN5j8h2OnRUMsM4hhKG8g70jHjDQJ7HJr0LDgBoy3
5u2x9GM3YoF9x2GuDuXmHvZ/YZmoRa5Cipm0YxfuR3NFlzYW2/NkPoI/3gKMJlceJJnq+AVGWf6B
QUIPetgH3ZsshkWWcXmXZCEpME2/Y39pOnhYUnpG7uATbacOYKIY8Tx4X4KA0NHnAYgTagLYlctQ
abe/C3bnFEcWLncfeW7z5dGrqy5xp0MRHvvpX6rT+6qMFa5WyovGQoGr1TXgqHRhcnG21YeX+nAb
twllrmAXKT5++iKQEBzXvYu3T5t6w/CIzYNz8j4GddBrD5KrNTtiF0AEtSIyykH4dI58PLJPndyO
iT0ByJMYZseiGEiaT/4ROLsWCsbYX24zjKO1VQZ+4PU3X896IqMukt98PXpglBYx+sR+3PIE7cic
VLBrtqWMU3I1nD4UVMwa1rFtignrc9r+aR676vE5NVo29t3fAj8GCobUJfgIL6YN2bpTxY/vTg3C
03ZqB7DObtV89mgRYG+fz3+BHbLSQbXbOEnpXAEmv7+PytVs7jle0a89PEg7FYxDgr79l7p8AdwQ
cjRh0p2OdsZOTMC5ZxdsPkWsuqjs6RyC5gjMywtwjz+HFU6ve+B7Bge/r7p8IiBvTqMeMmpbbIZ4
wQclhz1K9gnzfvqMf9dZP27mw4L1/zHLF/+cST5hKgaaNh4+rH5iuXbXAHuEeRpwO3e4hd2h+axy
ZZw7VklKPEfd9VzcUboCxVbxpAigLNnv64GDUqoPvd/WZclH16QCC1nu43HsVGCmlvH8ek3Mnjj4
ICvExDZbUKzayevJ+4Qv1NFnO5Ow2Tf0c+c6NzErmd0mJfQFhTsOf/j442nYb0IwjgudHm9FHu83
INwnMG6oiRM+pQ9T6Cld/nH10d66+AQ1GQEmIqzJ1iVsJxBs4gj9a/BARMg7sOVjdtyhL9ZycTOT
lDqAbIpdnaD4W3yNmNiMAj//S8UrSmKDmSzSGmnFjjdmH67qbEHnI5UE/0qnCmPqECUEcPhvlcbX
Ykydlxh60txI0anbuNTeZ1HmmJwq6mR5cJ0shfy1jlPc1svVCnDBwyv9KuLhKQIl3nFOAyctKrmo
y6TaAglileuzP0p/cBrOtzzRsYckH/MwATEh4kh8wmnjeybc0pDLBAf8Ew+cJO67sYOTrBDRc3if
5TMcdUY5vlNGqnsuT4+D9gg5ABgBUJj/aKIjd/4bSa/cA0Zac5eoqCU9UrqRhpycMYQynmCkg3/T
T58RXd4awPJ6GMvr3Vhet7G87sXy2sfyejeWrkjgwtqglZGEvsBV+1ijN9/GjTnxMKfxYs3tMPcT
czwBoijMBtvIFKdAe5EtPt8jIKS2nQNnetjkzyScVFrmHALXIJH78RBLb+ZN8rrTmbJxdGeeinFn
h3KI/L4HUUSpYnPqzvK2jKs48uTiOs3nILYW3WkDYCra6UQcK81uZ3OO7rYs1ejiPz//8PEDNkdQ
I5PeQN1wEdGw4FTGz+PyWnWlqdn8FcCO1NJPxKFuGuDeIyNrPMoe//OOMjyQccQdZSjkogAPgLK6
bDM39ykMW891kpR+zkzOh03HYpRVo2ZSA0Q6ubh4d/L5ZEQhv9H/jlyBMbT1pcPFx7SwDbr+m9vc
Uhz7gFDr2FZj/Nw5ebRuOOJhG2vAdjzf1oPDxxjs3jCBP8t/KqVgSYBQkQ7+PoVQj945/Kb9UIc+
hhE7yX/uyRo7K/adI3uOi+KIft+xQ3sA/7AT9xgzIIB2ocZmZ9DslVtK35rXHRR1gD7S1/vNe832
1qu9k/EpaifR4wA6lLXNht0/75yGjZ6S1ZvT788+nJ+9uTj5/IPjAqIr9/HTwaE4/fGLoPwQNGDs
E8WYGlFhJhIYFrfQSSxz+K/GyM+yrjhIDL3enZ/rk5oNlrpg7jPanAiecxqThcZBM45C24c6/wgx
SvUGyakponQdqjnC/dKG61lUrvOjqVRpjs5qrbdeulbM1JTRuXYE0geNXVIwCE4xg1eUxV6ZXWHJ
J4C6zqoHKW2jbWJISkHBTrqAc/5lTle8QCl1hidNZ63oL0MX1/AqUkWawE7udWhlSXfD9JiGcfRD
e8DNePVpQKc7jKwb8qwHsUCr9Trkuen+k4bRfq0Bw4bB3sG8M0npIZSBjcltIsRGfJITynv4apde
r4GCBcODvgoX0TBdArOPYXMt1glsIIAn12B9cZ8AEFor4R8IHDnRAZljdkb4drPc/3OoCeK3/vnn
nuZVme7/TRSwCxKcShT2ENNt/A42PpGMxOnH95OQkaPUXPHnGssDwCGhAKgj7ZS/xCfos7GS6Urn
l/j6AF9oP4Fet7qXsih1937XOEQJeKbG5DU8U4Z+IaZ7WdhTnMqkBRorHyxmWEHopiGYz574tJZp
qvPdz96dn4LviMUYKEF87nYKw3G8BI/QdfIdVzi2QOEBO7wukY1LdGEpyWIZec16g9YoctTby8uw
60SB4W6vThS4jBPloj3GaTMsU04QISvDWphlZdZutUEKu22I4igzzBKzi5ISWH2eAF6mpzFviWCv
hKUeJgLPp8hJVpmMxTRZgB4FlQsKdQpCgsTFekbivDzjGHheKlMGBQ+LbZlcrys83YDOEZVgYPMf
T76cn32gsoTDV43X3cOcU9oJTDmJ5BhTBDHaAV/ctD/kqtmsj2f1K4SB2gf+tF9xdsoxD9Dpx4FF
/NN+xXVox85OkGcACqou2uKBGwCnW5/cNLLAuNp9MH7cFMAGMx8MxSKx7EUnerjz63KibdkyJRT3
MS+fcICzKmxKmu7spqS1P3qOqwLPuZbj/kbwtk+2zGcOXW86b4aS39xPRwqxJBYw6rb2xzDZYZ2m
ejoOsw1xC21rtY39OXNipU67RYaiDEQcu50nLpP1K2HdnDnQS6PuABPfanSNJPaq8tHP2Uh7GB4m
ltidfYrpSGUsZAQwkiF17U8NPhRaBFAglP07diR3Onl+6M3RsQYPz1HrLrCNP4Ai1Lm4VOORl8CJ
8OVXdhz5FaGFevRIhI6nkskst3li+Llbo1f50p9jrwxQEBPFroyzazlmWFMD8yuf2AMhWNK2Hqkv
k6s+wyLOwDm9H+Dwrlz0H5wY1FqM0Gl3I7dtdeSTBxv0loLsJJgPvozvQPcXdTXmlRw4h+6tpRuG
+jBEzD6Epvr0fRxiOObXcGB9GsC91NCw0MP7deDsktfGOLLWPraqmkL7QnuwixK2ZpWiYxmnONH4
otYLaAzucWPyR/apThSyv3vqxJyYkAXKg7sgvbmNdINWOGHE5UpcOZpQOnxTTaPfLeWtTMFogJEd
Y7XDL7baYRLZcEpvHthvxu5ie7Htx43eNJgdmXIMRIAKMXoDPbsQanDAFf5Z70Ti7Iac47d/PZuK
tx9+gn/fyI9gQbHmcSr+BqOLt3kJ20ou2qXbFLCAo+L9Yl4rLIwkaHRCwRdPoLd24ZEXT0N0ZYlf
UmIVpMBk2nLDt50AijxBKmRv3ANTLwG/TUFXywk1DmLfWoz0S6TBcI0L1oUc6JbRutqkaCac4Eiz
iJej87O3px8+nUbVPTK2+Tlygid+HhZORx8Nl3gMNhX2yaLGJ1eOv/yDTIsed1nvNU29DO41RQjb
kcLuL/kmjdjuKeISAwai2C7zRYQtgdO5RK+6A/954mwrH7TvnnFFWOOJPjxrnHh8DNQQP7f1zwga
Uh89J+pJCMVzrBXjx9Go3wJPBUW04c/zm7ulGxDXRT80wTamzazHfnerAtdMZw3PchLhdWyXwdSB
pkmsNvOFWx/4MRP6IhRQbnS8IVdxnVZCZrCVor093UgBCt4t6WMJYVZhK0Z1bhSdSe/irXJyj2Il
RjjqiIrq8RyGAoWw9f4xvmEzgLWGouYSaIBOiNK2KXe6qnqxZgnmnRBRryff4C7JXrnJL5rCPChv
jBeN/wrzRG+RMbqWlZ4/PxhPLl82CQ4UjF54Bb2LAoydyyZ7oDGL58+fj8S/Pez0MCpRmuc34I0B
7F5n5ZxeDxhsPTm7Wl2H3ryJgB8Xa3kJD64oaG6f1xlFJHd0pQWR9q+BEeLahJYZTfuWOeZYXcnn
y9yCz6m0wfhLltB1RxhRkqhs9a1RGG0y0kQsCYohjNUiSUKOTsB6bPMaa/Ewuqj5Rd4DxycIZopv
8WCMd9hrdCwpb9Zyj0XnWIwI8IhSyng0KmamajTAc3ax1WjOzrKkaspIXrhnpvoKgMreYqT5SsR3
KBlmHi1iOGWdHqs2jnW+k0W9jUq+uHTjjK1Z8uuHcAfWBknLVyuDKTw0i7TIZbkw5hRXLFkklQPG
tEM43JkubyLrEwU9KI1AvZNVWFqJtm//YNfFxfQjHR/vm5F01lBlL8TimFCctfIKo6gZn6JPlpCW
b82XCYzygaLZ2hPwxhJ/0LFUrCHw7u1wyxnrTN/HwWkbzSUdAIfugLIK0rKjpyOci8csfGbagVs0
8EM7c8LtNimrOk5n+tqHGfppM3uervG0ZXA7CzyttwK+fQ6O777O2AfHwSTXID0x49ZUZByLlY5M
RG5lmV+EVeTo5R2yrwQ+BVJmOTP10CZ2dGnZ1Raa6gRHR8UjqK9M8dKAQ26qZjoFJy7mU0pvMuUO
A86zn29JV1eI78T41VQctnY+i2KLNzkBss+Woe+KUTeYihMMMHNs34shvjsW45dT8ccd0KOBAY4O
3RHa+9gWhEEgr66eTMY0mRPZwr4U9of76hxG0PSM4+SqTf4umb4lKv1ri0pcIagTlV+2E5VbYw/u
WzsfH8lwA4pjlcjl/jOFJNRIN7p5mMEJPyyg37M5Wrp2vKmoocK5OWxG7ho96GhE4zbbQUxRulZf
XL+LuoYNp71zwKTJtFIV7S1zmMao0WsRFQDM+o7S8Bve7QLvNSlc/2zwiFUXAViwPREEXenJB2ZN
w0ZQH3QEn6QBHmAUEeJhaqMoXMl6goiEdA8OMdFXrUNsh+N/d+bhEoOho9AOlt98vQtPVzB7izp6
FnR3pYUnsra8ollu8+kPzHmM0tf1NwmMA6URHXBWzVWV5GYeYfYy30GT2yzmDV4GSSfTaBJT6bpN
vJXmW7/Qj6HYASWTwVqAJ1Wv8CD5lu62PFGU9IZX1Hx9+HJqKoMZkJ7Aq+jVV/oKSOpmLj/wfeyp
3rvBS93vMPoXB1hS+b3tq85uhqZ13LoLyh8spOjZJJpZOjSG6eE6kGbNYoF3JjbEZN/aXgDyHryd
Ofg55vLTHBw22JBGfei6GqOR3iHVNiDAD5uMIcl5VNdGkSLSu4RtSHnuUpxPFgXdq9+CYAgBOX8d
8xt0BeviyIbYjE3Bk8+xm82Jn+qmt+6M7Qka2+om3DV97r9r7rpFYGdukhk6c/frS10a6L7DVrSP
Bhze0IR4VIlEo/H7jYlrB6Y6h6Y/Qq8/SH63E850wKw8BMZk7GC8n9hTY2/M/iZeuN8xIWyfL2R2
y4l7nY3WtDs2o83xj/EUOPkFn9sbBiijaak5kPdLdMPejHNkZ/L6Ws1ivN1xRptsyufq7J7Mtu09
Xc4nY7U1uy28tAhAGG7Smbducj0wBuhKvmWa06Gc22kEDU1Jw04WskqWbBL01g7ARRwxpf4mEM9p
xKNUYqBb1WVRwm54pO8i5jydvtTmBqgJ4G1idWNQNz2m+mpaUqyUHGZKkDlO20ryASKwEe+YhtnM
vgNeedFcs5BMLTPIrN7IMq6aK4b8jIAENl3NCFR0jovrhOcaqWxxiYtYYnnDQQoDZPb7V7Cx9DbV
O+5VmFht93h2oh465PuUKxscY2S4OLm31wu611ot6Wpr1zu0zRqus1cqwTKYu/JIR+pYGb/V93fx
HbMcyUf/0uEfkHe38tLPQrfqjL1bi4bzzFUI3Qub8MYAMs599zB2OKB742JrA2zH9/WFZZSOhznQ
2FJR++S9CqcZbdJEkDBh9IEIkl8U8MQIkgf/kREkfWsmGBqNj9YDvWUCD4SaWD24V1A2jAB9ZkAk
PMBuXWBoTOXYTbovcpXcj+yF0qwrnUo+Yx6QI7t3kxEIvmpSuRnK3lVwuyJIvnTR4+/PP745OSda
zC5O3v7HyfeUlIXHJS1b9egQW5bvM7X3vfRvN9ymE2n6Bm+w7bkhlmuYNITO+04OQg+E/nq1vgVt
KzL39VCHTt1PtxMgvnvaLahDKrsXcscv0zUmbvpMK0870E85qdb8cjITzCNzUsfi0JzEmffN4YmW
0U5seWjhnPTWrjrR/qq+BXQg7j2xSda0Anhmgvxlj0xMxYwNzLOD0v7ffFBmOFYbmht0QAoX0rnJ
kS5xZFCV//8TKUHZxbi3Y0dxau/mpnZ8PKTspfN49ruQkSGIV+436s7PFfalTAeoEASs8PQ9hYyI
0X/6QNWmHzxT4nKfCov3Udlc2V+4Ztq5/WuCSQaVve9LcYISH7NC41WduokDtk+nAzl9dBqVr5xK
FtB8B0DnRjwVsDf6S6wQ51sRwsZRu2SYHEt01Jf1Ocij3XSwN7R6IfaHyk7dskshXg43XLYqO3WP
Q+6hHuihalPc51hgzNIcqicV3xFkPs4UdMGX53zgGbre9sPX28uXR/ZwAfkdXzuKhLLJRo5hv3Sy
MXdeKul0J2Ypp5Suh3s1JySsW1w5UNknGNrbdEpSBvY/Js+BIY289/0hM9PDu3p/1MbUst4RTEmM
n6kJTcsp4tG42yeT7nQbtdUFwgVJjwDSUYEAC8F0dKOTILrlLO/xC70bnNd0Ha97whQ6UkHJYj5H
cA/j+zX4tbtTIfGjujOKpj83aHOgXnIQbvYduNXEC4UMm4T21Bs+GHABuCa7v//LR/TvpjHa7oe7
/Grb6lVvHSD7spj5iplBLRKZxxEYGdCbY9LWWC5hBB2voWno6DJUMzfkC3T8KJsWL9umDQY5szPt
AVijEPwfucjncQ==
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVV1v2jAUffevuATUtd0YYo+bmEZVJJBaqBrWaVsn1ySXxlpwkO1Q6Md/n50vElK6h5YHSHyP
r4/vOffShGnAFcx5iLCIlYYZQqzQhzuuA3BUFEsPYcZFh3mar5hGB47nMlrAjKngmDRhE8XgMSEi
DTIWwDX4XKKnww0hPua74PAIHgiYTywUamjPYbnxI48ka02QaFej0AcUKy4jsUChYcUkZ7MQVYLi
c2jAb2jfg9N6oJOzU3o1upx+75/Ri/50+J4+OfAHvoAOUCR4+7GRntOqoZ0CgetlJHUCLNZSjrVN
SXzO/0fm53Q4GQ8n54M9lIp4jVgRqdMrQi+SrKIM1ay8icgqiGJTYB+1kSfRD5jw4V4FH+Au4F4A
AVshMPNjQl60WNiwDphOrJGlMgbxWBgai+gIbg0Do7h5mkfSviyZ8VC2VX0E+GFsFMU6C2subrM0
FaAtELRsicELmLhFBXfGj8y3Xxuw3jLHGo8sDXP0cwFM+YUt/0nfHbaTUj8+Fou/3CG9Gly6o8m4
/ZwMySXbEj597fi46og4DCtFe0Fgt7tHWbdbk9Tt1rV0uy+KmIVzIikkjw7GV9vrNyy3rr1eDxwR
CR+VlrFtOdyh1wQXwznkgMYOgbYN5b2an/5ETHuncS4lhrhilZ4sdXf1bEJKbHsOpaVXSh2S1aF8
JVJrNlPJtFGzlxK6Q+nJaEzH/fMBpZ8zWLmPC97bhrAFMyukImupVde7gu5pLEtrp013jyJWudSe
ySnl25+O3P7J2YBeXE7OL6Y1Y+4awZ6WOyjJ6Ky31UxzmIJCowfrPXasw7cZMVRYRR/emLGAgi0Q
rislv3ZujmC7MxuCJUPbOzfhnP1FULFEOxJiwULOVDroLX2u3ylgoUTmbyxViaSMKHUilB4PDqqZ
CEnhxT/KcmNmjID2IsvjtL45qXnfZOy9wch75bh7zah7bswZrf4B7cdZow==
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJyFVVFv0zAQfs+vONJO3RDNxCsSQoMVrdK2Vl03CSHkesllMXLsYDvZivjx2GmTOG0YfWhV+7u7
73z33Y1gnTENKeMIeakNPCKUGhP7xcQTbCJ4ZOKcxoZV1GCUMp1t4O0zMxkTQEGVQjicO4dTyIwp
Ppyfu386Q86jWOZwBhq1ZlK8jYIRXEoQ0jhDYAYSpjA2fBsFQVoKG0UKSLAJB9MEJrMXi6uYMiXl
KCrIZYJARQIKTakEGAkmQ+tU5ZSDRTAlRY7CRJMA7GdkgRoNSJ74t1BRxegjR12jWAoGbfpTAeGY
LK4vycN8tb6/uCbLi/VVWGPcx3maPr2AO4VjYB+HMAxAkQT/i/ptfbW4vVrczAZit3eHDNqL13n0
Ya+w+Tq/uyLL1eJmuSaLh9lqNb/0+IzgznqnAjAvzBa4jG0BNmNXfdJUkxTU2I6xRaKcy+e6VApz
WVmoTGFTgwslrYdN03ONrbbMN1E/FQ7H7gOP0UxRjV67TPRBjF3naCMV1mSkYk9MUN7F8cODZzsE
iIHYviIe6n8WeGQxWKuhl+9Xa49uijq7fehXMRxT9VR9f/8jhDcfYSKkSOyxKp22cNIrIk+nzd2b
Yc7FNpHx8FUn15ZfzXEE98JxZEohx4r6kosCT+R9ZkHQtLmXGYSEeH8JCTvYkcRgXAutp9Rw7Jmf
E/J5fktuL25m1tMe3vLdjDt9bNxr2sMo2P3C9BccqGeYhqfQITz6XurXaqdf99LF1mT2YJrvzqCu
5w7dKvV3PzNyOb+7+Hw923dOuB+AX2SxrZs9Lm0xbCH6kmhjUyuWw+7cC7DX8367H3VzDz6oBtty
tMIeobE21JT6HaRS+TbaoqhbE7rgdGs3xtE4cOF3xo0TfxwsdyRlhUoxuzes18r+Jp88zDx1G+kd
/HTrr1BY2CeuyfnbQtAcu9j+pOw6cy9X0k3IuoyKCZPC5ESf6MkgHE5tLiSW3Oa+W2NnrQfkGv/h
7tR5PNFnMBlw4B9NJTxnzKA9fLTT0aXSb5vw7FUKzcTZPddqYHi2T9/axJmEEN3qHncVCuEPaFmq
uEtpcBj2Z1wjrqGReJBHrY6/go21NA==
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx1U2FP2zAQ/e5f8TAV3Soo+0zXbYUiDQkKQgVp2ibjJNfFUuIg22nVf885SVFLO3+I7Lt3fr6X
d8eY58ZjYQpCWfuAhFB7yrAyIYf0Ve1SQmLsuU6DWepAw9TnEoOFq0rwdjAUx/hV1Ui1tVWAqy1M
QGYcpaFYx+yVI67LkKwx1UuTEaYGl4X2Bl+zJpAlP/6V2hTDtCq/DYXQhdEeGW040Q/Eb+t9V/e3
U/V88zh/mtyqh8n8J47G+IKTE3gKZJdoYrK3h5MRU1tGYS83gqNc+3yEgyyP93cP820evHLvr2H8
kaYB/peoyY7aVHzpJnE9e+6I5Z+ji4GMTNJWNuOQq6MA1N25p8pW9HWdVWlfsNpPDbdxjgpaahuw
1M7opCA/FFu1uwxC7L8KUqmto1KyQe3rx0I0Eovdf7BVe67U5c1MzSZ310pddGheZoFPWyytRkzU
aCA/I+RkBXhFXr5aWV0SxjhUI6jwdAj8kmhPzX7nTfJFkM3MImp2VdVFFq1vLHSU5szYQK4Ri+Jd
xlW2JBtOGcyYVW7SnB3v6RS91g3gKapZ0oWxbHVteYIIq3iv7QeuSrUj6KSqQ+yqsxDj1ivNQxKF
YON10Q+NH/ARS95i5Tuqq2Vxfvc23f/FO6zrtXXmJr+ZtMY9/A15ZXFWtmch2rEQ4g1ryVHH
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT
PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt
r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X
0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw==
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4ipOLVEI8wwKCXX0iXf1C7Pl4spMU0hJTcvMS01RiPf3cYmHyQYE+fsGhCho
cCkAAUibEkTEVhWLMlUlLk6QGixStlyaeCyJDPHw9/Pw93VFsQguim4ZXAJoIUw5DhX47XUM8UCx
EchHtwsohN1bILUgw61c/Vy4AJYPYm4=
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmO41hyfW+g/0FTU7C7IXeJIqmtB/3AnZRIStxF2kaBm7gv4ipyMF/mB3+Sf8GXVGVl1tLT
43ECSqR4b5wbETeWE8z/+a///vNCDaN6cYtSf5G1dbNw/IVXNIu6aCvX9xa3qsgWl0IJ/7IYinbh
2nkOVqs2X0TNjz/8eeFFle826fBhQRaLBkD9uviw+LCy3Sbq7Mb/UNbrH3+YNtLcVaB+Xbipb+eL
tly0eVsD/M6u6g8//vC+dquobH5VWU75eMFUdvHb4n02RHlXuHYTFfmHbHCLLLNz70NpN+GrBI4p
1EeSk4FAXaZR88u0vPip8usi7fznt3fvP+OuPnx49/Pil4td+XnzigIAPoqYQH2J8v4z+C+8b98m
Q25t7k76LIK0cOz0V89/MXXx0+Lf6z5q3PA/F+/FIif9uqnaadFf/PzXSXYBfqIb2NeApecJwPzI
dlL/149nnvyoc7KqYfzTAT8v/voUmX7e+3n364tffl/oVaDyswKY/7J18e6bve8Wv9RuUfqfLHmK
/u139Hwx+9ePRep97KKqae30YwmCo2y+0vTz1k+rv7159B3pb1SOGj97Pe8/flfkC1Vn/7xYR4n6
lypNEGDDV5f7lcjil3S+4++p881Wv6qKyn5GQg1yJwcp4BZ5E+Wt/z1P/umbiHir4J8Xip/eFt6n
9T/9gU9eY+7zUX97Jlmb136ziKrKT/3OzpvP8VX/+MObSP0lL3LvVZlJ9v1b8357jXyw8rXxYPXN
11n4UzJ8G8S/vUbuJ6RPj999DbtS5kys//JusXwrNLnvT99cFlBNwXCe+niRz8JF/ezNr9Pze+H6
18W7d5PPvozW7+387Zto/v4pL8BvbxTzvIW9KCv/Fj0WzVQb/YXbVlPZWTz3/9vCaRtQbPN/Bb+j
2rUrDxTVD68gfQXu/ZewAFX53U/vf/rD2P3558W7+W79Po1y/xXoX/6RFHyNIoVjgAG4H0RTcAe5
3bSVv3DSwk2mZYHjFB8zj6fC4sLOFTHJJQrwzFYJgso0ApOoBzFiRzzQKjIQCCbQMIFJGCKqGUyS
8AkjiF2wTwmMEbcEUvq8Nj+X0f4YcCQmYRiOY7eRbAJDqzm1chOoNstbJ8oTBhZQ2NcfgaB6QjLp
U4+SWFjQGCZpyqby8V4JkPGs9eH1BscXIrTG24QxXLIgCLYNsIlxSYLA6SjAeg7HAg4/kpiIB8k9
TCLm0EM4gKIxEj8IUj2dQeqSxEwYVH88qiRlCLjEYGuNIkJB1BA5dHOZdGAoUFk54WOqEojkuf4Q
Ig3WY+96TDlKLicMC04h0+gDCdYHj0kz2xBDj9ECDU5zJ0tba6RKgXBneewhBG/xJ5m5FX+WSzsn
wnHvKhcOciw9NunZ0BUF0n0IJAcJMdcLqgQb0zP19dl8t9PzmMBjkuIF7KkvHgqEovUPOsY0PBB1
HCtUUhch83qEJPjQcNQDsgj0cRqx2ZbnnlrlUjE1EX2wFJyyDa/0GLrmKDEFepdWlsbmVU45Wiwt
eFM6mfs4kxg8yc4YmKDy67dniLV5FUeO5AKNPZaOQQ++gh+dXE7dbJ1aTDr7S4WPd8sQoQkDyODg
XnEu/voeKRAXZxB/e2xaJ4LTFLPYEJ15Ltb87I45l+P6OGFA5F5Ix8A4ORV6M1NH1uMuZMnmFtLi
VpYed+gSq9JDBoHc05J4OhKetrk1p0LYiKipxLMe3tYS7c5V7O1KcPU8BJGdLfcswhoFCSGQqJ8f
ThyQKy5EWFtHVuNhvTnkeTc8JMpN5li3buURh0+3ZGuzdwM55kon+8urbintjdQJf9U1D0ah+hNh
i1XNu4fSKbTC5AikGEaj0CYM1dpuli7EoqUt7929f1plxGGNZnixFSFP2qzhlZMonu2bB9OWSqYx
VuHKWNGJI8kqUhMTRtk0vJ5ycZ60JlodlmN3D9XiEj/cG2lSt+WV3OtMgt1Tf4/Z+1BaCus740kx
Nvj78+jMd9tq537Xz/mNFyiHb0HdwHytJ3uQUzKkYhK7wjGtx3oKX43YeYoJVtqDSrCnQFzMemCS
2bPSvP+M4yZFi/iZhAjL4UOeMfa7Ex8HKBqw4umOCPh+imOP6yVTwG2MplB+wtg97olEtykNZ6wg
FJBNXSTJ3g0CCTEEMdUjjcaBDjhJ9fyINXgQVHhA0bjk9lhhhhOGzcqQSxYdj3iIN2xGEOODx4qj
Q2xikJudC1ujCVOtiRwhga5nPdhe1gSa649bLJ0wCuLMcEYIeSy25YcDQHJb95nfowv3rQnin0fE
zIXFkM/EwSGxvCCMgEPNcDp/wph1gMEa8Xd1qAWOwWZ/KhjlqzgisBpDDDXz9Cmov46GYBKHC4zZ
84HJnXoTxyWNBbXV4LK/r+OEwSN45zBp7Cub3gIYIvYlxon5BzDgtPUYfXAMPbENGrI+YVGSeTQ5
i8NMB5UCcC+YRGIBhgs0xhAGwSgYwywpbu4vpCSTdEKrsy8osXMUnHQYenQHbOBofLCNNTg3CRRj
A1nXY2MZcjnXI+oQ2Zk+561H4CqoW61tbPKv65Y7fqc3TDUF9CA3F3gM0e0JQ0TPADJFJXVzphpr
2FzwAY8apGCju1QGOiUVO5KV6/hKbtgVN6hRVwpRYtu+/OC6w2bCcGzZQ8NCc4WejNEjFxOIgR3o
QqR1ZK0IaUxZ9nbL7GWJIjxBARUhAMnYrq/S0tVOjzlOSYRqeIZxaSaOBX5HSR3MFekOXVdUPbjX
nru61fDwI8HRYPUS7a6Inzq9JLjokU6P6OzT4UCH+Nha+JrU4VqEo4rRHQJhVuulAnvFhYz5NWFT
aS/bKxW6J3e46y4PLagGrCDKcq5B9EmP+s1QMCaxHNeM7deGEV3WPn3CeKjndlygdPyoIcNaL3dd
bdqPs47frcZ3aNWQ2Tk+rjFR01Ul4XnQQB6CSKA+cZusD0CP3F2Ph0e78baybgioepG12luSpFXi
bHbI6rGLDsGEodMObDG7uyxfCeU+1OiyXYk8fnGu0SpbpRoEuWdSUlNi5bd9nBxYqZGrq7Qa7zV+
VLazLcelzzP9+n6+xUtWx9OVJZW3gk92XGGkstTJ/LreFVFF2feLpXGGuQqq6/1QbWPyhJXIXIMs
7ySVlzMYqoPmnmrobbeauMIxrCr3sM+qs5HpwmmFt7SM3aRNQWpCrmeAXY28EJ9uc966urGKBL9H
18MtDE5OX97GDOHxam11y5LCAzcwtkUu8wqWI1dWgHyxGZdY8mC3lXzbzncLZ2bIUxTD2yW7l9eY
gBUo7uj02ZI3ydUViL7oAVFag37JsjYG8o4Csc5R7SeONGF8yZP+7xxi9scnHvHPcogJ44VH/LMc
Yu6Vn3jEzCFw9Eqq1ENQAW8aqbUwSiAqi+nZ+OkZJKpBL66Bj8z+ATqb/8qDIJUeNRTwrI0YrVmb
9FArKVEbCWUNSi8ipfVv+STgkpSsUhcBg541eeKLoBpLGaiHTNoK0r4nn3tZqrcIULtq20Df+FVQ
Sa0MnWxTugMuzD410sQygF4qdntbswiJMqjs014Irz/tm+pd5oygJ0fcdNbMg165Pqi7EkYGAXcB
dwxioCDA3+BY9+JjuOmJu/xyX2GJtaKSQcOZxyqFzTaa6/ot21sez0BtKjirROKRm2zuai02L0N+
ULaX8H5P6VwsGPbYOY7sAy5FHBROMrMzFVPYhFHZ7M3ZCZa2hsT4jGow6TGtG8Nje9405uMUjdF4
PtKQjw6yZOmPUmO8LjFWS4aPCfE011N+l3EdYq09O3iQJ9a01B3KXiMF1WmtZ+l1gmyJ/ibAHZil
vQzdOl6g9PoSJ4TM4ghTnTndEVMOmsSSu+SCVlGCOLQRaw9oLzamSWP62VuxPZ77mZYdfTRGuNBi
KyhZL32S2YckO/tU7y4Bf+QKKibQSKCTDWPUwWaE8yCBeL5FjpbQuAlb53mGX1jptLeRotREbx96
gnicYz0496dYauCjpTCA4VA0cdLJewzRmZeTwuXWD0talJsSF9J1Pe72nkaHSpULgNeK1+o+9yi0
YpYwXZyvaZatK2eL0U0ZY6ekZkFPdC8JTF4Yo1ytawNfepqUKEhwznp6HO6+2l7L2R9Q3N49JMIe
Z+ax1mVaWussz98QbNTRPo1xu4W33LJpd9H14dd66ype7UktfEDi3oUTccJ4nODjwBKFxS7lYWiq
XoHu/b7ZVcK5TbRD0F/2GShg2ywwUl07k4LLqhofKxFBNd1grWY+Zt/cPtacBpV9ys2z1moMLrT3
W0Elrjtt5y/dvDQYtObYS97pqj0eqmwvD3jCPRqamGthLiF0XkgB6IdHLBBwDGPiIDh7oPaRmTrN
tYA/yQKFxRiok+jM6ciJq/ZgiOi5+W4DEmufPEubeSuYJaM3/JHEevM08yJAXUQwb9LS2+8FOfds
FfOe3Bel6EDSjIEIKs4o9tyt67L1ylQlzhe0Q+7ue/bJnWMcD3q6wDSIQi8ThnRM65aqLWesi/ZM
xhHmQvfKBbWcC194IPjbBLYR9JTPITbzwRcu+OSFHDHNSYCLt29sAHO6Gf0h/2UO9Xwvhrjhczyx
Ygz6CqP4IwxQj5694Q1Pe2IR+KF/yy+5PvCL/vgwv5mPp9n4kx7fnY/nmV++410qF/ZVCMyv5nAP
pkeOSce53yJ6ahF4aMJi52by1HcCj9mDT5i+7TF6RoPaLL+cN1hXem2DmX/mdIbeeqwQOLD5lKO/
6FM4x77w6D5wMx3g0IAfa2D/pgY9a7bFQbinLDPz5dZi9ATIrd0cB5xfC0BfCCZO7TKP0jQ2Meih
nRXhkA3smTAnDN9IW2vA++lsgNuZ2QP0UhqyjUPrDmgfWP2bWWiKA+YiEK7xou8cY0+d3/bk0oHR
QLrq4KzDYF/ljQDmNhBHtkVNuoDey6TTeaD3SHO/Bf4d3IwGdqQp6FuhmwFbmbQBssDXVKDBYOpk
Jy7wxOaSRwr0rDmGbsFdCM+7XU/84JPu3D/gW7QXgzlvbjixn99/8CpWFUQWHFEz/RyXvzNXTTOd
OXLNNFc957Jn/YikNzEpUdRNxXcC6b76ccTwMGoKj5X7c7TvHFgc3Tf4892+5A+iR+D8OaaE6ACe
gdgHcyCoPm/xiDCWP+OZRjpzfj5/2u0i4qQfmIEOsTV9Hw6jZ3Agnh6hiwjDtGYxWvt5TiWEuabN
77YCyRXwO8P8wdzG/8489KwfFBZWI6Vvx76gmlOc03JI1HEfXYZEL4sNFQ3+bqf7e2hdSWQknwKF
ICJjGyDs3fdmnnxubKXebpQYLjPgEt9GTzKkUgTvOoQa1J7N3nv4sR6uvYFLhkXZ+pbCoU3K9bfq
gF7W82tNutRRZExad+k4GYYsCfmEbvizS4jsRr3fdzqjEthpEwm7pmN7OgVzRbrktjrFw1lc0vM8
V7dyTJ71qlsd7v3KhmHzeJB35pqEOk2pEe5uPeCToNkmedmxcKbIj+MZzjFSsvCmimaMQB1uJJKa
+hoWUi7aEFLvIxKxJavqpggXBIk2hr0608dIgnfG5ZEprqmH0b0YSy6jVXTCuIB+WER4d5BPVy9Q
M4taX0RIlDYxQ2CjBuq78AAcHQf5qoKP8BXHnDnd/+ed5fS+csL4g3eWqECaL+8suy9r8hx7c+4L
EegEWdqAWN1w1NezP34xsxLkvRRI0DRzKOg0U+BKfQY128YlYsbwSczEg2LqKxRmcgiwHdhc9MQJ
IwKQHlgBejWeMGDYYxTOQUiJOmIjJbzIzHH6lAMP+y/fR0v1g4wx4St8fcqTt3gz5wc+xXFZZ3qI
JpXI5iJk7xmNL2tYsDpcqu0375Snd5EKsIvg8u5szTOyZ4v06Ny2TZXRpHUSinh4IFp8Eoi7GINJ
02lPJnS/9jSxolJwp2slPMIEbjleWw3eec4XaetyEnSSqTPRZ9fVA0cPXMqzrPYQQyrRux3LaAh1
wujbgcObg1nt4iiJ5IMbc/WNPc280I2T4nTkdwG8H6iS5xO2WfsFsruBwf2QkgZlb6w7om2G65Lr
r2Gl4dk63F8rCEHoUJ3fW+pU2Srjlmcbp+JXY3DMifEI22HcHAvT7zzXiMTr7VbUR5a2lZtJkk4k
1heZZFdru8ucCWMTr3Z4eNnjLm7LW7rcN7QjMpxrsCzjxndeyFUX7deIs3PQkgyH8k6luI0uUyLr
va47TBjM4JmNHFzGPcP6BV6cYgQy8VQYZe5GmzZHMxyBYhGiUdekZQ/qwyxC3WGylQGdUpSf9ZCP
a7qPdJd31fPRC0TOgzupO7nLuBGr2A02yuUQwt2KQG31sW8Gd9tQiHq+hPDt4OzJuY4pS8XRsepY
tsd7dVEfJFmc15IYqwHverrpWyS1rFZibDPW1hUUb+85CGUzSBSTK8hpvee/ZxonW51TUXekMy3L
uy25tMTg4mqbSLQQJ+skiQu2toIfBFYrOWql+EQipgfT15P1aq6FDK3xgSjIGWde0BPftYchDTdM
i4QdudHFkN0u6fSKiT09QLv2mtSblt5nNzBR6UReePNs+khE4rHcXuoK21igUKHl1c3MXMgPu7y8
rKQDxR6N/rffXv+lROXet/9Q+l9I4D1U
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E
UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB
C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss
aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT
0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9
oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE
NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c
f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8
p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk
vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw
hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw
buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ
5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh
gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC
1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL
MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6
84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK
0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO
kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG
qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h
kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9
GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ=
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jnDOstQA4dMGCHbeihlyEIDMWmE62yJEiKE//7kXKdpEWLzYBt8evx
kRSzLPs6wiEoswM8YdMpjUXcq1Dz6RZa1cSiTkJdr86GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiEe2NtLAFikftnDco0ko/SFEVgEZ8aRCZDIPY9xbA8pE9M4jfW/B2CjiHq9zbJVZuOQq
siwTIvpxKYCembPAU4Muwi/Z4zfvrZ/MXipKeB8C+qisSZYiWfjJfs+0/MFMdWn1hJcO5U7G/SLa
xVx8zU6VG/PXLXvfsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCJN9dzKaoexyB/uH79TnjwvxcW0ntSb
yZ8jq1Z5Q1UXsyy3gf9nbjTEj7NzQMfCJa/YSmrQ+2D/BqfiOi6sclrGzvoeVivIj8rcfcmnIQRF
7XCyeZI7DFe5/lhlCs5PRf5QW66VXT/NrlQ46oD/D6InkOmi3IQcbhKxAX2g4a+Xd5s3UtCtG2py
m8eg6WYWqR6SL5OjKMGfSrYt/6kxxQtOpeAgj1LXBNmpE2ElmCSIy5H0zFd8gJ924HWijWhb2hRC
6wNEm1QdDZtuSZcEprIUBo/XRNcbQe1OUbQ/r3hPTaPJJDNtFLu8KHV5XoNr3Eo6h6YtOKw8e8yw
VF5PnJ+ts3a9/Mz38RpG/AUSzYUW
""")
##file python-config
PYTHON_CONFIG = convert("""
eJyNVV1P2zAUfc+v8ODBiSABxlulTipbO6p1LWqBgVhlhcZpPYUkctzSivHfd6+dpGloGH2Ja/ue
e+65Hz78xNhtf3x90xmw7vCWsRPGLvpDNuz87MKfdKMWSWxZ4ilNpCLZJiuWc66SVFUOZkkcirll
rfxIBAzOMtImDzSVPBRrekwoX/OZu/0r4lm0DHiG60g86u8sjPw5rCyy86NRkB8QuuBRSqfAKESn
3orLTCQxE3GYkC9tYp8fk89OSwNsmXgizrhUtnumeSgeo5GbLUMk49Rv+2nK48Cm/qMwfp333J2/
dVcAGE0CIQHBsgIeEr4Wij0LtWDLzJ9ze5YEvH2WI6CHTAVcSu9ZCsXtgxu81CIvp6/k4eXsdfo7
PvDCRD75yi41QitfzlcPp1OI7i/1/iQitqnr0iMgQ+A6wa+IKwwdxyk9IiXNAzgquTFU8NIxAVjM
osm1Zz526e+shQ4hKRVci69nPC3Kw4NQEmkQ65E7OodxorSvxjvpBjQHDmWFIQ1mlmzlS5vedseT
/mgIEsMJ7Lxz2bLAF9M5xeLEhdbHxpWOw0GdkJApMVBRF1y+a0z3c9WZPAXGFcFrJgCIB+024uad
0CrzmEoRa3Ub4swNIHPGf7QDV+2uj2OiFWsChgCwjKqN6rp5izpbH6Wc1O1TclQTP/XVwi6anTr1
1sbubjZLI1+VptPSdCfwnFBrB1jvebrTA9uUhU2/9gad7xPqeFkaQcnnLbCViZK8d7R1kxzFrIJV
8EaLYmKYpvGVkig+3C5HCXbM1jGCGekiM2pRCVPyRyXYdPf6kcbWEQ36F5V4Gq9N7icNNw+JHwRE
LTgxRXACpvnQv/PuT0xCCAywY/K4hE6Now2qDwaSE5FB+1agsoUveYDepS83qFcF1NufvULD3fTl
g6Hgf7WBt6lzMeiyyWVn3P1WVbwaczHmTzE9A5SyItTVgFYyvs/L/fXlaNgbw8v3azT+0eikVlWD
/vBHbzQumP23uBCjsYdrL9OWARwxs/nuLOzeXbPJTa/Xv6sUmQir5pC1YRLz3eA+CD8Z0XpcW8v9
MZWF36ryyXXf3yBIz6nzqz8Muyz0m5Qj7OexfYo/Ph3LqvkHUg7AuA==
""")
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
FAT_MAGIC = 0xcafebabe
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
LC_LOAD_DYLIB = 0xc
maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint')
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file.
Modified from macholib.
"""
def __init__(self, fileobj, start=0, size=maxint):
if isinstance(fileobj, fileview):
self._fileobj = fileobj._fileobj
else:
self._fileobj = fileobj
self._start = start
self._end = start + size
self._pos = 0
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._pos
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == os.SEEK_SET:
seekto += self._start
elif whence == os.SEEK_CUR:
seekto += self._start + self._pos
elif whence == os.SEEK_END:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
self._pos = seekto - self._start
def write(self, bytes):
here = self._start + self._pos
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.seek(here, os.SEEK_SET)
self._fileobj.write(bytes)
self._pos += len(bytes)
def read(self, size=maxint):
assert size >= 0
here = self._start + self._pos
self._checkwindow(here, 'read')
size = min(size, self._end - here)
self._fileobj.seek(here, os.SEEK_SET)
bytes = self._fileobj.read(size)
self._pos += len(bytes)
return bytes
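# Illustrative sketch (not part of the original source; the filename is
# hypothetical): expose bytes 16..48 of a file as a standalone stream.
#
#   fv = fileview(open('blob.bin', 'rb'), start=16, size=32)
#   fv.seek(0)          # lands on absolute offset 16 of the underlying file
#   chunk = fv.read(8)  # reads at most 8 bytes, clamped to the window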
def read_data(file, endian, num=1):
"""
    Read a given number of 32-bit unsigned integers from the given file
with the given endianness.
"""
res = struct.unpack(endian + 'L' * num, file.read(num * 4))
if len(res) == 1:
return res[0]
return res
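# Illustrative sketch (not in the original source; the file handle f is
# hypothetical): read_data returns a single int when num == 1, else a tuple.
#
#   magic = read_data(f, BIG_ENDIAN)                   # one 32-bit uint
#   cmd, cmdsize = read_data(f, LITTLE_ENDIAN, num=2)  # tuple of two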
def mach_o_change(path, what, value):
"""
Replace a given name (what) in any LC_LOAD_DYLIB command found in
the given binary with a new name (value), provided it's shorter.
"""
def do_macho(file, bits, endian):
# Read Mach-O header (the magic number is assumed read by the caller)
cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
        # The 64-bit header has one more field.
if bits == 64:
read_data(file, endian)
# The header is followed by ncmds commands
for n in range(ncmds):
where = file.tell()
# Read command header
cmd, cmdsize = read_data(file, endian, 2)
if cmd == LC_LOAD_DYLIB:
# The first data field in LC_LOAD_DYLIB commands is the
# offset of the name, starting from the beginning of the
# command.
name_offset = read_data(file, endian)
file.seek(where + name_offset, os.SEEK_SET)
# Read the NUL terminated string
load = file.read(cmdsize - name_offset).decode()
load = load[:load.index('\0')]
# If the string is what is being replaced, overwrite it.
if load == what:
file.seek(where + name_offset, os.SEEK_SET)
file.write(value.encode() + '\0'.encode())
# Seek to the next command
file.seek(where + cmdsize, os.SEEK_SET)
def do_file(file, offset=0, size=maxint):
file = fileview(file, offset, size)
# Read magic number
magic = read_data(file, BIG_ENDIAN)
if magic == FAT_MAGIC:
# Fat binaries contain nfat_arch Mach-O binaries
nfat_arch = read_data(file, BIG_ENDIAN)
for n in range(nfat_arch):
# Read arch header
cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
do_file(file, offset, size)
elif magic == MH_MAGIC:
do_macho(file, 32, BIG_ENDIAN)
elif magic == MH_CIGAM:
do_macho(file, 32, LITTLE_ENDIAN)
elif magic == MH_MAGIC_64:
do_macho(file, 64, BIG_ENDIAN)
elif magic == MH_CIGAM_64:
do_macho(file, 64, LITTLE_ENDIAN)
assert(len(what) >= len(value))
do_file(open(path, 'r+b'))
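# Usage sketch (hedged; not in the original source -- the paths are
# hypothetical, and the new name must be no longer than the old one):
#
#   mach_o_change('env/bin/python',
#                 '/System/Library/Frameworks/Python.framework/Python',
#                 '@executable_path/../Python')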
if __name__ == '__main__':
main()
# TODO:
# Copy python.exe.manifest
# Monkeypatch distutils.sysconfig
|
mit
| 8,873,541,175,533,266,000 | 40.924686 | 238 | 0.659651 | false |
gunan/tensorflow
|
tensorflow/python/framework/graph_util_test.py
|
1
|
21542
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.saver import export_meta_graph
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_4 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
@test_util.run_v1_only("b/120545219")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.VariableV1(0)
with ops.device(test_device_func_pin_variable_to_cpu):
var_1 = variables.VariableV1(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.VariableV1(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.VariableV1(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
    # It is fine to have loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testExtractSubGraphWithInvalidDestNodes(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
with self.assertRaisesRegexp(TypeError, "must be a list"):
graph_util.extract_sub_graph(graph_def, "n1")
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
new_node.input.extend(inputs)
return new_node
def create_constant_node_def(self,
name,
value,
dtype,
shape=None,
inputs=None):
node = self.create_node_def("Const", name, inputs or [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_node_def("Aop", "A", ["B"]),
self.create_node_def("Identity", "B", ["C"]),
self.create_node_def("Identity", "C", ["D"]),
self.create_node_def("Dop", "D", [])
])
expected_graph_def = graph_pb2.GraphDef()
expected_graph_def.node.extend([
self.create_node_def("Aop", "A", ["D"]),
self.create_node_def("Dop", "D", [])
])
self.assertProtoEquals(expected_graph_def,
graph_util.remove_training_nodes(graph_def))
def testRemoveIdentityUsedAsControlInputInConst(self):
"""Check that Identity nodes used as control inputs are not removed."""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertProtoEquals(graph_def,
graph_util.remove_training_nodes(graph_def))
class ConvertVariablesToConstantsTest(test.TestCase):
def _ensure_no_variables_in_graph(self, graph_def):
"""Ensures there are no variables in the graph."""
for node in graph_def.node:
self.assertNotIn(
node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
def _test_variable_to_const_conversion(self, use_resource):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=use_resource):
variable_node = variable_scope.get_variable(
"variable_node", initializer=1.0)
another_variable = variable_scope.get_variable(
"unused_variable_node", initializer=1.0)
output_node = math_ops_lib.multiply(
variable_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variable_node.initializer)
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is
        # set; note that if variable_names_whitelist is not set, an error will
        # be thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
self.evaluate(another_variable.initializer)
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(
str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Test variable name black list. This should result in the variable
# not being a const.
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
if use_resource:
self.assertEqual(variable_node.op, "VarHandleOp")
else:
self.assertEqual(variable_node.op, "VariableV2")
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
self._ensure_no_variables_in_graph(constant_graph_def)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
def test_resource_variable_can_be_written_after_blacklisting(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=True):
variable_node = variable_scope.get_variable(
"variable_node", initializer=1.0)
another_variable = variable_scope.get_variable(
"unused_variable_node", initializer=2.0)
with ops.control_dependencies([
variable_node.assign(another_variable + variable_node)]):
output_node = array_ops.identity(variable_node, name="output_node")
initializer_name = variable_node.initializer.name
with session.Session() as sess:
self.evaluate(variable_node.initializer)
self.evaluate(another_variable.initializer)
output = self.evaluate(output_node)
self.assertNear(3.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# Test variable name black list. This should result in the variable
# not being a const. Furthermore, the paths that read from and assign
# to the blacklisted variable should continue to be valid.
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node", initializer_name],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
self.assertEqual(variable_node.op, "VarHandleOp")
    # Now we make sure another_variable is now a constant, but the original
    # variable is not, and that the graph can be executed and the variable
    # can be updated with each execution.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def_with_blacklist, name="")
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
self.evaluate(sess.graph.get_operation_by_name(initializer_name))
output = self.evaluate(output_node)
self.assertNear(3.0, output, 0.00001)
output = self.evaluate(output_node)
self.assertNear(5.0, output, 0.00001)
def _inline_functions(self, graph_def, arrays):
meta_graph = export_meta_graph(graph_def=graph_def)
fetch_collection = meta_graph_pb2.CollectionDef()
for name in arrays:
fetch_collection.node_list.value.append(name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function
# inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.optimizers.append("function")
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _test_convert_variables_with_functions(self, inline_functions):
"""Freezes a graph with functions."""
@function.Defun(dtypes.float32)
def plus_one(x):
return x + 1.0
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
defun_node = plus_one(variable_node)
_ = math_ops_lib.multiply(defun_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variables.variables_initializer([variable_node]))
variable_graph_def = sess.graph.as_graph_def()
if inline_functions:
# Run Grappler to create the VarOpHandle --> Placeholder -->
# ResourceVariable pattern.
variable_graph_def = self._inline_functions(
variable_graph_def, ["variable_node", "output_node"])
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"])
self._ensure_no_variables_in_graph(constant_graph_def)
def testReferenceVariables(self):
"""Freezes a graph with reference variables."""
self._test_variable_to_const_conversion(use_resource=False)
def testResourceVariables(self):
"""Freezes a graph with resource variables."""
self._test_variable_to_const_conversion(use_resource=True)
def testWithFunctions(self):
"""Freezes a graph with functions."""
self._test_convert_variables_with_functions(inline_functions=False)
def testWithInlinedFunctions(self):
"""Freezes a graph with functions that have been inlined using Grappler."""
self._test_convert_variables_with_functions(inline_functions=True)
def testGraphWithSwitch(self):
"""Freezes a graph which contains a Switch with type RESOURCE_DT."""
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=True):
x = variable_scope.get_variable("var_x", initializer=1.0)
y = variable_scope.get_variable("var_y", initializer=2.0)
f1 = lambda: variable_scope.get_variable("var_f1", initializer=17.0)
f2 = lambda: variable_scope.get_variable("var_f2", initializer=23.0)
cond_node = control_flow_ops.case([(gen_math_ops.less(x, y), f1)],
default=f2)
_ = math_ops_lib.multiply(cond_node, 2.0, name="output_node")
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
variable_graph_def = sess.graph.as_graph_def()
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"])
self._ensure_no_variables_in_graph(constant_graph_def)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -4,882,040,189,363,117,000 | 40.586873 | 80 | 0.639541 | false |
brownplt/k3
|
dj-plt-belay/pltbelay/models.py
|
1
|
1096
|
import belaylibs.models as bcap
from django.db import models
class BelayAccount(bcap.Grantable):
station_url = models.CharField(max_length=200)
class PendingLogin(bcap.Grantable):
# Key is for this server to trust the openID provider's request
key = models.CharField(max_length=36)
# ClientKey is a secret provided by the client to trust that new
# windows were served from this server
clientkey = models.CharField(max_length=36)
class PltCredentials(bcap.Grantable):
username = models.CharField(max_length=200)
salt = models.CharField(max_length=200)
hashed_password = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class GoogleCredentials(bcap.Grantable):
identity = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class BelaySession(bcap.Grantable):
session_id = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class Stash(bcap.Grantable):
stashed_content = models.TextField(max_length=1000)
class PendingAccount(bcap.Grantable):
email = models.TextField(max_length=100)
|
apache-2.0
| 2,200,626,802,297,134,300 | 32.212121 | 66 | 0.778285 | false |
DeppSRL/open_bilanci
|
bilanci_project/bilanci/management/commands/couch2pg.py
|
1
|
27680
|
from collections import OrderedDict
import logging
from optparse import make_option
from pprint import pprint
from os import listdir
from os.path import isfile, join
from django.conf import settings
from django.core.management import BaseCommand, call_command
from django.db import connection
from django.db.transaction import set_autocommit, commit
from django.utils.text import slugify
from bilanci import tree_models
from bilanci.models import Voce, ValoreBilancio, ImportXmlBilancio
from bilanci.utils import couch, gdocs, email_utils
from bilanci.utils.comuni import FLMapper
from territori.models import Territorio, ObjectDoesNotExist
from .somma_funzioni import SommaFunzioniMixin
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--dry-run',
dest='dryrun',
action='store_true',
default=False,
help='Set the dry-run command mode: nothing is written in the couchdb'),
make_option('--complete',
dest='complete',
action='store_true',
default=False,
help='After data import calculate indicators and updates opendata zip file'),
make_option('--years',
dest='years',
default='',
                    help='Years to fetch. From 2002 to 2012. Use one of these formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help='Cities codes or slugs. Use comma to separate values: Roma,Napoli,Torino or "All"'),
make_option('--start-from',
dest='start_from',
default='',
                    help='Start importing cities from the given city. Use cod_finloc: GARAGUSO--4170470090'),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server alias to connect to (staging | localhost). Defaults to staging.'),
make_option('--create-tree',
dest='create_tree',
action='store_true',
default=False,
help='Force recreating simplified tree leaves from csv file or gdocs (remove all values)'),
make_option('--force-google',
dest='force_google',
action='store_true',
default=False,
help='Force reloading mapping file and simplified subtrees leaves from gdocs (invalidate the csv cache)'),
make_option('--tree-node-slug',
dest='tree_node_slug',
default=None,
help='Voce slug of the tree model to start the import from. Example: consuntivo-entrate-imposte-e-tasse'),
make_option('--couch-path',
dest='couch_path_string',
default=None,
help='CouchDB keys sequence (CSV) to identify the import starting point. '
'Must be specified together with the treee-node-slug option. '
'Example: consuntivo,entrate,imposte'),
make_option('--append',
dest='append',
action='store_true',
default=False,
help='Use the log file appending instead of overwriting (used when launching shell scripts)'),
)
help = 'Import values from the simplified couchdb database into a Postgresql server'
dryrun = False
logger = logging.getLogger('management')
partial_import = False
couch_path = None
accepted_bilanci_types = ['preventivo', 'consuntivo']
somma_funzioni_branches = [
'preventivo-spese-spese-somma-funzioni',
'consuntivo-spese-cassa-spese-somma-funzioni',
'consuntivo-spese-impegni-spese-somma-funzioni',
]
considered_tipo_bilancio = accepted_bilanci_types
considered_somma_funzioni = somma_funzioni_branches
# somma_funzioni_slug_baseset: dict that stores the slugs needed to compute somma funzioni branches
somma_funzioni_slug_baseset = {}
    # if the import is partial, root_treenode is the root node of the sub-tree to be imported
root_treenode = None
root_descendants = None
import_set = OrderedDict()
imported_xml = None
years = None
cities_param = None
cities = None
voci_dict = None
couchdb = None
comuni_dicts = {}
def apply_somma_funzioni_patch(self, voce_sum, vb_filters, vb_dict):
"""
Compute spese correnti and spese per investimenti for funzioni, and write into spese-somma
Overwrite values if found.
"""
components = voce_sum.get_components_somma_funzioni()
# self.logger.debug("Applying somma_funzioni_patch to {0}".format(voce_sum.slug))
vb = []
for c in components:
try:
vb.append(vb_dict[c.slug])
except KeyError:
self.logger.error("Somma funz: cannot find slug: {} in vb_dict".format(c.slug))
return
valore = vb[0]['valore'] + vb[1]['valore']
valore_procapite = vb[0]['valore_procapite'] + vb[1]['valore_procapite']
ValoreBilancio.objects.create(
territorio=vb_filters['territorio'],
anno=vb_filters['anno'],
voce=voce_sum,
valore=valore,
valore_procapite=valore_procapite
)
def create_voci_tree(self, force_google):
"""
Create a Voci tree. If the tree exists, then it is deleted.
"""
if Voce.objects.count() > 0:
Voce.objects.all().delete()
        # get simplified leaves (from csv or gdocs) to build the Voce tree
simplified_leaves = gdocs.get_simplified_leaves(force_google=force_google)
self.create_voci_preventivo_tree(simplified_leaves)
self.create_voci_consuntivo_tree(simplified_leaves)
sf = SommaFunzioniMixin()
sf.create_somma_funzioni()
def create_voci_preventivo_tree(self, simplified_leaves):
# create preventivo root
subtree_node = Voce(denominazione='Preventivo', slug='preventivo')
subtree_node.insert_at(None, save=True, position='last-child')
# the preventivo subsections
subtrees = OrderedDict([
('preventivo-entrate', 'Preventivo entrate'),
('preventivo-spese', 'Preventivo spese'),
])
# add all leaves from the preventivo sections under preventivo
# entrate and spese are already considered
for subtree_slug, subtree_denominazione in subtrees.items():
for leaf_bc in simplified_leaves[subtree_slug]:
# add this leaf to the subtree, adding all needed intermediate nodes
self.add_leaf(leaf_bc, subtree_node)
def create_voci_consuntivo_tree(self, simplified_leaves):
# create consuntivo root
subtree_node = Voce(denominazione='Consuntivo', slug='consuntivo')
subtree_node.insert_at(None, save=True, position='last-child')
subtrees = OrderedDict([
('consuntivo-entrate', {
'denominazione': u'Consuntivo entrate',
'sections': [u'Accertamenti', u'Riscossioni in conto competenza', u'Riscossioni in conto residui',
u'Cassa']
}),
('consuntivo-spese', {
'denominazione': u'Consuntivo spese',
'sections': [u'Impegni', u'Pagamenti in conto competenza', u'Pagamenti in conto residui', u'Cassa']
}),
])
for subtree_slug, subtree_structure in subtrees.items():
for section_name in subtree_structure['sections']:
for leaf_bc in simplified_leaves[subtree_slug]:
bc = leaf_bc[:]
bc.insert(1, section_name)
self.add_leaf(bc, subtree_node, section_slug=slugify(section_name))
def add_leaf(self, breadcrumbs, subtree_node, section_slug=''):
"""
Add a leaf to the subtree, given the breadcrumbs list.
Creates the needed nodes in the process.
"""
self.logger.info(u"adding leaf {}".format(",".join(breadcrumbs)))
# skip 'totale' leaves (as totals values are attached to non-leaf nodes)
if 'totale' in [bc.lower() for bc in breadcrumbs]:
self.logger.info(u"skipped leaf {}".format(",".join(breadcrumbs)))
return
# copy breadcrumbs and remove last elements if empty
bc = breadcrumbs[:]
while not bc[-1]:
bc.pop()
prefix_slug = subtree_node.slug
current_node = subtree_node
for item in bc:
if current_node.get_children().filter(denominazione__iexact=item).count() == 0:
slug = u"{0}-{1}".format(prefix_slug, u"-".join(slugify(unicode(i)) for i in bc[0:bc.index(item) + 1]))
node = Voce(denominazione=item, slug=slug)
node.insert_at(current_node, save=True, position='last-child')
if bc[-1] == item:
return
else:
node = current_node.get_children().get(denominazione__iexact=item)
current_node = node
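    # Illustrative call (hypothetical breadcrumbs, not in the original
    # source); intermediate nodes are created on the way to the leaf:
    #
    #   self.add_leaf([u'Entrate', u'Imposte e tasse', u'Imposte'], subtree_node)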
def couch_connect(self, couchdb_server):
# connect to couch database
couchdb_server_alias = couchdb_server
couchdb_dbname = settings.COUCHDB_SIMPLIFIED_NAME
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server alias.")
self.couchdb = couch.connect(
couchdb_dbname,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
def set_years(self, years):
# set considered years considering cases with - and ,
# Example
# 2003-2006
# or 2003,2004,2010
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years_list = range(int(start_year), int(end_year) + 1)
else:
years_list = [int(y.strip()) for y in years.split(",") if
settings.APP_START_YEAR <= int(y.strip()) <= settings.APP_END_YEAR]
if not years_list:
raise Exception("No suitable year found in {0}".format(years))
self.years = years_list
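        # e.g. (illustrative, assuming the years fall inside
        # settings.APP_START_YEAR..APP_END_YEAR):
        #   set_years('2003-2006')  ->  self.years == [2003, 2004, 2005, 2006]
        #   set_years('2003,2010')  ->  self.years == [2003, 2010]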
def set_cities(self, cities_codes, start_from):
# set considered cities
mapper = FLMapper()
if not cities_codes:
if start_from:
cities_codes = 'all'
all_cities = mapper.get_cities(cities_codes, logger=self.logger)
try:
cities_finloc = all_cities[all_cities.index(start_from):]
except ValueError:
raise Exception("Start-from city not found in cities complete list, use name--cod_finloc. "
"Example: ZUNGRI--4181030500")
else:
self.logger.info("Processing cities starting from: {0}".format(start_from))
else:
raise Exception("Missing cities parameter or start-from parameter")
else:
cities_finloc = mapper.get_cities(cities_codes, logger=self.logger)
finloc_numbers = [c[-10:] for c in cities_finloc]
slug_list = []
for numb in finloc_numbers:
slug_list.append(Territorio.objects.get(territorio="C", cod_finloc__endswith=numb).slug)
self.cities = Territorio.objects.filter(territorio="C", slug__in=slug_list)
def checks_partial_import(self, tree_node_slug, couch_path_string):
        # Based on the type of import, sets the type of bilancio considered
        # and the somma-funzioni branches considered by the import
self.partial_import = True
        # depending on tree node slug and couch path string, sets the considered tipo bilancio
self.couch_path = [unicode(x) for x in couch_path_string.split(",")]
# check that tree_node_slug exists in the Voce tree
try:
self.root_treenode = Voce.objects.get(slug=tree_node_slug)
except ObjectDoesNotExist:
self.logger.error(
"Voce with slug:{0} not present in Voce table. "
"Run update_bilancio_tree before running couch2pg".format(
tree_node_slug))
exit()
self.root_descendants = self.root_treenode.get_descendants(include_self=True)
self.considered_tipo_bilancio = self.root_treenode. \
get_ancestors(include_self=True, ascending=False). \
get(slug__in=self.accepted_bilanci_types).slug
# checks which branches of somma-funzioni are interested by the import
self.considered_somma_funzioni = self.root_descendants. \
filter(slug__in=self.somma_funzioni_branches). \
values_list('slug', flat=True)
def prepare_for_import(self):
##
# prepare_for_import
# 1) creates the import_set: the complete dict of cities, years and tipo bilancio that will be imported by the
# task
# 2) creates values_to_delete: a queryset that includes all ValoriBilancio
# that correspond to the bilancio selected by the import
# 3) gets the info about Xml import and removes the keys relative to cities, years and tipo_bilancio
# that have been imported via Xml
# 4) excludes from values_to_delete the values of bilancio imported via XML: they won't be deleted
# 5) fills somma_funzioni_slug_baseset with a dict that associates the slug of the root node of a
# somma-funzioni branch with the set of slugs needed to create it
# creates a dict with year as a key and value: a list of considered_bilancio_type(s)
years_dict = OrderedDict((year, self.considered_tipo_bilancio) for year in self.years)
# creates a dict in which for each city considered the value is the previous dict
self.import_set = OrderedDict((territorio, years_dict) for territorio in self.cities)
# construct values_to_delete
values_to_delete = ValoreBilancio.objects.filter(territorio__in=self.cities, anno__in=self.years)
if self.partial_import:
values_to_delete = values_to_delete.filter(voce__in=self.root_descendants)
# get data about ImportXml: if there is data that has been imported from XML for a city/ year
# then the couch import must NOT overwrite that data
self.imported_xml = ImportXmlBilancio.objects. \
filter(territorio__in=self.cities, anno__in=self.years, tipologia__in=self.considered_tipo_bilancio). \
order_by('territorio', 'anno')
if len(self.imported_xml) > 0:
for i in self.imported_xml:
self.logger.warning(
"BILANCIO:{} YEAR:{} CITY:{} will have to be reimported again: it was imported with xml". \
format(i.tipologia.title(), i.anno, i.territorio.denominazione))
# deletes ValoriBilanci that will be imported afterwards: this speeds up the import
if self.partial_import:
self.logger.info("Deleting values for selected cities, years and subtree")
else:
self.logger.info("Deleting values for selected cities, years")
if not self.dryrun and ValoreBilancio.objects.all().count() > 0:
if self.partial_import is False and self.cities_param.lower() == 'all':
# sql query to delete all values in ValoreBilancio table: this should cut the time
cursor = connection.cursor()
cursor.execute("TRUNCATE bilanci_valorebilancio", )
else:
values_to_delete.delete()
self.logger.info("Done deleting")
# creates somma_funzioni_slug_baseset
for slug in self.considered_somma_funzioni:
components = Voce.objects.get(slug=slug).get_components_somma_funzioni()
descendants = []
for c in components:
descendants.extend(c.get_descendants(include_self=True))
self.somma_funzioni_slug_baseset[slug] = descendants
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
self.dryrun = options['dryrun']
complete = options['complete']
force_google = options['force_google']
create_tree = options['create_tree']
tree_node_slug = options['tree_node_slug']
couch_path_string = options['couch_path_string']
if tree_node_slug and couch_path_string is None or couch_path_string and tree_node_slug is None:
self.logger.error("Couch path and tree node must be both specified. Quitting")
exit()
if options['append'] is True:
self.logger = logging.getLogger('management_append')
###
# connect to couchdb
###
self.couch_connect(options['couchdb_server'])
###
# cities
###
self.cities_param = options['cities']
start_from = options['start_from']
self.set_cities(self.cities_param, start_from)
if len(self.cities) == 0:
self.logger.info("No cities to process. Quit")
return
# check if debug is active: the task may fail
if settings.DEBUG is True and settings.INSTANCE_TYPE != 'development' and len(self.cities) > 4000:
self.logger.error("DEBUG settings is True, task will fail. Disable DEBUG and retry")
exit()
###
# set considered years
###
self.set_years(options['years'])
# if it's a partial import
# * checks which kind of bilancio is considered
# * checks which branch of somma-funzioni has to be calculated
if tree_node_slug and couch_path_string:
tree_node_slug = unicode(tree_node_slug)
couch_path_string = unicode(couch_path_string)
self.checks_partial_import(tree_node_slug, couch_path_string)
# create the tree if it does not exist or if forced to do so
if create_tree or Voce.objects.count() == 0:
if not self.dryrun:
self.create_voci_tree(force_google=force_google)
# build the map of slug to pk for the Voce tree
self.voci_dict = Voce.objects.get_dict_by_slug()
        # considering the years, cities and limitations set, creates a comprehensive map of all bilanci
        # to be imported and deletes old values before the import
self.prepare_for_import()
counter = 100
for territorio, city_years in self.import_set.iteritems():
city_finloc = territorio.cod_finloc
# get all budgets data for the city
city_budget = self.couchdb.get(city_finloc)
if city_budget is None:
                # if the city budget is not found, try again after removing the apostrophe and
                # re-slugging; this deals with slug name changes from finanza locale
if "'" in territorio.nome:
nome_senza_apostrofo = territorio.nome.replace("'", "")
finloc_number = city_finloc[-10:]
city_finloc_noapostrophe = u"{}--{}".format(slugify(nome_senza_apostrofo), finloc_number).upper()
city_budget = self.couchdb.get(city_finloc_noapostrophe)
if city_budget is None:
self.logger.warning(u"Document '{}' or '{}' not found in couchdb instance. Skipping.".format(city_finloc, city_finloc_noapostrophe))
continue
else:
self.logger.warning(u"Document '{}' not found in couchdb instance. Skipping.".format(city_finloc))
continue
self.logger.debug(u"City of {0}".format(city_finloc))
if counter == 100:
self.logger.info(u"Reached city of '{0}', continuing...".format(city_finloc))
counter = 0
else:
counter += 1
for year, certificati_to_import in city_years.iteritems():
if str(year) not in city_budget:
self.logger.warning(u" {} - {} not found. Skip".format(city_finloc, year))
continue
# POPULATION
# fetch valid population, starting from this year
                # if no population is found, set it to None so as not to break things
try:
(pop_year, population) = territorio.nearest_valid_population(year)
except TypeError:
population = None
# self.logger.debug("::Population: {0}".format(population))
# build a BilancioItem tree, out of the couch-extracted dict
# for the given city and year
                # add the totals by extracting them from the dict, or by computing them
city_year_budget_dict = city_budget[str(year)]
if self.partial_import is True:
self.logger.info(u"- Processing year: {}, subtree: {}".format(year, tree_node_slug))
# start from a custom node
path_not_found = False
city_year_budget_node_dict = city_year_budget_dict.copy()
# get the starting node in couchdb data
for k in self.couch_path:
try:
city_year_budget_node_dict = city_year_budget_node_dict[k]
except KeyError:
self.logger.warning(
"Couch path:{0} not present for {1}, anno:{2}".format(self.couch_path,
territorio.cod_finloc,
str(year)))
path_not_found = True
break
# if data path is found in the couch document, write data into postgres db
if path_not_found is False:
city_year_node_tree_patch = tree_models.make_tree_from_dict(
city_year_budget_node_dict, self.voci_dict, path=[tree_node_slug],
population=population
)
# writes new sub-tree
if not self.dryrun:
tree_models.write_tree_to_vb_db(territorio, year, city_year_node_tree_patch, self.voci_dict)
else:
                    # import the considered tipo_bilancio:
                    # normally both preventivo and consuntivo,
                    # otherwise only one of them
for tipo_bilancio in certificati_to_import:
certificato_tree = tree_models.make_tree_from_dict(
city_year_budget_dict[tipo_bilancio], self.voci_dict, path=[unicode(tipo_bilancio)],
population=population
)
if len(certificato_tree.children) == 0:
continue
self.logger.debug(u"- Processing year: {} bilancio: {}".format(year, tipo_bilancio))
if not self.dryrun:
tree_models.write_tree_to_vb_db(territorio, year, certificato_tree, self.voci_dict)
# applies somma-funzioni patch only to the interested somma-funzioni branches (if any)
if len(self.considered_somma_funzioni) > 0:
self.logger.debug("Somma funzioni patch")
vb_filters = {
'territorio': territorio,
'anno': year,
}
for somma_funzioni_branch in self.considered_somma_funzioni:
# get data for somma-funzioni patch, getting only the needed ValoreBilancio using the
# somma_funzioni_slug_baseset
needed_slugs = self.somma_funzioni_slug_baseset[somma_funzioni_branch]
vb = ValoreBilancio.objects. \
filter(**vb_filters). \
filter(voce__slug__in=needed_slugs). \
values_list('voce__slug', 'valore', 'valore_procapite')
if len(vb) == 0:
self.logger.debug("Skipping {} branch: no values in db".format(somma_funzioni_branch))
continue
vb_dict = dict((v[0], {'valore': v[1], 'valore_procapite': v[2]}) for v in vb)
if not self.dryrun:
for voce_slug in Voce.objects.get(slug=somma_funzioni_branch).get_descendants(
include_self=True):
self.apply_somma_funzioni_patch(voce_slug, vb_filters, vb_dict)
del vb_dict
            # actually save data into postgres
self.logger.debug("Write valori bilancio to postgres")
self.logger.info("Done importing couchDB values into postgres")
if self.cities_param.lower() != 'all':
for bilancio_xml in self.imported_xml:
self.logger.info(
"IMPORTANT: Re-import XML bilancio {},{},{}".format(bilancio_xml.territorio, bilancio_xml.anno,
bilancio_xml.tipologia))
else:
# directly import xml files in default folder for bilancio XML
xml_path = settings.OPENDATA_XML_ROOT
xml_files = [f for f in listdir(xml_path) if isfile(join(xml_path, f))]
for f in xml_files:
self.logger.info(u"Import XML bilancio file:'{}'".format(f))
call_command('xml2pg', verbosity=1, file=f, interactive=False)
if len(xml_files) != len(self.imported_xml):
self.logger.error(
"Found {} Xml files compared to {} objs in ImportXML table in DB!!".format(len(xml_files),
len(self.imported_xml)))
if complete and not self.dryrun and not self.partial_import:
##
# complete the import with medians, indicators and update opendata (zip files)
##
self.logger.info(u"Update indicators medians")
call_command('data_completion', verbosity=2, years=options['years'], cities=options['cities'],
interactive=False)
email_utils.send_notification_email(
msg_string="Couch2pg, update opendata, indicators and medians has finished.")
else:
email_utils.send_notification_email(msg_string="Couch2pg has finished.")
|
mit
| 6,425,402,298,802,851,000 | 43.718901 | 156 | 0.571676 | false |
audreyr/cookiecutter
|
tests/test_specify_output_dir.py
|
1
|
2352
|
"""Tests for cookiecutter's output directory customization feature."""
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
'cookiecutter': {
'email': 'raphael@hackebrot.de',
'full_name': 'Raphael Pierzina',
'github_username': 'hackebrot',
'version': '0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
"""Fixture to prepare test output directory."""
return str(tmpdir.mkdir('output'))
@pytest.fixture
def template(tmpdir):
"""Fixture to prepare test template directory."""
template_dir = tmpdir.mkdir('template')
template_dir.join('cookiecutter.json').ensure(file=True)
return str(template_dir)
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
"""Fixture. Automatically mock cookiecutter's function with expected output."""
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
"""Fixture. Automatically mock cookiecutter's function with expected output."""
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
"""Fixture. Automatically mock cookiecutter's function with expected output."""
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
"""Verify output dir location is correctly passed."""
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
repo_dir=template,
context=context,
overwrite_if_exists=False,
skip_if_file_exists=False,
output_dir=output_dir,
accept_hooks=True,
)
def test_default_output_dir(mocker, template, context):
"""Verify default output dir is current working folder."""
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template)
mock_gen_files.assert_called_once_with(
repo_dir=template,
context=context,
overwrite_if_exists=False,
skip_if_file_exists=False,
output_dir='.',
accept_hooks=True,
)
|
bsd-3-clause
| 3,686,762,791,754,625,000 | 28.037037 | 83 | 0.679847 | false |
boooka/GeoPowerOff
|
venv/lib/python2.7/site-packages/test/grab_get_request.py
|
1
|
1292
|
# coding: utf-8
from unittest import TestCase
import os
from grab import Grab, GrabMisuseError
from .util import (GRAB_TRANSPORT, TMP_DIR,
ignore_transport, only_transport)
from .tornado_util import SERVER
from grab.extension import register_extensions
class GrabSimpleTestCase(TestCase):
def setUp(self):
SERVER.reset()
def test_get(self):
SERVER.RESPONSE['get'] = 'Final Countdown'
g = Grab(transport=GRAB_TRANSPORT)
g.go(SERVER.BASE_URL)
self.assertTrue('Final Countdown' in g.response.body)
def test_body_content(self):
SERVER.RESPONSE['get'] = 'Simple String'
g = Grab(transport=GRAB_TRANSPORT)
g.go(SERVER.BASE_URL)
self.assertEqual('Simple String', g.response.body)
#self.assertEqual('Simple String' in g.response.runtime_body)
def test_status_code(self):
SERVER.RESPONSE['get'] = 'Simple String'
g = Grab(transport=GRAB_TRANSPORT)
g.go(SERVER.BASE_URL)
self.assertEqual(200, g.response.code)
def test_parsing_response_headers(self):
SERVER.RESPONSE['headers'] = [('Hello', 'Grab')]
g = Grab(transport=GRAB_TRANSPORT)
g.go(SERVER.BASE_URL)
self.assertTrue(g.response.headers['Hello'] == 'Grab')
|
apache-2.0
| -8,435,255,135,616,183,000 | 32.128205 | 69 | 0.652477 | false |
beetbox/audioread
|
audioread/rawread.py
|
1
|
4378
|
# This file is part of audioread.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses standard-library modules to read AIFF, AIFF-C, and WAV files."""
import wave
import aifc
import sunau
import audioop
import struct
import sys
from .exceptions import DecodeError
# Produce two-byte (16-bit) output samples.
TARGET_WIDTH = 2
# Python 3.4 added support for 24-bit (3-byte) samples.
if sys.version_info > (3, 4, 0):
SUPPORTED_WIDTHS = (1, 2, 3, 4)
else:
SUPPORTED_WIDTHS = (1, 2, 4)
class UnsupportedError(DecodeError):
"""File is not an AIFF, WAV, or Au file."""
class BitWidthError(DecodeError):
"""The file uses an unsupported bit width."""
def byteswap(s):
"""Swaps the endianness of the bytestring s, which must be an array
of shorts (16-bit signed integers). This is probably less efficient
than it should be.
"""
assert len(s) % 2 == 0
parts = []
for i in range(0, len(s), 2):
chunk = s[i:i + 2]
newchunk = struct.pack('<h', *struct.unpack('>h', chunk))
parts.append(newchunk)
return b''.join(parts)
class RawAudioFile(object):
"""An AIFF, WAV, or Au file that can be read by the Python standard
library modules ``wave``, ``aifc``, and ``sunau``.
"""
def __init__(self, filename):
self._fh = open(filename, 'rb')
try:
self._file = aifc.open(self._fh)
except aifc.Error:
# Return to the beginning of the file to try the next reader.
self._fh.seek(0)
else:
self._needs_byteswap = True
self._check()
return
try:
self._file = wave.open(self._fh)
except wave.Error:
self._fh.seek(0)
pass
else:
self._needs_byteswap = False
self._check()
return
try:
self._file = sunau.open(self._fh)
except sunau.Error:
self._fh.seek(0)
pass
else:
self._needs_byteswap = True
self._check()
return
# None of the three libraries could open the file.
self._fh.close()
raise UnsupportedError()
def _check(self):
"""Check that the files' parameters allow us to decode it and
raise an error otherwise.
"""
if self._file.getsampwidth() not in SUPPORTED_WIDTHS:
self.close()
raise BitWidthError()
def close(self):
"""Close the underlying file."""
self._file.close()
self._fh.close()
@property
def channels(self):
"""Number of audio channels."""
return self._file.getnchannels()
@property
def samplerate(self):
"""Sample rate in Hz."""
return self._file.getframerate()
@property
def duration(self):
"""Length of the audio in seconds (a float)."""
return float(self._file.getnframes()) / self.samplerate
def read_data(self, block_samples=1024):
"""Generates blocks of PCM data found in the file."""
old_width = self._file.getsampwidth()
while True:
data = self._file.readframes(block_samples)
if not data:
break
# Make sure we have the desired bitdepth and endianness.
data = audioop.lin2lin(data, old_width, TARGET_WIDTH)
if self._needs_byteswap and self._file.getcomptype() != 'sowt':
# Big-endian data. Swap endianness.
data = byteswap(data)
yield data
# Context manager.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
# Iteration.
def __iter__(self):
return self.read_data()
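# Usage sketch (illustrative; not part of the original module -- the filename
# is hypothetical):
#
#   with RawAudioFile('clip.wav') as f:
#       print(f.channels, f.samplerate, f.duration)
#       for block in f.read_data():
#           pass  # each block is 16-bit little-endian PCM bytes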
|
mit
| 6,935,302,251,535,896,000 | 27.802632 | 75 | 0.595021 | false |
rowinggolfer/openmolar2
|
src/tests/class_tests/testTreatmentItem.py
|
1
|
3699
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
import os, sys
lib_openmolar_path = os.path.abspath("../../")
if not lib_openmolar_path == sys.path[0]:
sys.path.insert(0, lib_openmolar_path)
from lib_openmolar.common.db_orm.treatment_item import TreatmentItem
from lib_openmolar.client.connect import ClientConnection
from lib_openmolar.client.db_orm.patient_model import PatientModel
from lib_openmolar.client.qt4.widgets import ToothData
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
#ClientConnection().connect()
#pt = PatientModel(2)
#SETTINGS.set_current_patient(pt)
pass
def tearDown(self):
pass
def spawn_all_proc_code_tis(self):
'''
create all treatment items generated from procedure codes
'''
for proc_code in SETTINGS.PROCEDURE_CODES:
item = TreatmentItem(proc_code)
item.set_px_clinician(1)
if item.tooth_required:
item.set_teeth([7])
if item.surfaces_required:
fill, surfs = "MODBL",""
for char in fill:
surfs += char
try:
item.set_surfaces(surfs)
except TreatmentItemException:
pass
if item.pontics_required:
continue
##TODO - this is busted!
if item.is_bridge:
pontics = [2,3,4,5,6]
i = 0
while i < 5 and item.entered_span < item.required_span:
i += 1
item.set_pontics(pontics[:i])
elif item.is_prosthetics:
item.set_pontics([3,4])
yield item
def test_proc_codes(self):
for item in self.spawn_all_proc_code_tis():
valid, errors = item.check_valid()
self.assertTrue(valid, "%s %s"% (item, errors))
def test_proc_codes_are_chartable(self):
for item in self.spawn_all_proc_code_tis():
if item.is_chartable:
td = ToothData(item.tooth)
td.from_treatment_item(item)
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| 5,649,325,461,879,931,000 | 38.774194 | 79 | 0.47283 | false |
andrewgodwin/django-channels
|
channels/generic/http.py
|
1
|
3131
|
from channels.consumer import AsyncConsumer
from ..exceptions import StopConsumer
class AsyncHttpConsumer(AsyncConsumer):
"""
Async HTTP consumer. Provides basic primitives for building asynchronous
HTTP endpoints.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.body = []
async def send_headers(self, *, status=200, headers=None):
"""
Sets the HTTP response status and headers. Headers may be provided as
a list of tuples or as a dictionary.
Note that the ASGI spec requires that the protocol server only starts
sending the response to the client after ``self.send_body`` has been
called the first time.
"""
if headers is None:
headers = []
elif isinstance(headers, dict):
headers = list(headers.items())
await self.send(
{"type": "http.response.start", "status": status, "headers": headers}
)
async def send_body(self, body, *, more_body=False):
"""
Sends a response body to the client. The method expects a bytestring.
Set ``more_body=True`` if you want to send more body content later.
The default behavior closes the response, and further messages on
the channel will be ignored.
"""
assert isinstance(body, bytes), "Body is not bytes"
await self.send(
{"type": "http.response.body", "body": body, "more_body": more_body}
)
async def send_response(self, status, body, **kwargs):
"""
Sends a response to the client. This is a thin wrapper over
``self.send_headers`` and ``self.send_body``, and everything said
above applies here as well. This method may only be called once.
"""
await self.send_headers(status=status, **kwargs)
await self.send_body(body)
async def handle(self, body):
"""
Receives the request body as a bytestring. Response may be composed
using the ``self.send*`` methods; the return value of this method is
thrown away.
"""
raise NotImplementedError(
"Subclasses of AsyncHttpConsumer must provide a handle() method."
)
async def disconnect(self):
"""
Overrideable place to run disconnect handling. Do not send anything
from here.
"""
pass
async def http_request(self, message):
"""
Async entrypoint - concatenates body fragments and hands off control
to ``self.handle`` when the body has been completely received.
"""
if "body" in message:
self.body.append(message["body"])
if not message.get("more_body"):
try:
await self.handle(b"".join(self.body))
finally:
await self.disconnect()
raise StopConsumer()
async def http_disconnect(self, message):
"""
Let the user do their cleanup and close the consumer.
"""
await self.disconnect()
raise StopConsumer()
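# A minimal consumer sketch, added here for illustration only (it is not part
# of this module in the real package; the response text is an assumption):
#
#     class EchoLengthConsumer(AsyncHttpConsumer):
#         async def handle(self, body):
#             await self.send_response(
#                 200,
#                 b"Received %d bytes" % len(body),
#                 headers=[(b"Content-Type", b"text/plain")],
#             )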
|
bsd-3-clause
| -8,351,045,490,236,470,000 | 33.032609 | 81 | 0.597253 | false |
CartoDB/cartoframes
|
cartoframes/viz/constants.py
|
1
|
2813
|
CARTO_VL_VERSION = 'v1.4'
CARTO_VL_DEV = '/dist/carto-vl.js'
CARTO_VL_URL = 'https://libs.cartocdn.com/carto-vl/{}/carto-vl.min.js'.format(CARTO_VL_VERSION)
AIRSHIP_VERSION = 'v2.3'
AIRSHIP_COMPONENTS_DEV = '/packages/components/dist/airship.js'
AIRSHIP_BRIDGE_DEV = '/packages/bridge/dist/asbridge.js'
AIRSHIP_MODULE_DEV = '/packages/components/dist/airship/airship.esm.js'
AIRSHIP_STYLES_DEV = '/packages/styles/dist/airship.css'
AIRSHIP_ICONS_DEV = '/packages/icons/dist/icons.css'
AIRSHIP_COMPONENTS_URL = 'https://libs.cartocdn.com/airship-components/{}/airship.js'.format(AIRSHIP_VERSION)
AIRSHIP_BRIDGE_URL = 'https://libs.cartocdn.com/airship-bridge/{}/asbridge.min.js'.format(AIRSHIP_VERSION)
AIRSHIP_MODULE_URL = 'https://libs.cartocdn.com/airship-components/{}/airship/airship.esm.js'.format(AIRSHIP_VERSION)
AIRSHIP_STYLES_URL = 'https://libs.cartocdn.com/airship-style/{}/airship.min.css'.format(AIRSHIP_VERSION)
AIRSHIP_ICONS_URL = 'https://libs.cartocdn.com/airship-icons/{}/icons.css'.format(AIRSHIP_VERSION)
STYLE_PROPERTIES = [
'color',
'width',
'filter',
'strokeWidth',
'strokeColor',
'transform',
'order',
'symbol',
'symbolPlacement',
'resolution'
]
LEGEND_PROPERTIES = [
'color',
'stroke_color',
'size',
'stroke_width'
]
VIZ_PROPERTIES_MAP = {
'color': 'color',
'stroke_color': 'strokeColor',
'size': 'width',
'stroke_width': 'strokeWidth',
'filter': 'filter'
}
LEGEND_TYPES = [
'basic',
'default',
'color-bins',
'color-bins-line',
'color-bins-point',
'color-bins-polygon',
'color-category',
'color-category-line',
'color-category-point',
'color-category-polygon',
'color-continuous',
'color-continuous-line',
'color-continuous-point',
'color-continuous-polygon',
'size-bins',
'size-bins-line',
'size-bins-point',
'size-category',
'size-category-line',
'size-category-point',
'size-continuous',
'size-continuous-line',
'size-continuous-point'
]
SINGLE_LEGEND = 'color-category'
WIDGET_TYPES = [
'basic',
'default',
'formula',
'histogram',
'category',
'animation',
'time-series'
]
FORMULA_OPERATIONS_VIEWPORT = {
'count': 'viewportCount',
'avg': 'viewportAvg',
'min': 'viewportMin',
'max': 'viewportMax',
'sum': 'viewportSum'
}
FORMULA_OPERATIONS_GLOBAL = {
'count': 'globalCount',
'avg': 'globalAvg',
'min': 'globalMin',
'max': 'globalMax',
'sum': 'globalSum'
}
CLUSTER_KEYS = [
'count',
'avg',
'min',
'max',
'sum'
]
CLUSTER_OPERATIONS = {
'count': 'clusterCount',
'avg': 'clusterAvg',
'min': 'clusterMin',
'max': 'clusterMax',
'sum': 'clusterSum'
}
THEMES = ['dark', 'light']
DEFAULT_LAYOUT_M_SIZE = 1
|
bsd-3-clause
| 5,031,585,193,760,696,000 | 23.042735 | 117 | 0.635265 | false |
novopl/fabutils
|
test/unit/fabutils/lint/test_is_pylint_compliant.py
|
1
|
1958
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from mock import patch, Mock
from pytest import raises, mark
from fabutils.lint import is_pylint_compliant
@patch('fabutils.lint.print', Mock())
@patch('fabutils.lint.infomsg', Mock())
def test_raises_ValueError_if_passed_iterable_is_actually_a_string():
with raises(ValueError):
is_pylint_compliant('fakepath')
@patch('fabutils.lint.local', Mock())
@patch('fabutils.lint.print', Mock())
@patch('fabutils.lint.infomsg', Mock())
def test_empty_paths():
with patch('fabutils.lint.local', Mock()):
is_pylint_compliant([])
@patch('fabutils.lint.local')
@patch('fabutils.lint.print', Mock())
@patch('fabutils.lint.infomsg', Mock())
def test_surrounds_paths_with_quotes(local):
is_pylint_compliant([
'test file 1',
'test file 2',
])
args, kw = local.call_args
assert '"test file 1"' in args[0]
assert '"test file 2"' in args[0]
@patch('fabutils.lint.local')
@patch('fabutils.lint.print', Mock())
@patch('fabutils.lint.infomsg', Mock())
def test_returns_True_if_calling_pep8_returns_0(local):
local.return_value = Mock()
local.return_value.return_code = 0
assert is_pylint_compliant(['test file 1']) == True
@patch('fabutils.lint.local')
@patch('fabutils.lint.print', Mock())
@patch('fabutils.lint.infomsg', Mock())
@mark.parametrize('retcode', [1, -2, 20, 80])
def test_returns_False_if_pep8_finishes_with_non_zero_result(local, retcode):
local.return_value = Mock()
local.return_value.return_code = retcode
assert is_pylint_compliant(['test file 1']) == False
@patch('fabutils.lint.local')
@patch('fabutils.lint.print')
@patch('fabutils.lint.infomsg', Mock())
def test_doesnt_print_empty_output(m_print, m_local):
out = Mock()
out.return_code = 0
out.strip = Mock(return_value='')
m_local.return_value = out
is_pylint_compliant(['test file 1'])
m_print.assert_not_called()
|
mit
| -7,350,630,813,657,264,000 | 26.577465 | 77 | 0.674157 | false |
nop33/indico-plugins
|
payment_paypal/tests/util_test.py
|
1
|
1388
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from mock import MagicMock
from wtforms import ValidationError
from indico_payment_paypal.util import validate_business
@pytest.mark.parametrize(('data', 'valid'), (
('foobar', False),
('foo@bar,com', False),
('example@example.com', True),
('X2345A789B12Cx', False),
('X2345A789B12', False),
('1234567890123', True),
('X2345A789B12C', True),
))
def test_validate_business(data, valid):
field = MagicMock(data=data)
if valid:
validate_business(None, field)
else:
with pytest.raises(ValidationError):
validate_business(None, field)
|
gpl-3.0
| -2,077,573,935,550,929,200 | 34.589744 | 78 | 0.692363 | false |
lbianch/nfl_elo
|
tests/test_inv_erf.py
|
1
|
2693
|
import unittest
import logging
import pickle
import inv_erf
from elo import probability_points as prob
logging.basicConfig(level=logging.INFO)
class TestInvErf(unittest.TestCase):
def setUp(self):
with open('random_state.pickle', 'rb') as f:
state = pickle.load(f)
inv_erf.random.setstate(state)
def test_sign(self):
self.assertEqual(inv_erf.sign(2.1), 1.0)
self.assertEqual(inv_erf.sign(-4.5), -1.0)
with self.assertRaises(TypeError):
inv_erf.sign('test')
def test_inv_erf(self):
from math import erf
self.assertEqual(inv_erf.inv_erf(0.0), 0.0)
with self.assertRaises(ValueError):
inv_erf.inv_erf(1.5)
with self.assertRaises(ValueError):
inv_erf.inv_erf(-1.5)
with self.assertRaises(TypeError):
inv_erf.inv_erf('0.0')
for x in [0.1, 0.25, 0.4, 0.6, 0.75, 0.9]:
self.assertAlmostEqual(inv_erf.inv_erf(erf(x)), x, 3)
def test_get_sigma(self):
with self.assertRaises(ValueError):
inv_erf.get_sigma(13.0, 1.2)
with self.assertRaises(ValueError):
inv_erf.get_sigma(-3.0, -0.2)
for pts in [3., 7., 13., 21., 45.]:
self.assertGreater(prob(pts), 0.5)
self.assertGreater(inv_erf.get_sigma(pts, prob(pts)), 11.08)
self.assertLess(prob(-pts), 0.5)
# Sigma is always positive
self.assertGreater(inv_erf.get_sigma(-pts, prob(-pts)), 11.08)
def test_spread(self):
# Seed was fixed in `setUp`, exploit it:
self.assertEqual(inv_erf.get_spread(10.0, prob(10.0)), 16)
# Now try to aggregate using known random values
N = 10000
pt = 14.0
random_data = [inv_erf.get_spread(pt, prob(pt), random_state=42) for _ in range(N)]
self.assertEqual(sum(random_data), 140461)
self.assertEqual(sum(x == 0 for x in random_data), 6)
# Now try to aggregate using unknown random values
inv_erf.random.seed()
random_data = sum(inv_erf.get_spread(pt, prob(pt)) for _ in range(N))
self.assertGreater(random_data, (pt - 0.5) * N)
self.assertLess(random_data, (pt + 0.5) * N)
# Test using known-value for `sigma`; sigma = 0.0 is non-random
self.assertEqual(inv_erf.get_spread(8.0, prob(8.0), 0.0), 8)
with self.assertRaises(TypeError):
inv_erf.get_spread('test', 0.75)
with self.assertRaises(TypeError):
inv_erf.get_spread(3.4, 'test')
with self.assertRaises(TypeError):
inv_erf.get_spread(3.4, 0.6, 'test')
if __name__ == '__main__':
unittest.main()
|
mit
| -6,879,620,005,380,825,000 | 36.929577 | 91 | 0.590048 | false |
google-research/google-research
|
learn_to_infer/run_ring.py
|
1
|
10211
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for transformer experiments.
"""
import os
from . import metrics
from . import plotting
from . import ring_dist
from . import ring_models
from . import train
from absl import app
from absl import flags
import jax
from jax.config import config
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as onp
flags.DEFINE_integer("num_encoders", 6,
"Number of encoder modules in the transformer.")
flags.DEFINE_integer("num_decoders", 6,
"Number of decoder modules in the transformer.")
flags.DEFINE_integer("num_heads", 8,
"Number of attention heads in the transformer.")
flags.DEFINE_integer("key_dim", 32,
"The dimension of the keys in the transformer.")
flags.DEFINE_integer("value_dim_per_head", 32,
"The dimension of the values in the transformer for each head.")
flags.DEFINE_integer("k", 2,
"The number of modes in the data.")
flags.DEFINE_integer("data_points_per_mode", 25,
"Number of data points to include per mode in the data.")
flags.DEFINE_boolean("parallel", True,
"If possible, train in parallel across devices.")
flags.DEFINE_integer("batch_size", 64,
"The batch size.")
flags.DEFINE_integer("eval_batch_size", 256,
"The batch size for evaluation.")
flags.DEFINE_integer("num_steps", int(1e6),
"The number of steps to train for.")
flags.DEFINE_float("lr", 1e-3,
"The learning rate for ADAM.")
flags.DEFINE_integer("summarize_every", 100,
"Number of steps between summaries.")
flags.DEFINE_integer("checkpoint_every", 5000,
"Number of steps between checkpoints.")
flags.DEFINE_boolean("clobber_checkpoint", False,
"If true, remove any existing summaries and checkpoints in logdir.")
flags.DEFINE_string("logdir", "/tmp/transformer",
"The directory to put summaries and checkpoints.")
flags.DEFINE_boolean("debug_nans", False,
"If true, run in debug mode and fail on nans.")
FLAGS = flags.FLAGS
def make_model(key,
num_encoders=4,
num_decoders=4,
num_heads=8,
value_dim=128,
data_points_per_mode=25,
k=10):
model = ring_models.RingInferenceMachine(
max_k=k,
max_num_data_points=k*data_points_per_mode, num_heads=num_heads,
num_encoders=num_encoders, num_decoders=num_decoders, qkv_dim=value_dim)
params = model.init_params(key)
return model, params
def sample_batch(key, batch_size, k, data_points_per_mode):
keys = jax.random.split(key, num=batch_size)
xs, cs, params = jax.vmap(
ring_dist.sample_params_and_points,
in_axes=(0, None, None, None, None, None, None, None, None,
None))(keys, k * data_points_per_mode, k, 1., 0.5, 2, .02,
jnp.zeros([2]), jnp.eye(2), 0.1)
return xs, cs, params
def make_loss(model,
k=2,
data_points_per_mode=25,
batch_size=128):
def sample_train_batch(key):
xs, _, params = sample_batch(key, batch_size, k, data_points_per_mode)
return xs, params
def loss(params, key):
key, subkey = jax.random.split(key)
xs, ring_params = sample_train_batch(key)
ks = jnp.full([batch_size], k)
losses = model.loss(
params, xs, ks*data_points_per_mode, ring_params, ks, subkey)
return jnp.mean(losses)
return jax.jit(loss)
def make_summarize(
model,
k=2,
data_points_per_mode=25,
eval_batch_size=256):
def sample_eval_batch(key):
return sample_batch(key, eval_batch_size, k, data_points_per_mode)
sample_eval_batch = jax.jit(sample_eval_batch)
def sample_single(key):
xs, cs, params = sample_batch(key, 1, k, data_points_per_mode)
return xs[0], cs[0], (params[0][0], params[1][0], params[2][0],
params[3][0])
def model_classify(params, inputs, batch_size):
return model.classify(params, inputs,
jnp.full([batch_size], k*data_points_per_mode),
jnp.full([batch_size], k))
def sample_and_classify_eval_batch(key, params):
xs, cs, true_ring_params = sample_eval_batch(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs, eval_batch_size)
return xs, cs, true_ring_params, tfmr_cs, tfmr_ring_params
def sample_and_classify_single_mm(key, params):
xs, cs, ring_params = sample_single(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs[jnp.newaxis], 1)
return xs, cs, ring_params, tfmr_cs, tfmr_ring_params
sample_and_classify_eval_batch = jax.jit(sample_and_classify_eval_batch)
  sample_and_classify_single_mm = jax.jit(sample_and_classify_single_mm)
def summarize_baselines(writer, step, key):
key, subkey = jax.random.split(key)
xs, cs, _ = sample_eval_batch(subkey)
ks = onp.full([eval_batch_size], k)
baseline_metrics = metrics.compute_masked_baseline_metrics(
xs, cs, ks, ks*data_points_per_mode)
for method_name, method_metrics in baseline_metrics.items():
for metric_name, metric_val in method_metrics.items():
writer.scalar("%s/%s" % (method_name, metric_name),
metric_val, step=step)
print("%s %s: %0.3f" % (method_name, metric_name, metric_val))
def plot_params(num_data_points, writer, step, params, key):
outs = sample_and_classify_single_mm(key, params)
xs, true_cs, true_params, pred_cs, pred_params = outs
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
fig = plotting.plot_rings(
xs, k, true_cs, true_params, pred_cs, pred_params)
plot_image = plotting.plot_to_numpy_image(plt)
writer.image(
"%d_modes_%d_points" % (k, num_data_points), plot_image, step=step)
plt.close(fig)
def comparison_inference(params):
rings_inputs, true_cs = plotting.make_comparison_rings()
rings_inputs = rings_inputs[jnp.newaxis, Ellipsis]
new_model = ring_models.RingInferenceMachine(
max_k=2, max_num_data_points=1500, num_heads=FLAGS.num_heads,
num_encoders=FLAGS.num_encoders, num_decoders=FLAGS.num_decoders,
qkv_dim=FLAGS.value_dim_per_head*FLAGS.num_heads)
pred_cs, pred_params = new_model.classify(
params, rings_inputs, jnp.array([1500]), jnp.array([2]))
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
return rings_inputs[0], true_cs, pred_cs, pred_params
comparison_inference = jax.jit(comparison_inference)
def plot_sklearn_comparison(writer, step, params):
ring_xs, true_cs, pred_cs, pred_params = comparison_inference(params)
fig = plotting.plot_comparison_rings(ring_xs, true_cs, pred_cs, pred_params)
writer.image(
"sklearn_comparison", plotting.plot_to_numpy_image(plt), step=step)
plt.close(fig)
def summarize(writer, step, params, key):
k1, k2, k3 = jax.random.split(key, num=3)
_, cs, _, tfmr_cs, _ = sample_and_classify_eval_batch(k1, params)
ks = onp.full([eval_batch_size], k)
tfmr_metrics = metrics.compute_masked_metrics(
cs, tfmr_cs, ks, ks*data_points_per_mode,
metrics=["pairwise_accuracy", "pairwise_f1",
"pairwise_macro_f1", "pairwise_micro_f1"])
for metric_name, metric_val in tfmr_metrics.items():
writer.scalar("transformer/%s" % metric_name,
metric_val, step=step)
print("Transformer %s: %0.3f" % (metric_name, metric_val))
plot_params(k*data_points_per_mode, writer, step, params, k2)
plot_sklearn_comparison(writer, step, params)
if step == 0:
summarize_baselines(writer, step, k3)
return summarize
def make_logdir(config):
basedir = config.logdir
exp_dir = (
"ring_nheads_%d_nencoders_%d_ndecoders_%d_num_modes_%d"
% (config.num_heads, config.num_encoders, config.num_decoders, config.k))
return os.path.join(basedir, exp_dir)
def main(unused_argv):
if FLAGS.debug_nans:
config.update("jax_debug_nans", True)
if FLAGS.parallel and train.can_train_parallel():
assert FLAGS.batch_size % jax.local_device_count(
) == 0, "Device count must evenly divide batch_size"
FLAGS.batch_size = int(FLAGS.batch_size / jax.local_device_count())
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
model, init_params = make_model(
key,
num_encoders=FLAGS.num_encoders,
num_decoders=FLAGS.num_decoders,
num_heads=FLAGS.num_heads,
value_dim=FLAGS.value_dim_per_head*FLAGS.num_heads,
data_points_per_mode=FLAGS.data_points_per_mode,
k=FLAGS.k)
loss_fn = make_loss(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
batch_size=FLAGS.batch_size)
summarize_fn = make_summarize(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
eval_batch_size=FLAGS.eval_batch_size)
train.train_loop(
subkey,
init_params,
loss_fn,
parallel=FLAGS.parallel,
lr=FLAGS.lr,
num_steps=FLAGS.num_steps,
summarize_fn=summarize_fn,
summarize_every=FLAGS.summarize_every,
checkpoint_every=FLAGS.checkpoint_every,
clobber_checkpoint=FLAGS.clobber_checkpoint,
logdir=make_logdir(FLAGS))
if __name__ == "__main__":
app.run(main)
|
apache-2.0
| 5,101,977,733,388,340,000 | 36.130909 | 89 | 0.641563 | false |
google-research/ssl_detection
|
third_party/tensorpack/tensorpack/tfutils/export.py
|
1
|
6395
|
# -*- coding: utf-8 -*-
# File: export.py
"""
A collection of functions to ease the process of exporting
a model for production.
"""
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
from ..compat import is_tfv2, tfv1
from ..input_source import PlaceholderInput
from ..tfutils.common import get_tensors_by_names, get_tf_version_tuple
from ..tfutils.tower import PredictTowerContext
from ..utils import logger
__all__ = ['ModelExporter']
class ModelExporter(object):
"""Export models for inference."""
def __init__(self, config):
"""Initialise the export process.
Args:
config (PredictConfig): the config to use.
The graph will be built with the tower function defined by this `PredictConfig`.
Then the input / output names will be used to export models for inference.
"""
super(ModelExporter, self).__init__()
self.config = config
def export_compact(self, filename, optimize=True, toco_compatible=False):
"""Create a self-contained inference-only graph and write final graph (in pb format) to disk.
Args:
filename (str): path to the output graph
optimize (bool): whether to use TensorFlow's `optimize_for_inference`
to prune and optimize the graph. This does not work on all types of graphs.
toco_compatible (bool): See TensorFlow's
`optimize_for_inference
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_
for details. Only available after TF 1.8.
"""
if toco_compatible:
assert optimize, "toco_compatible is only effective when optimize=True!"
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
output_tensors = get_tensors_by_names(self.config.output_names)
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
dtypes = [n.dtype for n in input_tensors]
# freeze variables to constants
frozen_graph_def = graph_util.convert_variables_to_constants(
sess,
self.graph.as_graph_def(),
[n.name[:-2] for n in output_tensors],
variable_names_whitelist=None,
variable_names_blacklist=None)
# prune unused nodes from graph
if optimize:
toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, )
frozen_graph_def = optimize_for_inference_lib.optimize_for_inference(
frozen_graph_def,
[n.name[:-2] for n in input_tensors],
[n.name[:-2] for n in output_tensors],
[dtype.as_datatype_enum for dtype in dtypes],
*toco_args)
with gfile.FastGFile(filename, "wb") as f:
f.write(frozen_graph_def.SerializeToString())
logger.info("Output graph written to {}.".format(filename))
def export_serving(self, filename,
tags=(tf.saved_model.SERVING if is_tfv2() else tf.saved_model.tag_constants.SERVING,),
signature_name='prediction_pipeline'):
"""
Converts a checkpoint and graph to a servable for TensorFlow Serving.
Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency.
Args:
filename (str): path for export directory
tags (tuple): tuple of user specified tags
signature_name (str): name of signature for prediction
Note:
This produces
.. code-block:: none
variables/ # output from the vanilla Saver
variables.data-?????-of-?????
variables.index
saved_model.pb # a `SavedModel` protobuf
Currently, we only support a single signature, which is the general PredictSignatureDef:
https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md
"""
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
saved_model = tfv1.saved_model.utils
inputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in input_tensors}
output_tensors = get_tensors_by_names(self.config.output_names)
outputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in output_tensors}
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
builder = tfv1.saved_model.builder.SavedModelBuilder(filename)
prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def(
inputs=inputs_signatures,
outputs=outputs_signatures,
method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME)
builder.add_meta_graph_and_variables(
sess, list(tags),
signature_def_map={signature_name: prediction_signature})
builder.save()
logger.info("SavedModel created at {}.".format(filename))
|
apache-2.0
| 8,489,954,374,278,607,000 | 42.80137 | 122 | 0.616575 | false |
kefin/django-garage
|
garage/tests/slugify/tests.py
|
1
|
8824
|
# -*- coding: utf-8 -*-
"""
tests.slugify.tests
Tests for garage.slugify
* created: 2014-08-24 Kevin Chan <kefin@makedostudio.com>
* updated: 2015-02-23 kchan
"""
from __future__ import (absolute_import, unicode_literals)
from mock import Mock, patch, call
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from garage.test import SimpleTestCase
class SlugifyTests(SimpleTestCase):
def test_strip_accents(self):
"""
Ensure strip_accents function is working properly.
"""
from garage.slugify import strip_accents
self._msg('test', 'strip_accents', first=True)
txt = 'écriture 寫作'
expected = 'ecriture '
result = strip_accents(txt)
self._msg('text', txt)
self._msg('result', result)
self._msg('expected', expected)
self.assertEqual(result, expected)
def test_slugify(self):
"""
Ensure slugify function is working properly.
"""
from garage.slugify import slugify
self._msg('test', 'slugify', first=True)
txt = 'The Renaissance of Giselle “G” Töngi'
expected = 'the-renaissance-of-giselle-g-tongi'
result = slugify(txt)
self._msg('text', txt)
self._msg('result', result)
self._msg('expected', expected)
self.assertEqual(result, expected)
txt = 'Apoyan resolución a favor de niños migrantes en LA'
expected = 'apoyan-resolucion-a-favor-de-ninos-migrantes-en-la'
result = slugify(txt)
self._msg('text', txt)
self._msg('result', result)
self._msg('expected', expected)
self.assertEqual(result, expected)
txt = '“foo! écriture 寫作 #bar???”'
expected = 'foo-ecriture-bar'
result = slugify(txt)
self._msg('text', txt)
self._msg('result', result)
self._msg('expected', expected)
self.assertEqual(result, expected)
        txt = 'Nín hǎo. Wǒ shì zhōng guó rén'
expected = 'nin-hao-wo-shi-zhong-guo-ren'
result = slugify(txt)
self._msg('text', txt)
self._msg('result', result)
self._msg('expected', expected)
self.assertEqual(result, expected)
@override_settings(SLUG_SEPARATOR='.')
def test_get_slug_separator(self):
"""
Ensure get_slug_separator function is working properly.
"""
self._msg('test', 'get_slug_separator', first=True)
from garage.slugify import get_slug_separator
separator = get_slug_separator()
self.assertEqual(separator, '.')
self._msg('separator', separator)
@override_settings(SLUG_ITERATION_SEPARATOR='.')
def test_get_slug_iteration_separator(self):
"""
Ensure get_slug_iteration_separator function is working properly.
"""
self._msg('test', 'get_slug_iteration_separator', first=True)
from garage.slugify import get_slug_iteration_separator
separator = get_slug_iteration_separator()
self.assertEqual(separator, '.')
self._msg('separator', separator)
@override_settings(SLUG_ITERATION_SEPARATOR='.')
def test_get_slug_base(self):
"""
Ensure get_slug_base function is working properly.
"""
self._msg('test', 'get_slug_base', first=True)
from garage.slugify import get_slug_base
separator = '.'
slug = 'example.999'
slug_base = get_slug_base(slug, slug_iteration_separator=separator)
self.assertEqual(slug_base, 'example')
self._msg('slug', slug)
self._msg('separator', separator)
self._msg('slug_base', slug_base)
separator = '--'
slug = 'example-2015--2'
slug_base = get_slug_base(slug, slug_iteration_separator=separator)
self.assertEqual(slug_base, 'example-2015')
self._msg('slug', slug)
self._msg('separator', separator)
self._msg('slug_base', slug_base)
separator = '~'
slug = 'example-999~9876'
slug_base = get_slug_base(slug, slug_iteration_separator=separator)
self.assertEqual(slug_base, 'example-999')
self._msg('slug', slug)
self._msg('separator', separator)
self._msg('slug_base', slug_base)
separator = '~'
slug = 'example-123-4567'
slug_base = get_slug_base(slug, slug_iteration_separator=separator)
self.assertEqual(slug_base, 'example-123-4567')
self._msg('slug', slug)
self._msg('separator', separator)
self._msg('slug_base', slug_base)
separator = '-copy'
slug = 'example-copy4'
slug_base = get_slug_base(slug, slug_iteration_separator=separator)
self.assertEqual(slug_base, 'example')
self._msg('slug', slug)
self._msg('separator', separator)
self._msg('slug_base', slug_base)
def test_slug_creation_error(self):
"""
slug_creation_error raises a ValidationError (obsolete function).
"""
self._msg('test', 'slug_creation_error', first=True)
from django.core.exceptions import ValidationError
from garage.slugify import slug_creation_error
with self.assertRaises(ValidationError):
slug_creation_error()
def test_create_unique_slug(self):
"""
create_unique_slug will create a unique slug for a model
instance.
"""
self._msg('test', 'create_unique_slug', first=True)
from garage.slugify import create_unique_slug, SLUG_ITERATION_SEPARATOR
separator = SLUG_ITERATION_SEPARATOR
obj = Mock()
obj.slug = 'example'
queryset = Mock()
queryset.exclude.return_value = queryset
queryset.filter.return_value = None
dummy_model = Mock()
dummy_model._default_manager.all.return_value = queryset
obj.__class__ = dummy_model
result = create_unique_slug(obj)
expected = 'example'
self.assertEqual(result, expected)
self._msg('slug', result)
# create a unique slug that ends with '3'
ncopy = '3'
def side_effect(**kwargs):
slug = kwargs.get('slug')
if slug and slug.endswith(ncopy):
return None
return True
obj = Mock()
obj.slug = 'example{0}1'.format(separator)
queryset = Mock()
queryset.exclude.return_value = queryset
queryset.filter.side_effect = side_effect
dummy_model = Mock()
dummy_model._default_manager.all.return_value = queryset
obj.__class__ = dummy_model
result = create_unique_slug(obj)
expected = 'example{0}{1}'.format(separator, ncopy)
self.assertEqual(result, expected)
self._msg('slug', result)
def test_get_unique_slug(self):
"""
get_unique_slug will create a unique slug for a model
instance.
"""
self._msg('test', 'get_unique_slug', first=True)
from garage.slugify import get_unique_slug, SLUG_ITERATION_SEPARATOR
separator = SLUG_ITERATION_SEPARATOR
slug_field = 'slug'
slug_base = 'example'
obj = Mock()
obj.slug = slug_base
queryset = Mock()
queryset.exclude.return_value = queryset
queryset.filter.return_value = None
dummy_model = Mock()
dummy_model._default_manager.all.return_value = queryset
obj.__class__ = dummy_model
result, _ = get_unique_slug(obj,
slug_field=slug_field,
slug_base=slug_base,
slug_separator=separator)
expected = slug_base
self.assertEqual(result, expected)
self._msg('slug', result)
# create a unique slug that ends with '3'
ncopy = '3'
def side_effect(**kwargs):
slug = kwargs.get(slug_field)
if slug and slug.endswith(ncopy):
return None
return True
obj = Mock()
obj.slug = '{0}{1}1'.format(slug_base, separator)
queryset = Mock()
queryset.exclude.return_value = queryset
queryset.filter.side_effect = side_effect
dummy_model = Mock()
dummy_model._default_manager.all.return_value = queryset
obj.__class__ = dummy_model
result, _ = get_unique_slug(obj,
slug_field=slug_field,
slug_base=slug_base,
slug_separator=separator)
expected = '{0}{1}{2}'.format(slug_base, separator, ncopy)
self.assertEqual(result, expected)
self._msg('slug', result)
|
bsd-3-clause
| -3,521,196,795,077,406,000 | 33.904762 | 79 | 0.587881 | false |
mcolom/ipolDevel
|
tools/migrations/demoinfo_db_migration.py
|
1
|
4289
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
# This script moves the "creation" field from demo_demodescription to demodescription.
from sys import argv, exit
import sqlite3 as lite
def main():
if len(argv) != 2:
print "Usage : ./demoinfo_db_migration.py <demoinfo database>"
exit()
db = str(argv[1])
try:
conn = lite.connect(db)
cursor_db = conn.cursor()
cursor_db.execute("""
PRAGMA foreign_keys = OFF;
""")
conn.commit()
# The following operation is required because SQLite does not handle
# default timestamp value in timestamp field creation via ALTER TABLE query.
print "Creating demodescription buffer table"
cursor_db.execute("""
CREATE TABLE IF NOT EXISTS "demodescription_buf" (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
creation TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
inproduction INTEGER(1) DEFAULT 1,
JSON BLOB
);
""")
conn.commit()
cursor_db.execute("""
INSERT INTO demodescription_buf (ID, inproduction, JSON)
SELECT ID, inproduction, JSON
FROM demodescription;
""")
conn.commit()
print "Moving creations timestamp into demodescription buffer table"
cursor_db.execute("""
SELECT creation, demodescriptionId FROM demo_demodescription;
""")
conn.commit()
creation_dates = cursor_db.fetchall()
for row in creation_dates:
cursor_db.execute("""
UPDATE demodescription_buf
SET creation=?
WHERE ID=?
""", row)
conn.commit()
# The following operation is required because SQLite does not handle
# column removal inside tables using ALTER TABLE query.
print "Correcting demo_demodescription schema"
cursor_db.execute("""
CREATE TABLE IF NOT EXISTS "demo_demodescription_buf" (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
demoID INTEGER NOT NULL,
demodescriptionId INTEGER NOT NULL,
FOREIGN KEY(demodescriptionId) REFERENCES demodescription(id) ON DELETE CASCADE,
FOREIGN KEY(demoID) REFERENCES demo(id) ON DELETE CASCADE
);
""")
cursor_db.execute("""
INSERT INTO demo_demodescription_buf (ID, demoID, demodescriptionId)
SELECT ID, demoID, demodescriptionId
FROM demo_demodescription;
""")
conn.commit()
cursor_db.execute("""
DROP TABLE demo_demodescription;
""")
conn.commit()
cursor_db.execute("""
ALTER TABLE demo_demodescription_buf RENAME TO demo_demodescription;
""")
conn.commit()
print "Making demodescription buffer table as the new demodescription table"
cursor_db.execute("""
DROP TABLE demodescription;
""")
conn.commit()
cursor_db.execute("""
ALTER TABLE demodescription_buf RENAME TO demodescription;
""")
conn.commit()
cursor_db.execute("""
PRAGMA foreign_keys = ON;
""")
conn.commit()
cursor_db.execute("""
VACUUM;
""")
conn.commit()
conn.close()
print "OK"
except Exception as ex:
print "KO"
print str(ex)
print "Database probably jeopardized... Do not use the file the script was run on."
conn.rollback()
conn.close()
main()
|
agpl-3.0
| 1,223,077,201,194,674,400 | 31.492424 | 91 | 0.615528 | false |
OSEHRA/VistA
|
Scripts/GitUtils.py
|
1
|
9780
|
#---------------------------------------------------------------------------
# Copyright 2013-2019 The Open Source Electronic Health Record Alliance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
from builtins import zip
import codecs
import os
import sys
import subprocess
import re
import argparse
import difflib
from LoggerManager import logger, initConsoleLogging
""" Utilities Functions to wrap around git command functions via subprocess
1. make sure git is accessible directly via command line,
or git is in the %path% for windows or $PATH for Unix/Linux
"""
DEFAULT_GIT_HASH_LENGTH = 40 # default git hash length is 40
def getGitRepoRevisionHash(revision="HEAD", gitRepoDir=None):
"""
Utility function to get the git hash based on a given git revision
@revision: input revision, default is HEAD on the current branch
@gitRepoDir: git repository directory, default is current directory.
@return: return git hash if success, None otherwise
"""
git_command_list = ["git", "rev-parse", "--verify", revision]
result, output = _runGitCommand(git_command_list, gitRepoDir)
if not result:
return None
lines = output.split('\r\n')
for line in lines:
line = line.strip(' \r\n')
if re.search('^[0-9a-f]+$', line):
return line
return None
def commitChange(commitMsgFile, gitRepoDir=None):
"""
Utility function to commit the change in the current branch
@commitMsgFile: input commit message file for commit
@gitRepoDir: git repository directory, default is current directory.
@return: return True if success, False otherwise
"""
if not os.path.exists(commitMsgFile):
return False
git_command_list = ["git", "commit", "-F", commitMsgFile]
result, output = _runGitCommand(git_command_list, gitRepoDir)
logger.info(output)
return result
def addChangeSet(gitRepoDir=None, patternList=[]):
"""
Utility function to add all the files changed to staging area
@gitRepoDir: git repository directory, default is current directory.
if provided, will only add all changes under that directory
@patternList: a list of pattern for matching files.
need to escape wildcard character '*'
@return: return True if success, False otherwise
"""
patternIncludeList = ["*.m"]
for dir in os.listdir(gitRepoDir):
git_command_list = ["git", "diff","--", "*.zwr"]
result, output = _runGitCommand(git_command_list,os.path.join(gitRepoDir,dir))
if not result:
logger.error("Git DIFF command failed: " + output)
raise Exception("Git DIFF command failed: " + output)
test = output.split("\n")
outLineStack = []
results = []
"""
Attempts to check each global file useful information in the diff. It
checks through the diff of each ZWR file. If it finds a pair of addition
and removal, it checks that the line change isn't just a date/time or
number change. If a file consists entirely of date/time changes, it is
excluded from the added files. A special case is made for the DEVICE file
to eliminate the count of times that each DEVICE was opened.
This assumes the script will be run in the "Packages" directory, which
necessitates the removal of the "Packages/" string from the filename
"""
    currentFile = None
for index, line in enumerate(test):
if '.zwr' in line:
if ("OK" in results) or len(outLineStack):
patternIncludeList.append(currentFile)
outLineStack = []
currentFile = line[15:].strip()
results = []
continue
if line.startswith("-"):
outLineStack.append(line)
elif line.startswith("+"):
if len(outLineStack):
diffStack=[]
out = difflib.ndiff(line[1:].split("^"), outLineStack[0][1:].split("^"))
outList = '**'.join(out).split("**")
if len(outList) > 1:
for i,s in enumerate(outList):
if i == len(outList):
results.append("OK")
break
if s:
if s[0]=="-":
diffStack.append(s[2:])
if s[0] == "+":
if len(diffStack):
if re.search("DIC\(9.8,",s[2:]):
break
if re.search("[0-9]{7}(\.[0-9]{4,6})*",s[2:]) or re.search("[0-9]{7}(\.[0-9]{4,6})*",diffStack[0]):
results.append("DATE")
break
if re.search("[0-9]{2}\-[A-Z]{3}\-[0-9]{4}",s[2:]) or re.search("[0-9]{2}\:[0-9]{2}\:[0-9]{2}",diffStack[0]) :
results.append("DATE")
break
if re.search("[0-9]{2}:[0-9]{2}:[0-9]{2}",s[2:]) or re.search("[0-9]{2}\:[0-9]{2}\:[0-9]{2}",diffStack[0]) :
results.append("DATE")
break
# Removes a specific global entry in DEVICE file which maintains a count of the times the device was opened
if re.search("%ZIS\([0-9]+,[0-9]+,5",s[2:]):
break
diffStack.pop(0)
outLineStack.pop(0)
else:
results.append("OK")
# Ensure that the last object is captured, if necessary
if ("OK" in results) or len(outLineStack):
patternIncludeList.append(currentFile)
""" Now add everything that can be found or was called for"""
git_command_list = ["git", "add", "--"]
totalIncludeList = patternList + patternIncludeList
for file in totalIncludeList:
git_command = git_command_list + [file]
result, output = _runGitCommand(git_command, gitRepoDir)
if not result:
logger.error("Git add command failed: " + output)
raise Exception("Git add command failed: " + output)
logger.info(output)
""" Add the untracked files through checking for "other" files and
then add the list
"""
git_command = ["git","ls-files","-o","--exclude-standard"]
result, lsFilesOutput = _runGitCommand(git_command, gitRepoDir)
git_command_list = ["git","add"]
for file in lsFilesOutput.split("\n"):
if len(file):
git_command = git_command_list + [file]
result, output = _runGitCommand(git_command, gitRepoDir)
if not result:
logger.error("Git ls-files command failed: " + output)
raise Exception("Git ls-files command failed: " + output)
return result
def switchBranch(branchName, gitRepoDir=None):
"""
Utility function to switch to a different branch
@branchName: the name of the branch to switch to
@gitRepoDir: git repository directory, default is current directory.
@return: return True if success, False otherwise
"""
git_command_list = ["git", "checkout", branchName]
result, output = _runGitCommand(git_command_list, gitRepoDir)
logger.info(output)
return result
def getStatus(gitRepoDir=None, subDirPath=None):
"""
Utility function to report git status on the directory
@gitRepoDir: git repository directory, default is current directory.
               if provided, status is reported for that repository
@subDirPath: report only the status for the subdirectory provided
@return: return the status message
"""
git_command_list = ["git", "status"]
if subDirPath:
git_command_list.extend(['--', subDirPath])
result, output = _runGitCommand(git_command_list, gitRepoDir)
return output
def getCommitInfo(gitRepoDir=None, revision='HEAD'):
"""
Utility function to retrieve commit information
like date/time in Unix timestamp, title and hash
@gitRepoDir: git repository directory, default is current directory.
               if provided, commit info is reported for that repository
@revision: the revision to retrieve info, default is HEAD
@return: return commit info dictionary
"""
delim = '\n'
outfmtLst = ("%ct","%s","%H")
git_command_list = ["git", "log"]
fmtStr = "--format=%s" % delim.join(outfmtLst)
git_command_list.extend([fmtStr, "-n1", revision])
result, output = _runGitCommand(git_command_list, gitRepoDir)
if result:
return dict(list(zip(outfmtLst, output.strip('\r\n').split(delim))))
return None
def _runGitCommand(gitCmdList, workingDir):
"""
Private Utility function to run git command in subprocess
@gitCmdList: a list of git commands to run
@workingDir: the working directory of the child process
@return: return a tuple of (True, output) if success,
(False, output) otherwise
"""
output = None
try:
popen = subprocess.Popen(gitCmdList,
cwd=workingDir, # set child working directory
stdout=subprocess.PIPE)
output = popen.communicate()[0]
if popen.returncode != 0: # command error
return (False, codecs.decode(output,'utf-8','ignore'))
return (True, codecs.decode(output, 'utf-8', 'ignore'))
except OSError as ex:
logger.error(ex)
    # output is still None when Popen itself failed, so guard before decoding
    return (False, codecs.decode(output, 'utf-8', 'ignore') if output is not None else '')
def main():
initConsoleLogging()
pass
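  # Illustrative sketch, commented out (the repository path "." and the use of
  # HEAD are assumptions for demonstration):
  # info = getCommitInfo(gitRepoDir=".", revision="HEAD")
  # if info:
  #   logger.info("HEAD %s at %s: %s" % (info["%H"], info["%ct"], info["%s"]))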
if __name__ == '__main__':
main()
|
apache-2.0
| -2,972,550,952,275,129,300 | 39.580913 | 130 | 0.630573 | false |
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_load_balancer_backend_address_pools_operations.py
|
1
|
8800
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
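# Hypothetical usage sketch (added; not part of the generated client). The
# resource names and credential type below are assumptions:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def list_pools():
#         async with NetworkManagementClient(
#                 DefaultAzureCredential(), "<subscription-id>") as client:
#             async for pool in client.load_balancer_backend_address_pools.list(
#                     "my-resource-group", "my-load-balancer"):
#                 print(pool.name)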
|
mit
| 6,371,308,267,858,528,000 | 48.438202 | 218 | 0.655114 | false |
bcaligari/mbox_defiler
|
filededup.py
|
1
|
4945
|
#!/usr/bin/env python3
# Copyright (c) 2017 Brendon Caligari <caligari@cypraea.co.uk>
# This file is released under GPL v3.0
import logging
import pathlib
import unfile
import json
class FileDedup(object):
"""Dedup existing files in a directory and add more unique files.
The directory is first scanned and duplicates removed. More unique
files can then be added.
Multiple requests to FileDedup("some_directory") return a reference to
the same FileDedup object.
Logger levels:
info - dedup activity
debug - displays some internals
"""
class FileName(object):
"""FileName on disk may have been sanitised or have duplicates"""
def __init__(self, on_disk_name):
self.on_disk_name = on_disk_name
self.original_name = None
self.duplicate_names = set()
__caches = {}
def __new__(cls, dirname):
dirname_actual = str(pathlib.Path(dirname).resolve())
if not dirname_actual in FileDedup.__caches:
FileDedup.__caches[dirname_actual] = super().__new__(cls)
return FileDedup.__caches[dirname_actual]
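    # NOTE (added): Python still runs __init__ on the cached instance for
    # every FileDedup(dirname) call, so the directory is re-scanned each time.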
def __init__(self, dirname):
logging.debug("Initialised FileDedup for '{}'".format(dirname))
if dirname[-1] == "/":
self._dir = dirname
else:
self._dir = dirname + "/"
self._file_cache = {} # key = md5sum.size, value = FileName
self._read_dedup_dir()
def _read_dedup_dir(self):
"""Scan, catalog, and dedup directory"""
logging.debug("Scanning files already in directory")
scan_dir = pathlib.Path(self._dir)
all_files = [f for f in scan_dir.iterdir() if f.is_file()]
for file_found in all_files: # type(file_found) == pathlib.Path
if file_found.is_symlink(): # we don't want symlinks
file_found.unlink()
logging.info("Unlinked symlink '{}'".format(file_found))
continue
uf = unfile.UnFile(file_found.read_bytes(), file_found.parts[-1])
if uf.get_size() == 0:
file_found.unlink()
logging.info("Unlinked zero sized regular file '{}'".format(file_found))
continue
if self._is_cached(uf.get_key()):
self._record_dup(uf.get_key(), uf.get_name())
file_found.unlink()
logging.info("Unlinked duplicate regular file '{}'".format(file_found))
else:
self._record_file(uf.get_key(), uf.get_name(), uf.get_origname())
logging.info("Leaving unique regular file '{}'".format(file_found))
logging.debug("Finished processing pre-existing files")
def _commit_file(self, pathspec, blob):
"""Commit a binary blob to disk as a file"""
pathlib.Path(pathspec).write_bytes(blob)
def _record_file(self, key, filename, origname):
"""Record in _cache that a unique file is on disk"""
self._file_cache[key] = self.FileName(filename)
if filename != origname:
self._file_cache[key].original_name = origname
def _record_dup(self, key, filename):
"""Record in _cache that a duplicate has been detected"""
self._file_cache[key].duplicate_names.add(filename)
def _is_cached(self, key):
"""Check if a binary blob already exists as a file"""
return key in self._file_cache
def add_file(self, uf):
"""Add an Unfile to the dedup directory"""
if self._is_cached(uf.get_key()):
self._record_dup(uf.get_key(), uf.get_name())
logging.info("Skipped duplicate of on disk '{}'".format(uf.get_name()))
else:
uf.sanitise_name() # We can't trust filenames coming from wherever
if uf.get_name() != uf.get_origname():
logging.info("Sanitising file name of '{}'".format(uf.get_origname()))
if pathlib.Path("{}/{}".format(self._dir, uf.get_name())).exists():
logging.info("Renaming unique file with name collision for '{}'".format(uf.get_name()))
uf.infer_ext()
uf.cook_name()
self._commit_file("{}/{}".format(self._dir, uf.get_name()), uf.blob)
self._record_file(uf.get_key(), uf.get_name(), uf.get_origname())
logging.info("Adding unique file '{}'".format(uf.get_name()))
def report(self):
"""Reports on ondisk files with original and duplicate filenames"""
struct = dict()
for f in self._file_cache.keys():
struct[self._file_cache[f].on_disk_name] = {
"original" : self._file_cache[f].original_name,
"duplicates" : list(self._file_cache[f].duplicate_names)
}
return json.dumps(struct, sort_keys=True, indent=4)
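def _demo(directory="./attachments"):
    """Illustrative sketch only, added for demonstration; the directory and
    file names here are assumptions, not part of the original module."""
    dedup = FileDedup(directory)
    # UnFile takes (blob, name), mirroring its use in _read_dedup_dir above.
    blob = pathlib.Path("inbox/report.pdf").read_bytes()
    dedup.add_file(unfile.UnFile(blob, "report.pdf"))
    print(dedup.report())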
if __name__ == "__main__":
pass
|
gpl-3.0
| -7,705,359,799,193,418,000 | 40.208333 | 103 | 0.575733 | false |
hack4impact/legal-checkup
|
app/api/forms.py
|
1
|
1404
|
from flask_wtf import Form
from wtforms.fields import (
BooleanField,
FieldList,
FormField,
SelectField,
SelectMultipleField,
SubmitField,
TextAreaField,
TextField
)
from wtforms.validators import InputRequired, Length
class ParameterForm(Form):
param_name = TextField('Parameter Name', validators=[InputRequired(), Length(1, 500)])
description = TextField('Description', validators=[InputRequired(), Length(1, 500)])
param_format = TextField('Format', validators=[InputRequired(), Length(1, 500)])
class NewAPIForm(Form):
name = TextField('Name of API', validators=[InputRequired(), Length(1, 500)])
region = SelectField('Region',
choices=[('Philadelphia', 'Philadelphia'), ('Pennsylvania', 'Pennsylvania')],
validators=[InputRequired()]
)
# Parameters are dynamically populated when rendered -- see views.py.
parameters = SelectMultipleField('Parameters',
choices=[],
validators=[])
# TODO: Removing parameters
new_parameter = FieldList(FormField(ParameterForm), min_entries=0)
add_parameter = SubmitField('Add a new parameter')
url = TextField('API URL', validators=[InputRequired(), Length(1, 500)])
    description = TextAreaField('Description')
submit = SubmitField('Add API')
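    # Sketch of the dynamic population mentioned above (illustrative; the real
    # logic lives in views.py, and the Parameter model/attribute names here
    # are assumptions):
    #
    #   form = NewAPIForm()
    #   form.parameters.choices = [
    #       (str(p.id), p.param_name) for p in Parameter.query.all()
    #   ]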
|
mit
| -5,709,190,334,332,081,000 | 40.294118 | 105 | 0.643162 | false |
edonyM/emthesis
|
code/3point2plane.py
|
1
|
3545
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-11-30 16:04
#
# Filename: 3point2plane.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
        self.warningcolor = ''
        self.tipcolor = ''
        self.endcolor = ''
fig = plt.figure('3 point into plane')
ax = fig.add_subplot(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = 5 - 0.3*X + 0.48*Y
p1 = [5.3, 0.1, 5-0.3*5.3+0.48*0.1]
p2 = [2.3, 0.7, 5-0.3*2.3+0.48*0.7]
p3 = [8.3, 3.1, 5-0.3*8.3+0.48*3.1]
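# Illustrative check (added, not in the original script): recover the plane
# z = 5 - 0.3x + 0.48y from the three sample points via the cross product of
# two in-plane vectors.
v1 = np.array(p2) - np.array(p1)
v2 = np.array(p3) - np.array(p1)
normal = np.cross(v1, v2)   # (a, b, c) such that a*x + b*y + c*z = d
d = np.dot(normal, p1)
# normal and d reproduce the hard-coded coefficients up to a scale factor:
# z = (d - normal[0]*x - normal[1]*y) / normal[2]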
ax.plot_surface(X, Y, Z, rstride=100, cstride=100, alpha=0.3)
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.scatter(p3[0], p3[1], p3[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
|
mit
| -4,273,530,887,346,554,400 | 32.130841 | 89 | 0.372073 | false |
openssbd/OpenSSBD
|
SSBD/BDML/views.py
|
1
|
39099
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core import serializers
from django.db.models import Q, Max, Min, Avg
myDebug=False
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
from django.db import transaction
from django.views.decorators.csrf import csrf_exempt
import json
from models import *
from xml.dom import minidom # needed by generateDS
from xml.dom import Node # needed by generateDS
import bdmllib # generateDS generated bdml interface
bdmldir='/tmp/bdml/'
def univertices(request, bdmlid, time, etype):
try:
# verticeslist = unicoords_model.objects.filter(bdml=bdmlid, t=time, entitytype=etype).order_by("entity","id")
verticeslist = unicoords_model.objects.filter(bdml=bdmlid, t=time, entitytype=etype).order_by("coords","id")
data = process_vertices(verticeslist, etype)
except:
errortext = {"univertices" : "error: cannot retrieve vertices"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
data = '%s(%s);' % (request.REQUEST['callback'], data)
return HttpResponse(data, "text/javascript")
else:
return HttpResponse(data, mimetype='application/json')
def univerticestp(request, bdmlid, tp, etype):
try:
# verticeslist = unicoords_model.objects.filter(bdml=bdmlid, t=time, entitytype=etype).order_by("entity","id")
verticeslist = unicoords_model.objects.filter(bdml=bdmlid, timept=tp, entitytype=etype).order_by("coords","id")
data = process_vertices(verticeslist, etype)
except:
errortext = {"univertices" : "error: cannot retrieve vertices"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
data = '%s(%s);' % (request.REQUEST['callback'], data)
return HttpResponse(data, "text/javascript")
else:
return HttpResponse(data, mimetype='application/json')
def stats(request, bdmlid, time):
try:
print "vertices_avg"
data = unicoords_model.objects.filter(bdml_id=bdmlid, t=time).aggregate(
avgx=Avg('x'),
avgy=Avg('y'),
avgz=Avg('z'),
xmax=Max('x'),
xmin=Min('x'),
ymax=Max('y'),
ymin=Min('y'),
zmax=Max('z'),
zmin=Min('z'),
)
except:
errortext = {"vertices_avg" : "error"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
        data = '%s(%s);' % (request.REQUEST['callback'], json.dumps(data))
return HttpResponse(data, "text/javascript")
else:
        return HttpResponse(json.dumps(data), mimetype='application/json')
def vertices(request, bdmlid, time, etype):
try:
verticeslist = Coordinates_model.objects.filter(bdml=bdmlid, t=time, entitytype=etype).order_by("entity","id")
data = process_vertices(verticeslist, etype)
except:
errortext = {"vertices" : "error"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
data = '%s(%s);' % (request.REQUEST['callback'], data)
return HttpResponse(data, "text/javascript")
else:
return HttpResponse(data, mimetype='application/json')
def vertices_avg(request, bdmlid, time):
try:
print "vertices_avg"
data = Coordinates_model.objects.filter(bdml_id=bdmlid, t=time).aggregate(
avgx=Avg('x'),
avgy=Avg('y'),
avgz=Avg('z'),
xmax=Max('x'),
xmin=Min('x'),
ymax=Max('y'),
ymin=Min('y'),
zmax=Max('z'),
zmin=Min('z'),
)
# print "data: %s" % data
# data = {'bdml_id': bdmlid, 'avgx' : avgx, 'avgy' : avgy, 'avgz' : avgz, 't' : time }
# 'tmin' : time_min,
# 'tmax' : time_max,
# 'min_t': minmax_t['time__min'],
# 'max_t': minmax_t['time__max'],
# 'cam_x': avgx*scale.xScale*scaleup,
# 'cam_y': avgy*scale.yScale*scaleup,
# 'cam_z': avgz*scale.zScale*scaleup,
# 'xmax' : xmax,
# 'ymax' : ymax,
# 'xmin' : xmin,
# 'ymin' : ymin,
# 'xscale': scale.xScale*scaleup,
# 'yscale': scale.yScale*scaleup,
# 'zscale': scale.zScale*scaleup,
# 'scaleup' : scaleup,
# }
# data = process_vertices(verticeslist, etype)
except:
print "error"
errortext = {"vertices" : "error"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
        data = '%s(%s);' % (request.REQUEST['callback'], json.dumps(data))
return HttpResponse(data, "text/javascript")
else:
return HttpResponse(json.dumps(data), mimetype='application/json')
def vertices_range(request, bdmlid, time_min, time_max, etype):
try:
verticeslist = Coordinates_model.objects.filter(bdml=bdmlid, t__gte=time_min, t__lte=time_max, entitytype=etype).order_by("entity","id")
data = process_vertices(verticeslist, etype)
except:
errortext = {"vertices" : "error"} # cannot retrieve vertices
        data = json.dumps(errortext)
if 'callback' in request.REQUEST:
# a jsonp response!
data = '%s(%s);' % (request.REQUEST['callback'], data)
return HttpResponse(data, "text/javascript")
else:
return HttpResponse(data, mimetype='application/json')
def process_vertices(vlist, etype):
debugPrint("starting")
debugPrint("verticeslist %s" % vlist)
if vlist.count() != 0:
v =""
for i in vlist:
if etype == "sphere":
debugPrint("%s %s %s %s %s %s %s" % (i.entity_id, i.id, i.x, i.y, i.z, i.t, i.radius))
v+="%s %s %s %s %s %s %s " % (i.entity_id, i.id, i.x, i.y, i.z, i.t, i.radius)
else:
debugPrint("%s %s %s %s %s %s" % (i.entity_id, i.id, i.x, i.y, i.z, i.t))
v+="%s %s %s %s %s %s " % (i.entity_id, i.id, i.x, i.y, i.z, i.t)
debugPrint(v)
varray = v.split()
# debugPrint(varray)
# vl = [float(j) for j in varray]
vl = map(float, varray)
debugPrint(vl)
returnobject = {"vertices" : vl}
data = json.dumps(returnobject)
else:
emptytext = {"vertices" : "error"} # no data
data = json.dumps(emptytext)
return data
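# Example of the JSON payload produced above (illustrative values): the
# "vertices" list is a flat sequence of numbers, grouped per entity as
#   entity_id, coord_id, x, y, z, t [, radius for spheres]
# e.g. {"vertices": [5.0, 12.0, 1.5, 2.5, 0.5, 10.0, 0.8, ...]}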
# The decorator allows Django to roll back the transaction if the function raises an exception
@transaction.commit_on_success
def read_file(request, filename):
"""
Reading in a BDML file and binding its content to bdml model and save that in the database
except it will not save any information from Data tag
:param request:
:param filename:
bdml_instance = bdml_api.parse('/tmp/wt-CDD0301160201.bdml')
bdml_instance = bdml_api.parse('/tmp/split-wt-CDD0301160201.bdml')
bdml_instance = bdml_api.parse('/tmp/bao-wild-type-embryo.bdml')
check database and see whether the same document exists - check info.title, summary.contributors, info.version
reading in bdml file using genereateDS
"""
outdata = {}
try:
import os
bdmlfile = os.path.join(bdmldir, filename)
debugPrint("bdmlfile = %s" % bdmlfile)
bdmlfile_instance = bdmllib.parse(bdmlfile)
checkbdmlid = bdmlfile_instance.info.bdmlID
debugPrint("BDML ID: %s" % checkbdmlid)
dbInfo = Info_model.objects.filter(bdmlID=checkbdmlid)
        if len(dbInfo) != 0: # a record with the same bdmlID already exists in the database
debugPrint("DB bdml ID %s" % dbInfo[0].bdmlID)
debugPrint("The same BDML data exists in the database")
debugPrint("This BDML file will not be read into the database")
outdata = {
'error' : 'Same BDML data exists in the database',
'BDML ID' : checkbdmlid,
}
else:
debugPrint("The BDML does not appear in this database")
debugPrint("Reading the BDML data into the database")
outdata = binding_bdml(bdmlfile_instance)
#check to see whether there is any exception error, if there is, then raise exception.
if outdata['error'] != 'none':
raise Exception(outdata)
debugPrint("finishing binding_bdml 2")
except Exception as e:
outdata = {
'error': "Cannot save BDML in the database",
'details': "%s" % e,
}
debugPrint(outdata)
jsonlist = json.dumps(outdata)
return HttpResponse(jsonlist, mimetype='application/javascript; charset=UTF-8')
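# Example responses from read_file (shapes taken from the dictionaries built
# above; the bdmlID value is a placeholder):
#   {"error": "none", "details": "bdml <bdmlID> is saved in the database"}
#   {"error": "Same BDML data exists in the database", "BDML ID": "<bdmlID>"}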
@transaction.commit_on_success
def binding_bdml(bdml_instance):
"""
    Binding BDML_Document model into Django object-oriented models
    :param bdml_instance: bdml instance through bdml_api
"""
outdata = {}
debugPrint("start binding_bdml")
try:
debugPrint("title: %s" % bdml_instance.info.title)
debugPrint("bdmlID: %s" % bdml_instance.info.bdmlID)
new_bdml = bdml_model(
title = bdml_instance.info.title,
bdml_ID = bdml_instance.info.bdmlID,
)
print("saving bdml model")
new_bdml.save()
bdmlid = new_bdml
print("bdml saved, bdmlid:%s" %bdmlid)
#Mapping Info model
new_info = Info_model(
bdml = bdmlid,
bdmlID = bdml_instance.info.bdmlID,
title=bdml_instance.info.title,
version=bdml_instance.info.version,
release=bdml_instance.info.release,
license=bdml_instance.info.license,
)
print("saving info")
new_info.save()
infoid = new_info
print("info saved")
print("info saved, infoid:%s" %infoid)
#Mapping Summary model
new_summary = Summary_model(
bdml = bdmlid,
description=bdml_instance.summary.description,
organism=bdml_instance.summary.organism,
datatype=bdml_instance.summary.datatype,
identifier=bdml_instance.summary.identifier,
basedon=bdml_instance.summary.basedon,
contributors=bdml_instance.summary.contributors,
citation=bdml_instance.summary.citation,
PMID=bdml_instance.summary.PMID,
dblink=bdml_instance.summary.dblink,
)
print("saving summary")
new_summary.save()
summaryid = new_summary
print("summary saved")
print("summary saved, summaryid:%s" %summaryid)
#Mapping Contact model
new_contact = Contact_model(
bdml = bdmlid,
name = bdml_instance.contact.name,
E_mail=bdml_instance.contact.E_mail,
phone=bdml_instance.contact.phone,
URL=bdml_instance.contact.URL,
organization=bdml_instance.contact.organization,
department=bdml_instance.contact.department,
laboratory=bdml_instance.contact.laboratory,
address=bdml_instance.contact.address,
)
print("saving contacts")
new_contact.save()
contactid = new_contact
print("contacts saved")
print("contacts saved, contactid:%s" %contactid)
#Mapping Methods model
new_methods = Methods_model(
bdml = bdmlid,
summary=bdml_instance.methods.summary,
source=bdml_instance.methods.source,
pdpml=bdml_instance.methods.pdpml,
)
print("saving methods")
new_methods.save()
methodsid = new_methods
print("methods saved")
print("methods saved, methodsid:%s" %methodsid)
#Above entities are independent of each others and they are not nested.
#Below entities are nested and need to iterate before they can be saved accordingly
#Mapping scaleUnit model
new_scaleunit = ScaleUnit_model(
bdml = bdmlid,
xScale=bdml_instance.data.scaleUnit.xScale,
yScale=bdml_instance.data.scaleUnit.yScale,
zScale=bdml_instance.data.scaleUnit.zScale,
tScale=bdml_instance.data.scaleUnit.tScale,
xyzUnit=bdml_instance.data.scaleUnit.xyzUnit,
tUnit=bdml_instance.data.scaleUnit.tUnit,
)
print("saving scaleunit")
new_scaleunit.save()
scaleunitid = new_scaleunit
print("scaleunit saved, scaleunit:%s" %scaleunitid)
#Mapping Data model
new_data = Data_model(
bdml = bdmlid,
scaleUnit = scaleunitid,
)
print("saving data")
new_data.save()
dataid = new_data
print("data %s saved" %dataid)
#Mapping Object model
# for i in bdml_instance.data.object:
# while bdml_instance.get_data().get_object() != None:
objectlist = bdml_instance.get_data().get_object()
for i in objectlist:
debugPrint("len objectlist %s" % len(objectlist))
debugPrint("object i %s" %i)
debugPrint("objectName: %s" % i.get_objectName())
new_object = Object_model(
bdml = bdmlid,
objectName=i.get_objectName(),
data = dataid,
)
print("saving object")
new_object.save()
objectid = new_object
print("object %s saved" %objectid)
#Mapping Feature model
debugPrint("starting processing feature")
if bdml_instance.data.get_feature() != []:
debugPrint("feature exists")
featurelist = bdml_instance.get_data().get_feature()
for ii in featurelist:
debugPrint("len featurelist %s" % len(featurelist))
debugPrint("feature ii %s" %ii)
debugPrint("featureName: %s" % ii.get_featureName())
debugPrint("featureScale: %s" % ii.get_featureScale())
debugPrint("featureUnit: %s" % ii.get_featureUnit())
new_feature = Feature_model(
bdml = bdmlid,
featureName=ii.get_featureName(),
featureScale=ii.get_featureScale(),
featureUnit=ii.get_featureUnit(),
data = dataid,
)
debugPrint("saving feature")
new_feature.save()
featureid = new_feature
debugPrint("feature %s saved" %featureid)
else:
debugPrint("feature does not exist")
#Mapping Component model
debugPrint("starting to bind component")
componentlist = bdml_instance.data.get_component()
debugPrint("len(componentlist) = %s" % len(componentlist))
if componentlist == []:
debugPrint("no component!!")
else:
for j in componentlist:
new_component = Component_model(
bdml = bdmlid,
componentID=j.get_componentID(),
componentName = j.get_componentName(),
time=float(j.get_time()),
data = dataid,
)
debugPrint("saving component")
debugPrint("bdml=%s, dataid=%s, componentid=%s, time=%s, componentname=%s, float-time=%s" % (bdmlid, dataid, j.get_componentID(), j.get_time(), j.get_componentName(), float(j.get_time())))
new_component.save()
component_dbid = new_component
debugPrint("component %s saved" %component_dbid)
#Mapping PrevID model
if j.get_prevID() != []:
debugPrint("prevID exists")
prevIDlist = j.get_prevID()
debugPrint("len(prevIDlist)=%s" % len(prevIDlist))
for jp in j.prevID:
debugPrint("prevID exists %s" % jp)
new_previdmodel = PrevID_model(
bdml = bdmlid,
component = component_dbid,
data = dataid,
prevID = jp,
)
debugPrint("saving prevID")
new_previdmodel.save()
previdmodelid = new_previdmodel
debugPrint("prevID %s saved" % previdmodelid)
else:
debugPrint("no prevID")
#Mapping GroupID model
if j.get_groupID() != []:
debugPrint("groupID exists")
groupIDlist = j.get_groupID()
debugPrint("len(groupIDlist)=%s" % len(groupIDlist))
for jg in j.groupID:
debugPrint("groupID exists %s" % jg)
new_groupidmodel = GroupID_model(
bdml = bdmlid,
component = component_dbid,
data = dataid,
groupID = jg,
)
debugPrint("saving groupID")
new_groupidmodel.save()
groupidmodelid = new_groupidmodel
debugPrint("groupID %s saved" % groupidmodelid)
else:
debugPrint("no groupID")
#Mapping Measurement model
for k in j.get_measurement():
new_measurement = Measurement_model(
bdml = bdmlid,
objectRef=k.get_objectRef(),
component = component_dbid,
)
debugPrint("saving measurement bdml %s, objectRef %s, \
component %s" \
% (bdmlid, k.get_objectRef(), component_dbid))
new_measurement.save()
measurementid = new_measurement
debugPrint("measurement %s saved" %measurementid)
#Mapping Line model to Entity model
#checking if it is empty before proceed
if k.get_line() != None:
debugPrint("found line")
debugPrint("creating entity")
lines_coords = k.get_line().get_coords()
debugPrint("lines coords %s" % lines_coords)
# coordinates of lines; linecoords="coordindates of each line"
for linecoords in lines_coords:
entityid = create_entity(bdmlid, component_dbid, measurementid, 'line')
# debugPrint("line: bdmlid %s, entityid %s, linecoords %s, time %s " % (bdmlid, entityid, k.get_line().get_coords(), j.get_time() ))
debugPrint("line: bdmlid %s, entityid %s, linecoords %s, time %s " % (bdmlid, entityid, linecoords, j.get_time() ))
process_coordinates(bdmlid, entityid, linecoords, 'line', j.get_time(), None)
propertylist = k.get_line().get_property()
debugPrint("len(propertylist) %s" % len(propertylist))
if len(propertylist) != 0:
debugPrint("creating property")
# for l in propertylist:
# create_property(bdmlid, entityid, l)
create_property(bdmlid, entityid, propertylist)
else:
debugPrint("no property")
#TODO replicate sphere to circle
#Mapping Circle model to Entity model
if k.get_circle() != None:
debugPrint("found circle %s" % k.get_circle())
for l in k.get_circle():
entityid = create_entity(bdmlid, component_dbid, measurementid, 'circle')
process_coordinates(bdmlid, entityid, l.get_coords(), 'circle', j.get_time(), l.get_radius())
create_property(bdmlid, entityid, l.property)
#Mapping Sphere model to Entity model
if k.get_sphere() != None:
debugPrint("found sphere")
debugPrint("creating entity")
spheres_coords = k.get_sphere().get_coords()
debugPrint("spheres coords %s" % spheres_coords)
# coordinates of spheres; spheres_coords="coordindates of each sphere"
#TODO is there more than one coords in spheres_coords?
# for spherecoords in spheres_coords:
entityid = create_entity(bdmlid, component_dbid, measurementid, 'sphere')
debugPrint("sphere: bdmlid %s, entityid %s, spheres_coords %s, time %s, radius %s " % (bdmlid, entityid, spheres_coords, j.get_time(), k.get_sphere().get_radius()))
process_coordinates(bdmlid, entityid, spheres_coords, 'sphere', j.get_time(), k.get_sphere().get_radius())
propertylist = k.get_sphere().get_property()
debugPrint("len(propertylist) %s" % len(propertylist))
if len(propertylist) != 0:
debugPrint("creating property")
# for l in propertylist:
# create_property(bdmlid, entityid, l)
create_property(bdmlid, entityid, propertylist)
else:
debugPrint("no property")
#TODO replicate line to face?
#Mapping Face model to Entity model
if k.get_face() != None:
debugPrint("found face")
debugPrint("creating entity")
face_coords = k.get_face().get_coords()
debugPrint("face coords %s" % face_coords)
# coordinates of lines; linecoords="coordindates of each line"
for facecoords in face_coords:
entityid = create_entity(bdmlid, component_dbid, measurementid, 'face')
debugPrint("face: bdmlid %s, entityid %s, facecoords %s, time %s " % (bdmlid, entityid, facecoords, j.get_time() ))
process_coordinates(bdmlid, entityid, facecoords, 'face', j.get_time(), None)
# if k.get_face().get_property() != None:
propertylist = k.get_face().get_property()
debugPrint("len(propertylist) %s" % len(propertylist))
if len(propertylist) != 0:
debugPrint("creating property")
# for l in propertylist:
# create_property(bdmlid, entityid, l)
create_property(bdmlid, entityid, propertylist)
else:
debugPrint("no property")
#Mapping Point model to Entity model
#checking if it is empty before proceed
if k.get_point() != None:
debugPrint("found point")
debugPrint("creating entity")
pt_coords = k.get_point().get_coords()
debugPrint("Points coords %s" % pt_coords)
entityid = create_entity(bdmlid, component_dbid, measurementid, 'point')
debugPrint("point: bdmlid %s, entityid %s, coords %s, time %s " % (bdmlid, entityid, pt_coords, j.get_time() ))
process_coordinate(bdmlid, entityid, pt_coords, 'point', j.get_time(), None)
propertylist = k.get_point().get_property()
debugPrint("len(propertylist) %s" % len(propertylist))
if len(propertylist) != 0:
debugPrint("creating property")
# for l in propertylist:
# create_property(bdmlid, entityid, l)
create_property(bdmlid, entityid, propertylist)
else:
debugPrint("no property")
debugPrint("creating entity")
pt_coords = k.get_point().get_coords()
debugPrint("Points coords %s" % pt_coords)
entityid = create_entity(bdmlid, component_dbid, measurementid, 'point')
debugPrint("point: bdmlid %s, entityid %s, coords %s, time %s " % (bdmlid, entityid, pt_coords, j.get_time() ))
process_coordinate(bdmlid, entityid, pt_coords, 'point', j.get_time(), None)
propertylist = k.get_point().get_property()
debugPrint("len(propertylist) %s" % len(propertylist))
if len(propertylist) != 0:
debugPrint("creating property")
# for l in propertylist:
# create_property(bdmlid, entityid, l)
create_property(bdmlid, entityid, propertylist)
else:
debugPrint("no property")
#TODO replicate sphere to graph?
#Mapping Graph model to Entity model
if k.get_graph() != None:
debugPrint("found graph")
for l in k.get_graph():
entityid = create_entity(bdmlid, component_dbid, measurementid, 'graph')
process_coordinates(bdmlid, entityid, l.get_coords(), 'graph', j.get_time(), None)
create_property(bdmlid, entityid, l.property)
print "starting to save bdmldoc"
ver = "0.15"
uver = ver.decode('utf-8')
print("bdmldoc - uver :%s" % uver)
print("bdmldoc - uver type:%s" % type(uver))
print("bdmldoc - info:%s" % infoid.id)
print("bdmldoc - summary:%s" % summaryid.id)
print("bdmldoc - contact:%s" % contactid.id)
print("bdmldoc - methods:%s" % methodsid.id)
print("bdmldoc - data:%s" % dataid.id)
print("bdmldoc - bdmlid:%s" % bdmlid.id)
# print("bdmldoc - ownerid:%s" % ownerid.id)
print("bdmldoc - info:%s, version:%s, summary:%s, contact:%s, methods:%s, data:%s, bdml:%s" % (infoid, uver, summaryid, contactid, methodsid, dataid, bdmlid))
print("bdmldoc - info.id:%s, version:%s, summary.id:%s, contact.id:%s, methods.id:%s, data.id:%s, bdml.id:%s" % (infoid.id, ver, summaryid.id, contactid.id, methodsid.id, dataid.id, bdmlid.id))
# print("bdmldoc - info.id:%s, version:%s, summary.id:%s, contact.id:%s, methods.id:%s, data.id:%s, bdml.id:%s, owner.id: %s" % (infoid.id, ver, summaryid.id, contactid.id, methodsid.id, dataid.id, bdmlid.id, ownerid.id))
# print("bdmldoc - ver type:%s" % type(ver))
# uver = unicode( ver )
# print("bdmldoc - uver:%s" % uver)
# print("bdmldoc - ver type:%s" % type(ver))
new_bdmldoc = bdmlDocument_model(
bdml = bdmlid,
# version = uver,
info = infoid,
summary = summaryid,
contact = contactid,
methods = methodsid,
data = dataid,
# owner = ownerid,
)
# new_bdmldoc = bdmlDocument_model(
# bdml = bdmlid.id,
# version = uver,
# info = infoid.id,
# summary = summaryid.id,
# contact = contactid.id,
# methods = methodsid.id,
# data = dataid.id,
# owner = ownerid,
# )
print("saving (Y) bdmldoc")
print("bdmldoc: bdml:%s, version:%s, info:%s, summary%s, contact:%s, methods:%s, data:%s" % (new_bdmldoc.bdml, new_bdmldoc.version, new_bdmldoc.info, new_bdmldoc.summary, new_bdmldoc.contact, new_bdmldoc.methods, new_bdmldoc.data))
new_bdmldoc.save()
print("bdml file is saved")
bdmldocid = new_bdmldoc
print("bdmldoc saved, bdmldocid:%s" %bdmldocid)
outdata = {
'details': "bdml %s is saved in the database" % new_info.bdmlID,
'error': "none",
}
except Exception as e:
outdata = {
'details': "%s" % e,
'error': "cannot save in the database",
}
else:
return outdata
#debugPrint outdata
#jsonlist = json.dumps(outdata)
#return HttpResponse(jsonlist, mimetype='application/javascript; charset=UTF-8')
def create_entity(instance_bdmlid, instance_componentid, instance_measurementid, instance_type):
debugPrint("creating entity now")
new_entity = Entity_model(
bdml = instance_bdmlid,
component = instance_componentid,
measurement = instance_measurementid,
entitytype = instance_type,
)
debugPrint("saving entity, component %s, measurement %s, entitytype %s" % (new_entity.component, new_entity.measurement, new_entity.entitytype))
new_entity.save()
instance_entityid = new_entity
debugPrint("entity %s saved" % instance_entityid)
# debugPrint l.coordList
return instance_entityid
def create_property(instance_bdmlid, instance_entityid, instance_propertylist):
if len(instance_propertylist) != 0:
debugPrint("propertylist len %s" % len(instance_propertylist))
for p in instance_propertylist:
debugPrint("p %s" % p)
debugPrint("property ref %s, " % p.featureRef)
debugPrint("feature val %s, " % p.featureValue)
debugPrint("entity %s, " % instance_entityid)
debugPrint("bdml %s, " % instance_bdmlid)
new_property = Property_model(
featureRef = p.featureRef,
featureValue = p.featureValue,
entity = instance_entityid,
bdml = instance_bdmlid,
)
debugPrint("saving property")
new_property.save()
propertyid = new_property
debugPrint("property %s saved" %propertyid)
# return propertyid
def process_coordinates(instance_bdmlid, instance_id, instance_coordslist, instance_typeid, instance_time, instance_radius):
debugPrint("processing coordinates")
debugPrint("coordslist %s" % instance_coordslist)
coords = instance_coordslist.split(' ')
for a in coords:
debugPrint("a %s" % a)
b = a.split(',')
debugPrint("b %s" % b)
debugPrint("x=%f" % float(b[0]))
debugPrint("y=%f" % float(b[1]))
debugPrint("z=%f" % float(b[2]))
debugPrint("t=%f" % float(instance_time))
if instance_radius==None:
new_coord = Coordinates_model(
x = float(b[0]),
y = float(b[1]),
z = float(b[2]),
t = float(instance_time),
entitytype = instance_typeid,
bdml = instance_bdmlid,
entity = instance_id,
)
else:
new_coord = Coordinates_model(
x = float(b[0]),
y = float(b[1]),
z = float(b[2]),
t = float(instance_time),
radius = instance_radius,
entitytype = instance_typeid,
bdml = instance_bdmlid,
entity = instance_id,
)
debugPrint("saving coordinate")
new_coord.save()
coordid = new_coord
debugPrint("coordinate %s saved" %coordid)
# return coordid
def process_coordinate(instance_bdmlid, instance_id, instance_coords, instance_typeid, instance_time, instance_radius):
debugPrint("processing coordinates")
debugPrint("coords %s" % instance_coords)
# coords = instance_coordslist.split(' ')
# for a in coords:
# debugPrint("a %s" % a)
b = instance_coords[0].split(',')
debugPrint("b %s" % b)
debugPrint("x=%f" % float(b[0]))
debugPrint("y=%f" % float(b[1]))
debugPrint("z=%f" % float(b[2]))
debugPrint("t=%f" % float(instance_time))
if instance_radius==None:
new_coord = Coordinates_model(
x = float(b[0]),
y = float(b[1]),
z = float(b[2]),
t = float(instance_time),
entitytype = instance_typeid,
bdml = instance_bdmlid,
entity = instance_id,
)
else:
new_coord = Coordinates_model(
x = float(b[0]),
y = float(b[1]),
z = float(b[2]),
t = float(instance_time),
radius = instance_radius,
entitytype = instance_typeid,
bdml = instance_bdmlid,
entity = instance_id,
)
debugPrint("saving coordinate")
new_coord.save()
coordid = new_coord
debugPrint("coordinate %s saved" %coordid)
@csrf_exempt
@transaction.commit_on_success
def qdb_data(request, instance_BDMLID):
"""
removing bdml data from the database
: param: instance_BDMLID - bdml's unique ID in BDMLDOCUMENT_Model. Note that it is not the BDML_UUID
"""
#TODO unfinished - need checking
if request.method == "DELETE":
instanceID = bdml_model.objects.filter(id=instance_BDMLID)
if len(instanceID) == 1:
debugPrint("found BDML %s" % instanceID)
Coordinates_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Coordinates")
Property_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Properties")
Entity_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Entities")
Measurement_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Measurement")
Component_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Components")
Feature_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Feature")
Object_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Object")
PrevID_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML PrevID")
Data_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Data")
ScaleUnit_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML ScaleType")
Methods_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Methods")
Summary_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Summary")
Contact_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Contact")
Info_model.objects.filter(bdml=instance_BDMLID).delete()
debugPrint("deleted BDML Info")
bdmlDocument_model.objects.filter(id=instance_BDMLID).delete()
debugPrint("deleted BDML Document")
bdml_model.objects.filter(id=instance_BDMLID).delete()
debugPrint("deleted BDML")
outdata = {'error' : "none", }
elif len(instanceID) == 0:
debugPrint("No BDML ID%s found" %instance_BDMLID)
outdata = {'error' : "no bdml ID%s found" %instance_BDMLID}
else:
debugPrint("error: IDs in database %s" % instanceID)
outdata = {'error' : "inconsistency in the database, more than 1 ID found? %s" %instance_BDMLID}
else:
outdata = {'error' : "wrong method?", }
debugPrint(outdata)
jsonlist = json.dumps(outdata)
return HttpResponse(jsonlist, mimetype='application/javascript; charset=UTF-8')
#TODO create level to control the necessary print statement
def debugPrint(string):
# if __debug__:
if myDebug==True:
print string
else:
return
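# Illustrative sketch (not part of the original app): one possible URLconf
# wiring for the views above. The URL patterns are assumptions; the real
# patterns live in the project's urls.py for this Django version.
# from django.conf.urls import patterns, url
# urlpatterns = patterns('SSBD.BDML.views',
#     url(r'^vertices/(?P<bdmlid>\d+)/(?P<time>[\d.]+)/(?P<etype>\w+)/$', 'vertices'),
#     url(r'^stats/(?P<bdmlid>\d+)/(?P<time>[\d.]+)/$', 'stats'),
# )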
|
gpl-3.0
| 288,396,403,755,675,300 | 43.993096 | 239 | 0.544234 | false |
UIKit0/friture
|
friture/octavespectrum.py
|
1
|
9254
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtWidgets
from numpy import log10, array, arange, where
from friture.logger import PrintLogger
from friture.histplot import HistPlot
from friture.octavespectrum_settings import (OctaveSpectrum_Settings_Dialog, # settings dialog
DEFAULT_SPEC_MIN,
DEFAULT_SPEC_MAX,
DEFAULT_WEIGHTING,
DEFAULT_BANDSPEROCTAVE,
DEFAULT_RESPONSE_TIME)
from friture.filter import (octave_filter_bank_decimation, octave_frequencies,
octave_filter_bank_decimation_filtic, NOCTAVE)
from friture.exp_smoothing_conv import pyx_exp_smoothed_value
from friture import generated_filters
from friture.audiobackend import SAMPLING_RATE
import friture.renard as renard
SMOOTH_DISPLAY_TIMER_PERIOD_MS = 25
class OctaveSpectrum_Widget(QtWidgets.QWidget):
def __init__(self, parent, logger=PrintLogger()):
super().__init__(parent)
self.logger = logger
self.audiobuffer = None
self.setObjectName("Spectrum_Widget")
self.gridLayout = QtWidgets.QGridLayout(self)
self.gridLayout.setObjectName("gridLayout")
self.PlotZoneSpect = HistPlot(self, self.logger)
self.PlotZoneSpect.setObjectName("PlotZoneSpect")
self.gridLayout.addWidget(self.PlotZoneSpect, 0, 0, 1, 1)
self.spec_min = DEFAULT_SPEC_MIN
self.spec_max = DEFAULT_SPEC_MAX
self.weighting = DEFAULT_WEIGHTING
self.response_time = DEFAULT_RESPONSE_TIME
self.PlotZoneSpect.setspecrange(self.spec_min, self.spec_max)
self.PlotZoneSpect.setweighting(self.weighting)
self.filters = octave_filters(DEFAULT_BANDSPEROCTAVE)
self.dispbuffers = [0] * DEFAULT_BANDSPEROCTAVE * NOCTAVE
# set kernel and parameters for the smoothing filter
self.setresponsetime(self.response_time)
# initialize the settings dialog
self.settings_dialog = OctaveSpectrum_Settings_Dialog(self, self.logger)
# method
def set_buffer(self, buffer):
self.audiobuffer = buffer
def compute_kernels(self, alphas, Ns):
kernels = []
for alpha, N in zip(alphas, Ns):
kernels += [(1. - alpha) ** arange(N - 1, -1, -1)]
return kernels
def get_kernel(self, kernel, N):
return
def get_conv(self, kernel, data):
return kernel * data
def exp_smoothed_value(self, kernel, alpha, data, previous):
N = len(data)
if N == 0:
return previous
else:
value = alpha * (kernel[-N:] * data).sum() + previous * (1. - alpha) ** N
return value
def handle_new_data(self, floatdata):
# the behaviour of the filters functions is sometimes
# unexpected when they are called on empty arrays
if floatdata.shape[1] == 0:
return
# for now, take the first channel only
floatdata = floatdata[0, :]
# compute the filters' output
y, decs_unused = self.filters.filter(floatdata)
# compute the widget data
sp = [pyx_exp_smoothed_value(kernel, alpha, bankdata ** 2, old) for bankdata, kernel, alpha, old in zip(y, self.kernels, self.alphas, self.dispbuffers)]
# store result for next computation
self.dispbuffers = sp
sp = array(sp)
        if self.weighting == 0:
            w = 0.
        elif self.weighting == 1:
            w = self.filters.A
        elif self.weighting == 2:
            w = self.filters.B
        else:
            w = self.filters.C
epsilon = 1e-30
db_spectrogram = 10 * log10(sp + epsilon) + w
self.PlotZoneSpect.setdata(self.filters.flow, self.filters.fhigh, self.filters.f_nominal, db_spectrogram)
# method
def canvasUpdate(self):
if not self.isVisible():
return
self.PlotZoneSpect.draw()
def setmin(self, value):
self.spec_min = value
self.PlotZoneSpect.setspecrange(self.spec_min, self.spec_max)
def setmax(self, value):
self.spec_max = value
self.PlotZoneSpect.setspecrange(self.spec_min, self.spec_max)
def setweighting(self, weighting):
self.weighting = weighting
self.PlotZoneSpect.setweighting(weighting)
def setresponsetime(self, response_time):
# time = SMOOTH_DISPLAY_TIMER_PERIOD_MS/1000. #DISPLAY
# time = 0.025 #IMPULSE setting for a sound level meter
# time = 0.125 #FAST setting for a sound level meter
# time = 1. #SLOW setting for a sound level meter
self.response_time = response_time
# an exponential smoothing filter is a simple IIR filter
# s_i = alpha*x_i + (1-alpha)*s_{i-1}
# we compute alpha so that the N most recent samples represent 100*w percent of the output
w = 0.65
decs = self.filters.get_decs()
ns = [self.response_time * SAMPLING_RATE / dec for dec in decs]
        Ns = [2 * 4096 // dec for dec in decs]  # integer kernel lengths
self.alphas = [1. - (1. - w) ** (1. / (n + 1)) for n in ns]
# print(ns, Ns)
self.kernels = self.compute_kernels(self.alphas, Ns)
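        # Worked example of the smoothing constant (illustrative numbers):
        # with response_time = 0.125 s, SAMPLING_RATE = 48000 and dec = 1,
        # n = 6000 samples and alpha = 1 - (1 - 0.65)**(1/6001) ~ 1.75e-4,
        # i.e. the most recent 6000 samples carry ~65% of the smoothed output.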
def setbandsperoctave(self, bandsperoctave):
self.filters.setbandsperoctave(bandsperoctave)
# recreate the ring buffers
self.dispbuffers = [0] * bandsperoctave * NOCTAVE
# reset kernel and parameters for the smoothing filter
self.setresponsetime(self.response_time)
def settings_called(self, checked):
self.settings_dialog.show()
def saveState(self, settings):
self.settings_dialog.saveState(settings)
def restoreState(self, settings):
self.settings_dialog.restoreState(settings)
class octave_filters():
def __init__(self, bandsperoctave):
[self.bdec, self.adec] = generated_filters.PARAMS['dec']
self.setbandsperoctave(bandsperoctave)
def filter(self, floatdata):
y, dec, zfs = octave_filter_bank_decimation(self.bdec, self.adec,
self.boct, self.aoct,
floatdata, zis=self.zfs)
self.zfs = zfs
return y, dec
def get_decs(self):
decs = [2 ** j for j in range(0, NOCTAVE)[::-1] for i in range(0, self.bandsperoctave)]
return decs
def setbandsperoctave(self, bandsperoctave):
self.bandsperoctave = bandsperoctave
self.nbands = NOCTAVE * self.bandsperoctave
self.fi, self.flow, self.fhigh = octave_frequencies(self.nbands, self.bandsperoctave)
[self.boct, self.aoct, fi, flow, fhigh] = generated_filters.PARAMS['%d' % bandsperoctave]
# [self.b_nodec, self.a_nodec, fi, fl, fh] = octave_filters(self.nbands, self.bandsperoctave)
f = self.fi
Rc = 12200. ** 2 * f ** 2 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2))
Rb = 12200. ** 2 * f ** 3 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2) * ((f ** 2 + 158.5 ** 2) ** 0.5))
Ra = 12200. ** 2 * f ** 4 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2) * ((f ** 2 + 107.7 ** 2) ** 0.5) * ((f ** 2 + 737.9 ** 2) ** 0.5))
self.C = 0.06 + 20. * log10(Rc)
self.B = 0.17 + 20. * log10(Rb)
self.A = 2.0 + 20. * log10(Ra)
self.zfs = octave_filter_bank_decimation_filtic(self.bdec, self.adec, self.boct, self.aoct)
if bandsperoctave == 1:
basis = renard.R5
elif bandsperoctave == 3:
basis = renard.R10
elif bandsperoctave == 6:
basis = renard.R20
elif bandsperoctave == 12:
basis = renard.R40
elif bandsperoctave == 24:
basis = renard.R80
else:
raise Exception("Unknown bandsperoctave: %d" % (bandsperoctave))
# search the index of 1 kHz, the reference
i = where(self.fi == 1000.)[0][0]
# build the frequency scale
self.f_nominal = []
k = 0
while len(self.f_nominal) < len(self.fi) - i:
self.f_nominal += ["{0:.{width}f}k".format(10 ** k * f, width=2 - k) for f in basis]
k += 1
self.f_nominal = self.f_nominal[:len(self.fi) - i]
k = 0
while len(self.f_nominal) < len(self.fi):
self.f_nominal = ["%d" % (10 ** (2 - k) * f) for f in basis] + self.f_nominal
k += 1
self.f_nominal = self.f_nominal[-len(self.fi):]
|
gpl-3.0
| 4,978,378,201,274,751,000 | 35.290196 | 160 | 0.596499 | false |
artekw/mqtt-panel
|
display.py
|
1
|
2265
|
import datetime
import time
from ht1632cpy import HT1632C
import settings
from lightsensor import getLight
interface = HT1632C(2, 0)
interface.pwm(settings.read('settings', 'matrix', 'default_brightness'))
def dimmer():
dimmer_brightness = settings.read('settings', 'matrix', 'dimmer_brightness')
default_brightness = settings.read('settings', 'matrix', 'default_brightness')
if getLight() == False:
interface.pwm(dimmer_brightness)
else:
interface.pwm(default_brightness)
def displayText(x, text, text_color, bg_color, delay):
interface.clear()
    # defaults, used when the colour name is missing or unrecognised
    c_text = interface.GREEN
    c_bg = interface.BLACK
    if text_color == 'text_green':
        c_text = interface.GREEN
    elif text_color == 'text_red':
        c_text = interface.RED
    elif text_color == 'text_orange':
        c_text = interface.ORANGE
    elif text_color == 'text_black':
        c_text = interface.BLACK
    if bg_color == 'bg_green':
        c_bg = interface.GREEN
    elif bg_color == 'bg_red':
        c_bg = interface.RED
    elif bg_color == 'bg_orange':
        c_bg = interface.ORANGE
    elif bg_color == 'bg_black':
        c_bg = interface.BLACK
    # avoid invisible text when foreground and background colours match
    if c_text == c_bg:
        c_text = interface.GREEN
        c_bg = interface.BLACK
interface.box(0, 0, interface.width(), interface.height(), c_bg)
for c in text:
interface.putchar(x, 4, c, interface.font6x8, c_text, c_bg)
x += interface.fontwidth(interface.font6x8)
interface.sendframe()
time.sleep(float(delay))
def clock():
now = datetime.datetime.now()
hour = str(now.hour).zfill(2)
minute = str(now.minute).zfill(2)
second = str(now.second).zfill(2)
interface.clear()
x = 5
dividers = 2
for section in (hour, minute, second):
for c in section:
interface.putchar(x, 4, c, interface.font7x8num, interface.GREEN, interface.BLACK)
x += interface.fontwidth(interface.font7x8num)
if dividers > 0:
interface.putchar(x, 4, ':', interface.font6x8, interface.GREEN, interface.BLACK)
x += interface.fontwidth(interface.font6x8) - 1
dividers -= 1
interface.sendframe()
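# Minimal driver sketch (illustrative; the real entry point lives elsewhere in
# the project, and the refresh interval here is an assumption):
#
#   if __name__ == '__main__':
#       while True:
#           dimmer()
#           clock()
#           time.sleep(1)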
|
mit
| 5,548,840,018,202,119,000 | 27.325 | 94 | 0.618985 | false |
Slko/Slot
|
slot/handlers/text_generation.py
|
1
|
2447
|
import asyncio
import random
from slot import requires
from slot.handlers import BaseHandler, MessageHandled, command, unhandled_messages
class PoemHandler(BaseHandler):
def generate_poem(self, start=None):
textgen = self.bot.extensions["textgen"]
rhyme = self.bot.extensions["rhyme"]
lines = []
lines_rhymed = []
for i in range(2):
while True:
line = textgen.generate_sentence(start=None if i != 0 else start)
rhymed = rhyme.find_rhyme(line)
if rhymed is None:
continue
lines.append(line)
lines_rhymed.append(rhymed)
break
return "\n".join(lines + lines_rhymed)
@command("стихи")
@command("ситхи")
@command("stihi")
@requires("extension", "rhyme")
@asyncio.coroutine
def poem(self, msg, cmd, args):
yield from msg.conversation.reply(msg.protocol().escape_unsafe(self.generate_poem(args)))
raise MessageHandled
@command("гш")
@command("gsh")
@asyncio.coroutine
def gsh(self, msg, cmd, args):
textgen = self.bot.extensions["textgen"]
yield from msg.conversation.reply(msg.protocol().escape_unsafe(textgen.generate(random.randint(10, 20),
start=args)))
raise MessageHandled
@command("мегаслот")
@command("megaslot")
@asyncio.coroutine
def megaslot(self, msg, cmd, args):
textgen = self.bot.extensions["textgen"]
result = []
result.extend(textgen.generate_sentence(random.randint(5, 7), start=args if i == 0 else None) for i in range(2))
result.extend(textgen.generate_sentence(result[i].count(" ") + 1) for i in range(2))
yield from msg.conversation.reply(msg.protocol().escape_unsafe("\n".join(result)))
raise MessageHandled
@unhandled_messages
@asyncio.coroutine
def name(self, msg, mine):
if mine:
return
textgen = self.bot.extensions["textgen"]
if "слот" in msg.text.casefold():
yield from msg.conversation.reply(msg.protocol().escape_unsafe(textgen.generate(random.randint(5, 7))))
raise MessageHandled
@requires("API", 1)
@requires("extension", "textgen")
def factory(bot, config):
return PoemHandler(bot, config)
|
bsd-3-clause
| -6,882,219,567,834,149,000 | 30.881579 | 120 | 0.598019 | false |
fhqgfss/MoHa
|
moha/posthf/cc/ccsd.py
|
1
|
9945
|
import numpy as np
def spinfock(eorbitals):
"""
"""
if type(eorbitals) is np.ndarray:
dim = 2*len(eorbitals)
fs = np.zeros(dim)
for i in range(0,dim):
fs[i] = eorbitals[i//2]
fs = np.diag(fs) # put MO energies in diagonal array
elif type(eorbitals) is dict:
dim = 2*len(eorbitals['alpha'])
fs = np.zeros(dim)
for i in range(0,dim):
if i%2==0:
fs[i] = eorbitals['alpha'][i//2]
            else:
fs[i] = eorbitals['beta'][i//2]
fs = np.diag(fs) # put MO energies in diagonal array
return fs
def initialize(fs,spinints,Nelec,dim):
"""
Init empty T1 (ts) and T2 (td) arrays
and make denominator arrays Dai, Dabij
"""
# Initial guess for T1 and T2
ts = np.zeros((dim,dim))
td = np.zeros((dim,dim,dim,dim))
for a in range(Nelec,dim):
for b in range(Nelec,dim):
for i in range(0,Nelec):
for j in range(0,Nelec):
td[a,b,i,j] += spinints[i,j,a,b]/(fs[i,i] + fs[j,j] - fs[a,a] - fs[b,b])
# Equation (12) of Stanton
Dai = np.zeros((dim,dim))
for a in range(Nelec,dim):
for i in range(0,Nelec):
Dai[a,i] = fs[i,i] - fs[a,a]
# Stanton eq (13)
Dabij = np.zeros((dim,dim,dim,dim))
for a in range(Nelec,dim):
for b in range(Nelec,dim):
for i in range(0,Nelec):
for j in range(0,Nelec):
Dabij[a,b,i,j] = fs[i,i] + fs[j,j] - fs[a,a] - fs[b,b]
return ts,td,Dai,Dabij
# Stanton eq (9)
def taus(ts,td,a,b,i,j):
taus = td[a,b,i,j] + 0.5*(ts[a,i]*ts[b,j] - ts[b,i]*ts[a,j])
return taus
# Stanton eq (10)
def tau(ts,td,a,b,i,j):
tau = td[a,b,i,j] + ts[a,i]*ts[b,j] - ts[b,i]*ts[a,j]
return tau
# We need to update our intermediates at the beginning, and
# at the end of each iteration. Each iteration provides a new
# guess at the amplitudes T1 (ts) and T2 (td), that *hopefully*
# converges to a stable, ground-state, solution.
def updateintermediates(x,Nelec,dim,fs,spinints,ts,td):
if x == True:
# Stanton eq (3)
Fae = np.zeros((dim,dim))
for a in range(Nelec,dim):
for e in range(Nelec,dim):
Fae[a,e] = (1 - (a == e))*fs[a,e]
for m in range(0,Nelec):
Fae[a,e] += -0.5*fs[m,e]*ts[a,m]
for f in range(Nelec,dim):
Fae[a,e] += ts[f,m]*spinints[m,a,f,e]
for n in range(0,Nelec):
Fae[a,e] += -0.5*taus(ts,td,a,f,m,n)*spinints[m,n,e,f]
# Stanton eq (4)
Fmi = np.zeros((dim,dim))
for m in range(0,Nelec):
for i in range(0,Nelec):
Fmi[m,i] = (1 - (m == i))*fs[m,i]
for e in range(Nelec,dim):
Fmi[m,i] += 0.5*ts[e,i]*fs[m,e]
for n in range(0,Nelec):
Fmi[m,i] += ts[e,n]*spinints[m,n,i,e]
for f in range(Nelec,dim):
Fmi[m,i] += 0.5*taus(ts,td,e,f,i,n)*spinints[m,n,e,f]
# Stanton eq (5)
Fme = np.zeros((dim,dim))
for m in range(0,Nelec):
for e in range(Nelec,dim):
Fme[m,e] = fs[m,e]
for n in range(0,Nelec):
for f in range(Nelec,dim):
Fme[m,e] += ts[f,n]*spinints[m,n,e,f]
# Stanton eq (6)
Wmnij = np.zeros((dim,dim,dim,dim))
for m in range(0,Nelec):
for n in range(0,Nelec):
for i in range(0,Nelec):
for j in range(0,Nelec):
Wmnij[m,n,i,j] = spinints[m,n,i,j]
for e in range(Nelec,dim):
Wmnij[m,n,i,j] += ts[e,j]*spinints[m,n,i,e] - ts[e,i]*spinints[m,n,j,e]
for f in range(Nelec,dim):
Wmnij[m,n,i,j] += 0.25*tau(ts,td,e,f,i,j)*spinints[m,n,e,f]
# Stanton eq (7)
Wabef = np.zeros((dim,dim,dim,dim))
for a in range(Nelec,dim):
for b in range(Nelec,dim):
for e in range(Nelec,dim):
for f in range(Nelec,dim):
Wabef[a,b,e,f] = spinints[a,b,e,f]
for m in range(0,Nelec):
Wabef[a,b,e,f] += -ts[b,m]*spinints[a,m,e,f] + ts[a,m]*spinints[b,m,e,f]
for n in range(0,Nelec):
Wabef[a,b,e,f] += 0.25*tau(ts,td,a,b,m,n)*spinints[m,n,e,f]
# Stanton eq (8)
Wmbej = np.zeros((dim,dim,dim,dim))
for m in range(0,Nelec):
for b in range(Nelec,dim):
for e in range(Nelec,dim):
for j in range(0,Nelec):
Wmbej[m,b,e,j] = spinints[m,b,e,j]
for f in range(Nelec,dim):
Wmbej[m,b,e,j] += ts[f,j]*spinints[m,b,e,f]
for n in range(0,Nelec):
Wmbej[m,b,e,j] += -ts[b,n]*spinints[m,n,e,j]
for f in range(Nelec,dim):
Wmbej[m,b,e,j] += -(0.5*td[f,b,j,n] + ts[f,j]*ts[b,n])*spinints[m,n,e,f]
return Fae, Fmi, Fme, Wmnij, Wabef, Wmbej
# makeT1 and makeT2, as they imply, construct the actual amplitudes necessary for computing
# the CCSD energy (or computing an EOM-CCSD Hamiltonian, etc)
# Stanton eq (1)
def makeT1(x,Nelec,dim,fs,spinints,ts,td,Dai,Fae,Fmi,Fme):
if x == True:
tsnew = np.zeros((dim,dim))
for a in range(Nelec,dim):
for i in range(0,Nelec):
tsnew[a,i] = fs[i,a]
for e in range(Nelec,dim):
tsnew[a,i] += ts[e,i]*Fae[a,e]
for m in range(0,Nelec):
tsnew[a,i] += -ts[a,m]*Fmi[m,i]
for e in range(Nelec,dim):
tsnew[a,i] += td[a,e,i,m]*Fme[m,e]
for f in range(Nelec,dim):
tsnew[a,i] += -0.5*td[e,f,i,m]*spinints[m,a,e,f]
for n in range(0,Nelec):
tsnew[a,i] += -0.5*td[a,e,m,n]*spinints[n,m,e,i]
for n in range(0,Nelec):
for f in range(Nelec,dim):
tsnew[a,i] += -ts[f,n]*spinints[n,a,i,f]
tsnew[a,i] = tsnew[a,i]/Dai[a,i]
return tsnew
# Stanton eq (2)
def makeT2(x,Nelec,dim,fs,spinints,ts,td,Dabij,Fae,Fmi,Fme,Wmnij,Wabef,Wmbej):
if x == True:
tdnew = np.zeros((dim,dim,dim,dim))
for a in range(Nelec,dim):
for b in range(Nelec,dim):
for i in range(0,Nelec):
for j in range(0,Nelec):
tdnew[a,b,i,j] += spinints[i,j,a,b]
for e in range(Nelec,dim):
tdnew[a,b,i,j] += td[a,e,i,j]*Fae[b,e] - td[b,e,i,j]*Fae[a,e]
for m in range(0,Nelec):
                                tdnew[a,b,i,j] += -0.5*td[a,e,i,j]*ts[b,m]*Fme[m,e] + 0.5*td[b,e,i,j]*ts[a,m]*Fme[m,e]
continue
for m in range(0,Nelec):
tdnew[a,b,i,j] += -td[a,b,i,m]*Fmi[m,j] + td[a,b,j,m]*Fmi[m,i]
for e in range(Nelec,dim):
                                tdnew[a,b,i,j] += -0.5*td[a,b,i,m]*ts[e,j]*Fme[m,e] + 0.5*td[a,b,j,m]*ts[e,i]*Fme[m,e]
continue
for e in range(Nelec,dim):
tdnew[a,b,i,j] += ts[e,i]*spinints[a,b,e,j] - ts[e,j]*spinints[a,b,e,i]
for f in range(Nelec,dim):
tdnew[a,b,i,j] += 0.5*tau(ts,td,e,f,i,j)*Wabef[a,b,e,f]
continue
for m in range(0,Nelec):
tdnew[a,b,i,j] += -ts[a,m]*spinints[m,b,i,j] + ts[b,m]*spinints[m,a,i,j]
for e in range(Nelec,dim):
tdnew[a,b,i,j] += td[a,e,i,m]*Wmbej[m,b,e,j] - ts[e,i]*ts[a,m]*spinints[m,b,e,j]
tdnew[a,b,i,j] += -td[a,e,j,m]*Wmbej[m,b,e,i] + ts[e,j]*ts[a,m]*spinints[m,b,e,i]
                                tdnew[a,b,i,j] += -td[b,e,i,m]*Wmbej[m,a,e,j] + ts[e,i]*ts[b,m]*spinints[m,a,e,j]
tdnew[a,b,i,j] += td[b,e,j,m]*Wmbej[m,a,e,i] - ts[e,j]*ts[b,m]*spinints[m,a,e,i]
continue
for n in range(0,Nelec):
tdnew[a,b,i,j] += 0.5*tau(ts,td,a,b,m,n)*Wmnij[m,n,i,j]
continue
tdnew[a,b,i,j] = tdnew[a,b,i,j]/Dabij[a,b,i,j]
return tdnew
# Expression from Crawford, Schaefer (2000)
# DOI: 10.1002/9780470125915.ch2
# Equation (134) and (173)
# computes CCSD energy given T1 and T2
def ccsdenergy(Nelec,dim,fs,spinints,ts,td):
ECCSD = 0.0
for i in range(0,Nelec):
for a in range(Nelec,dim):
ECCSD += fs[i,a]*ts[a,i]
for j in range(0,Nelec):
for b in range(Nelec,dim):
ECCSD += 0.25*spinints[i,j,a,b]*td[a,b,i,j] + 0.5*spinints[i,j,a,b]*(ts[a,i])*(ts[b,j])
return ECCSD
#######################################################
#
# CCSD CALCULATION
#
#######################################################
class CCSolver(object):
def __init__(self,maxiter,cutoff):
self.maxiter = maxiter
self.cutoff = cutoff
@classmethod
def ccsd(cls,hfwavefunction,hamiltonian,maxiter=100,cutoff=1e-6):
occ = hfwavefunction.occ
Nelec = occ['alpha'] + occ['beta']
C = hfwavefunction.coefficient
dim = hamiltonian.dim*2
#Transfer Fock integral from spatial to spin basis
fs = spinfock(hfwavefunction.eorbitals)
#Transfer electron repulsion integral from atomic basis
#to molecular basis
hamiltonian.operators['electron_repulsion'].basis_transformation(C)
#build double bar integral <ij||kl>
spinints = hamiltonian.operators['electron_repulsion'].double_bar
ts,td,Dai,Dabij = initialize(fs,spinints,Nelec,dim)
ECCSD = 0
DECC = 1.0
Iter = 0
print '{0:2s} {1:3s} {2:4s}'.format('Iter', 'ECC', 'Delta')
while DECC > cutoff or Iter> maxiter: # arbitrary convergence criteria
Iter += 1
OLDCC = ECCSD
Fae,Fmi,Fme,Wmnij,Wabef,Wmbej = updateintermediates(True,Nelec,dim,fs,spinints,ts,td)
ts = makeT1(True,Nelec,dim,fs,spinints,ts,td,Dai,Fae,Fmi,Fme)
td = makeT2(True,Nelec,dim,fs,spinints,ts,td,Dabij,Fae,Fmi,Fme,Wmnij,Wabef,Wmbej)
ECCSD = ccsdenergy(Nelec,dim,fs,spinints,ts,td)
DECC = abs(ECCSD - OLDCC)
print '{0:2d} {1:3f} {2:4f}'.format(Iter, ECCSD, DECC)
print 'CC iterations converged'
print "E(corr,CCSD) = ", ECCSD
|
mit
| 3,714,258,493,166,371,000 | 37.546512 | 102 | 0.519759 | false |
stxnext-kindergarten/presence-analyzer-dczuba
|
src/presence_analyzer/tests.py
|
1
|
15917
|
# -*- coding: utf-8 -*-
"""
Presence analyzer unit tests.
"""
import os.path
import json
import datetime
import unittest
from mock import patch
from random import randint
from presence_analyzer import main, views, utils, decorators, helpers
CURRENT_PATH = os.path.dirname(__file__)
TEST_DATA_CSV = os.path.join(
CURRENT_PATH, '..', '..', 'runtime', 'data', 'test_data.csv'
)
BAD_TEST_DATA_CSV = os.path.join(
CURRENT_PATH, '..', '..', 'runtime', 'data', 'bad_test_data.csv'
)
TEST_DATA_XML = os.path.join(
CURRENT_PATH, '..', '..', 'runtime', 'data', 'test_users.xml'
)
BAD_TEST_DATA_XML = os.path.join(
CURRENT_PATH, '..', '..', 'runtime', 'data', 'bad_test_users.xml'
)
VALID_HTML_MIME = ('text/html', 'text/html; charset=utf-8')
# pylint: disable=E1103
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
"""
Views tests.
"""
def setUp(self):
"""
Before each test, set up a environment.
"""
main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
main.app.config.update({'DATA_XML': TEST_DATA_XML})
self.client = main.app.test_client()
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_mainpage(self):
"""
Test main page redirect.
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 302)
assert resp.headers['Location'].endswith('/presence_weekday.html')
def test_api_users(self):
"""
Test users listing.
"""
resp = self.client.get('/api/v1/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 9)
self.assertDictEqual(
data[0],
{u'avatar': u'https://intranet.stxnext.pl:443/api/images/users/36',
u'name': u'Anna W.', u'user_id': 36}
)
def test_presence_start_end_view(self):
"""
Test user presence start-end view
"""
url = '/api/v1/presence_start_end/%d'
user_id = 11
resp = self.client.get(url % user_id)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 5)
self.assertEqual(len(data[0]), 3)
self.assertTrue('Mon' in data[0])
@patch.object(views.log, 'debug')
def test_presence_start_end_view_log(self, mock_logger):
"""
Test user presence start-end view for non-existing user
"""
url = '/api/v1/presence_start_end/%d'
user_id = 112312
resp = self.client.get(url % user_id)
mock_logger.assert_called_once_with('User %s not found!', user_id)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
def test_mean_time_weekday_view(self):
"""
Test daily mean time for user
"""
base_url = '/api/v1/mean_time_weekday/%d'
resp = self.client.get(base_url % 10)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 7)
self.assertListEqual(data[1], [u'Tue', 30047.0])
self.assertListEqual(data[6], [u'Sun', 0])
resp = self.client.get(base_url % 11)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 7)
self.assertListEqual(data[2], [u'Wed', 25321.0])
self.assertListEqual(data[6], [u'Sun', 0])
@patch.object(views.log, 'debug')
def test_mean_time_weekday_view_log(self, mock_logger):
"""
Checks if log.debug is called when requesting for non-existing user
"""
user_id = 31111111111111111
resp = self.client.get('/api/v1/mean_time_weekday/%d' % user_id)
mock_logger.assert_called_once_with('User %s not found!', user_id)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(data, [])
def test_get_start_end_mean_time(self):
"""
Test calculating start-end mean time
"""
users_data = utils.get_data()
user_mean_time_10 = utils.get_start_end_mean_time(users_data[10])
user_mean_time_11 = utils.get_start_end_mean_time(users_data[11])
self.assertEqual(len(user_mean_time_10), 3)
self.assertEqual(len(user_mean_time_11), 5)
self.assertIsInstance(user_mean_time_11[0], tuple)
self.assertIsInstance(user_mean_time_11[4], tuple)
self.assertIsInstance(user_mean_time_10[2], tuple)
self.assertIsInstance(user_mean_time_11[2][0], str)
self.assertIsInstance(user_mean_time_11[3][1], int)
self.assertIsInstance(user_mean_time_11[1][2], int)
# time value is in milliseconds
for row in user_mean_time_10:
self.assertTrue(0 <= row[1] < 24*60*60*1000,
msg="User#10, row data: %s" % str(row))
self.assertTrue(0 <= row[2] < 24*60*60*1000,
msg="User#10, row data: %s" % str(row))
for row in user_mean_time_11:
self.assertTrue(0 <= row[1] < 24*60*60*1000,
msg="User#11, row data: %s" % str(row))
self.assertTrue(0 <= row[2] < 24*60*60*1000,
msg="User#11, row data: %s" % str(row))
self.assertEqual(user_mean_time_10[1][0], "Wed")
self.assertEqual(user_mean_time_10[0][1], 34745000)
self.assertEqual(user_mean_time_10[1][2], 58057000)
self.assertEqual(user_mean_time_11[1][1], 33590000)
self.assertEqual(user_mean_time_11[1][2], 50154000)
self.assertEqual(user_mean_time_11[3][1], 35602000)
self.assertEqual(user_mean_time_11[4][2], 54242000)
def test_presence_weekday_view(self):
"""
Test daily user presence
"""
base_url = '/api/v1/presence_weekday/%d'
resp = self.client.get(base_url % 10)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 8)
self.assertListEqual(data[0], [u'Weekday', u'Presence (s)'])
self.assertListEqual(data[2], [u'Tue', 30047.0])
self.assertListEqual(data[6], [u'Sat', 0])
resp = self.client.get(base_url % 11)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 8)
self.assertListEqual(data[0], [u'Weekday', u'Presence (s)'])
self.assertListEqual(data[4], [u'Thu', 45968])
self.assertListEqual(data[6], [u'Sat', 0])
@patch.object(views.log, 'debug')
def test_presence_weekday_view_log(self, mock_logger):
"""
Test daily user presence for non-existing user
"""
user_id = 31111111111111111
resp = self.client.get('/api/v1/presence_weekday/%d' % user_id)
mock_logger.assert_called_once_with('User %s not found!', user_id)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(data, [])
def test_template_view(self):
"""
Test template_view view
"""
resp = self.client.get('/presence_weekday.html')
self.assertEqual(resp.status_code, 200)
self.assertIn(resp.content_type, VALID_HTML_MIME)
        resp = self.client.get('/mean_time_weekday.html')
self.assertEqual(resp.status_code, 200)
self.assertIn(resp.content_type, VALID_HTML_MIME)
resp = self.client.get('/presence_weekday_asdasd.html')
self.assertEqual(resp.status_code, 404)
self.assertIn(resp.content_type, VALID_HTML_MIME)
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
"""
Utility functions tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
main.app.config.update({'DATA_XML': TEST_DATA_XML})
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_get_data(self):
"""
Test parsing of CSV file.
"""
data = utils.get_data()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys(), [10, 11])
sample_date = datetime.date(2013, 9, 10)
self.assertIn(sample_date, data[10])
self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])
self.assertEqual(data[10][sample_date]['start'],
datetime.time(9, 39, 5))
def test_group_by_weekday(self):
"""
Test grouping by weekday
"""
data = utils.get_data()
user_10 = utils.group_by_weekday(data[10])
self.assertEqual(len(user_10), 7)
self.assertIsInstance(user_10, dict)
for i in xrange(7):
self.assertIn(i, user_10, "Iteration with i=%d" % i)
self.assertIsInstance(user_10[i], list)
self.assertEqual(user_10[0], [])
self.assertIsInstance(user_10[1][0], int)
def test_mean(self):
"""
Test calculation of mean
"""
self.assertEqual(utils.mean([]), 0)
self.assertIsInstance(utils.mean([]), int)
self.assertIsInstance(utils.mean([1, 2, 3]), float)
self.assertEqual(utils.mean([1, 2, 3]), 2)
        self.assertEqual(utils.mean([a for a in xrange(-100, 101)]), 0)
self.assertEqual(utils.mean(
[123, 234, 345, 456, 567, 678, 789, 890]), 510.25)
for j in [randint(2, 123) for _ in xrange(randint(2, 123))]:
self.assertEqual(utils.mean(xrange(1, j)), j/2.0,
"Iteration with: a=%s" % j)
def test_seconds_since_midnight(self):
"""
        Test seconds since midnight calculation
"""
self.assertEquals(utils.seconds_since_midnight(
datetime.datetime(1, 1, 1)), 0)
self.assertIsInstance(utils.seconds_since_midnight(
datetime.datetime(1, 1, 1)), int)
self.assertEquals(utils.seconds_since_midnight(
datetime.datetime(1, 1, 1)), 0)
self.assertEquals(utils.seconds_since_midnight(
datetime.time(0, 0, 1)), 1)
self.assertEquals(utils.seconds_since_midnight(
datetime.time(12, 0, 0)), 43200)
def test_interval(self):
"""
Test interval calculation
"""
time_delta = datetime.timedelta(hours=4)
dd1 = datetime.datetime(2013, 5, 1, 12, 05, 04)
self.assertIsInstance(utils.interval(dd1-time_delta, dd1), int)
self.assertEqual(utils.interval(dd1-time_delta, dd1),
time_delta.seconds)
dd2 = datetime.datetime(2013, 5, 1, 1, 05, 04)
self.assertEqual(utils.interval(dd2-time_delta, dd2),
time_delta.seconds-24*60*60)
dt_now = datetime.datetime.now()
self.assertEqual(utils.interval(dt_now, dt_now), 0)
dd3 = datetime.time(12, 45, 34)
dd4 = datetime.time(11, 45, 34)
self.assertEqual(utils.interval(dd4, dd3), 60*60)
def test_get_user(self):
"""
Test for reading data from users.xml
"""
users = utils.get_users()
users_items = users.items()
self.assertEqual(len(users), 9)
self.assertIsInstance(users, dict)
self.assertIsInstance(users[122], dict)
self.assertIn(36, users)
self.assertIn(122, users)
self.assertIsInstance(users[122], dict)
self.assertEqual(len(users_items[1][1]), 2)
class PresenceAnalyzerUtilsWithBadDataTestCase(unittest.TestCase):
"""
Utility functions tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
reload(decorators)
reload(utils)
main.app.config.update({'DATA_CSV': BAD_TEST_DATA_CSV})
main.app.config.update({'DATA_XML': BAD_TEST_DATA_XML})
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
@patch.object(utils.log, 'debug')
def test_get_data(self, mock_logger):
"""
Test parsing of CSV file with bad entries
"""
data = utils.get_data()
msg = 'Problem with line %d: '
        mock_logger.assert_any_call(msg, 3, exc_info=True)
        mock_logger.assert_any_call(msg, 8, exc_info=True)
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys(), [10, 11])
self.assertEqual(len(data), 2)
self.assertEqual(len(data[10])+len(data[11]), 9)
def test_get_user(self):
"""
Test for reading data from users.xml with bad entries
"""
with self.assertRaises(AttributeError):
utils.get_users()
class PresenceAnalyzerDecoratorsTestCase(unittest.TestCase):
"""
    Decorator functions tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
reload(decorators)
main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
main.app.config.update({'DATA_XML': TEST_DATA_XML})
@patch.object(decorators.log, 'debug')
def test_get_data(self, mock_logger):
"""
Test cache decorator
"""
refresh_msg = 'Refreshing cache for %s'
retrieve_msg = 'Retrieving from cache %s'
data1 = utils.get_data()
key = helpers.generate_cache_key(utils.get_data, (), {})
        mock_logger.assert_any_call(refresh_msg % key)
data2 = utils.get_data()
        mock_logger.assert_any_call(retrieve_msg % key)
self.assertEqual(data1, data2)
class PresenceAnalyzerHelpersTestCase(unittest.TestCase):
"""
    Helper functions tests.
"""
def test_generate_cache_key(self):
"""
Test generating cache key
"""
key1 = helpers.generate_cache_key(utils.get_users, (), {})
key2 = helpers.generate_cache_key(utils.get_data, (), {})
key3 = helpers.generate_cache_key(utils.interval, (12, 32), {})
key4 = helpers.generate_cache_key(utils.interval, (),
{'end': 12, 'start': 32})
assert1 = 'presence_analyzer.utils.get_users:3527539:133156838395276'
assert2 = 'presence_analyzer.utils.get_data:3527539:133156838395276'
assert3 = 'presence_analyzer.utils.interval:3713076219329978631:' \
'133156838395276'
assert4 = 'presence_analyzer.utils.interval:3527539:' \
'5214707252506937883'
self.assertEqual(key1, assert1)
self.assertEqual(key2, assert2)
self.assertEqual(key3, assert3)
self.assertEqual(key4, assert4)
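# The assertions above pin down the key layout produced by
# helpers.generate_cache_key: "<module>.<name>:<args hash>:<kwargs hash>".
# A minimal sketch of such a helper, assuming plain hash() over the
# arguments (the real implementation lives in presence_analyzer.helpers
# and may hash kwargs differently):
def _example_generate_cache_key(func, args, kwargs):
    """Illustrative only; mirrors the format asserted above."""
    return '%s.%s:%s:%s' % (func.__module__, func.__name__,
                            hash(args), hash(frozenset(kwargs.items())))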
def suite():
"""
Default test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))
test_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))
test_suite.addTest(unittest.makeSuite(
PresenceAnalyzerUtilsWithBadDataTestCase))
test_suite.addTest(unittest.makeSuite(PresenceAnalyzerDecoratorsTestCase))
return test_suite
if __name__ == '__main__':
unittest.main()
|
mit
| -8,382,286,569,891,190,000 | 33.083512 | 79 | 0.591192 | false |
dennerlager/sepibrews
|
sepibrews/statemachine.py
|
1
|
2712
|
import logging
import logging.handlers
from multiprocessing import Queue
from execution_engine import ExecutionEngine
class StateMachine():
def __init__(self, brewView, tempControllerAddress, interfaceLock):
self.brewView = brewView
self.tempControllerAddress = tempControllerAddress
self.qToEe = Queue()
self.qFromEe = Queue()
self.ee = ExecutionEngine(self.tempControllerAddress,
self.qToEe,
self.qFromEe,
interfaceLock)
self.ee.start()
self.setupLogger()
def setupLogger(self):
self.logger = logging.getLogger('{}_{}'.format(
__name__, self.tempControllerAddress))
self.logger.setLevel(logging.INFO)
fh = logging.handlers.RotatingFileHandler('statemachine.log',
maxBytes=20000,
backupCount=5)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(process)d - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def start(self):
self.logger.info('start')
self.logger.info('selected recipe: {}'.format(self.brewView.getRecipe()))
self.qToEe.put('{} {}'.format('setRecipe', self.brewView.getRecipe()))
self.qToEe.put('start')
def stop(self):
self.logger.info('stop')
self.qToEe.put('stop')
def quit(self):
self.logger.info('quit')
self.qToEe.put('quit')
self.ee.join()
def updateViews(self):
self.logger.debug('update views')
self.clearQFromEe()
self.updateSv()
self.updatePv()
self.updateRemainingStepTime()
self.updateTotalRemainingTime()
def clearQFromEe(self):
for i in range(self.qFromEe.qsize()):
self.qFromEe.get()
def updateSv(self):
self.qToEe.put('getSv')
sv = self.qFromEe.get()
        if sv is not None:
self.brewView.setSetValue(sv)
def updatePv(self):
self.qToEe.put('getPv')
pv = self.qFromEe.get()
        if pv is not None:
self.brewView.setProcessValue(pv)
def updateRemainingStepTime(self):
self.qToEe.put('getRemainingStepTime')
rst = self.qFromEe.get()
        if rst is not None:
self.brewView.setStepTimeLeft(rst)
def updateTotalRemainingTime(self):
self.qToEe.put('getTotalRemainingTime')
trt = self.qFromEe.get()
        if trt is not None:
self.brewView.setTotalTimeLeft(trt)
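# A minimal usage sketch of StateMachine (hypothetical brew_view and lock
# objects; the real GUI wiring lives elsewhere in sepibrews):
def _example_run(brew_view, lock):
    """Illustrative only: drive one refresh cycle and shut down."""
    sm = StateMachine(brew_view, tempControllerAddress=1, interfaceLock=lock)
    sm.start()        # queues 'setRecipe <name>' then 'start' to the engine
    sm.updateViews()  # poll once; normally called from a periodic GUI timer
    sm.quit()         # stops the ExecutionEngine process and joins it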
|
gpl-3.0
| 3,199,371,516,686,894,000 | 32.481481 | 81 | 0.574115 | false |
pswaminathan/t1-python
|
terminalone/models/placementslot.py
|
1
|
3030
|
# -*- coding: utf-8 -*-
"""Provides placement_slot object."""
from __future__ import absolute_import
from datetime import datetime
from .. import t1types
from ..entity import Entity
from ..vendor import six
def update_low_priority(original, candidate):
for item, value in six.iteritems(candidate):
if item in original:
continue
original[item] = value
class PlacementSlot(Entity):
"""Site Placement for PMP-D."""
collection = 'placement_slots'
resource = 'placement_slot'
_relations = {
'placement',
}
defaults = {
'ad_slot': 1,
'auction_type': 'FIRST_PRICED',
'budget': 1.0,
'buy_price_type': 'CPM',
'end_date': datetime(2012, 12, 31, 0, 0, 0),
'est_volume': 0,
'frequency_amount': 1,
'frequency_interval': 'not-applicable',
'frequency_type': 'no-limit',
'sell_price': 0.0,
'sell_price_type': 'CPM',
'start_date': datetime(2012, 10, 1, 0, 0, 0),
'volume_unit': 'impressions',
}
_auction_types = t1types.enum({'FIRST_PRICED', 'SECOND_PRICED'},
'FIRST_PRICED')
_price_types = t1types.enum({'CPM', }, 'CPM')
_frequency_intervals = t1types.enum({'hour', 'day', 'week', 'month',
'campaign', 'not-applicable'},
'not-applicable')
_frequency_types = t1types.enum({'even', 'asap', 'no-limit'}, 'no-limit')
_volume_units = t1types.enum({'impressions', }, 'impressions')
_pull = {
'ad_slot': int,
'allow_remnant': t1types.int_to_bool,
'auction_type': None,
'budget': float,
'buy_price': float,
'buy_price_type': None,
'created_on': t1types.strpt,
'description': None,
'end_date': t1types.strpt,
'est_volume': float,
'frequency_amount': int,
'frequency_interval': None,
'frequency_type': None,
'height': int,
'id': int,
'prm_pub_ceiling': float,
'prm_pub_markup': float,
'sell_price': float,
'sell_price_type': None,
'site_placement_id': int,
'start_date': t1types.strpt,
'updated_on': t1types.strpt,
'version': int,
'volume_unit': None,
'width': int,
}
_push = _pull.copy()
_push.update({
'allow_remnant': int,
'auction_type': _auction_types,
'buy_price_type': _price_types,
'frequency_interval': _frequency_intervals,
'frequency_type': _frequency_types,
'sell_price_type': _price_types,
'volume_unit': _volume_units,
})
def __init__(self, session, properties=None, **kwargs):
super(PlacementSlot, self).__init__(session, properties, **kwargs)
    def save(self, data=None, url=None):
        """Fill in low-priority defaults, then save via the parent class."""
        update_low_priority(self.properties, self.defaults)
        super(PlacementSlot, self).save(data=data, url=url)
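# update_low_priority only fills in missing keys, so caller-supplied
# properties always win over the class defaults; an illustrative check:
def _example_defaults():
    props = {'budget': 5.0}
    update_low_priority(props, PlacementSlot.defaults)
    assert props['budget'] == 5.0   # explicit value kept
    assert props['ad_slot'] == 1    # missing key filled from defaults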
|
apache-2.0
| -3,337,957,810,650,184,000 | 31.934783 | 77 | 0.546865 | false |
community-ssu/telepathy-gabble
|
tests/twisted/test-fallback-socks5-proxy.py
|
1
|
13474
|
import socket
from gabbletest import (
exec_test, elem, elem_iq, sync_stream, make_presence, send_error_reply,
make_result_iq, sync_stream)
from servicetest import (
EventPattern, call_async, assertEquals, assertLength,
assertDoesNotContain)
from caps_helper import send_disco_reply
from bytestream import create_from_si_offer, BytestreamS5B
import ns
import constants as cs
from twisted.words.xish import xpath
import dbus
proxy_query_events = [
EventPattern('stream-iq', to='fallback1-proxy.localhost', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', to='fallback2-proxy.localhost', iq_type='get', query_ns=ns.BYTESTREAMS)]
proxy_port = {'fallback1-proxy.localhost': '12345', 'fallback2-proxy.localhost': '6789',
'fallback3-proxy.localhost': '3333', 'fallback4-proxy.localhost': '4444',
'fallback5-proxy.localhost': '5555', 'fallback6-proxy.localhost': '6666',}
def connect_and_announce_alice(q, bus, conn, stream):
q.forbid_events(proxy_query_events)
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
# Send Alice's presence
caps = { 'ext': '', 'ver': '0.0.0',
'node': 'http://example.com/fake-client0' }
presence = make_presence('alice@localhost/Test', caps=caps)
stream.send(presence)
disco_event = q.expect('stream-iq', to='alice@localhost/Test',
query_ns=ns.DISCO_INFO)
send_disco_reply(
stream, disco_event.stanza, [], [ns.TUBES, ns.FILE_TRANSFER])
sync_stream(q, stream)
q.unforbid_events(proxy_query_events)
def send_socks5_reply(stream, iq, jid=None, host=None, port=None):
if jid is None:
jid = iq['to']
if port is None:
port = proxy_port[jid]
if host is None:
host = '127.0.0.1'
reply = elem_iq(stream, 'result', id=iq['id'], from_=iq['to'])(
elem(ns.BYTESTREAMS, 'query')(
elem('streamhost', jid=jid, host=host, port=port)()))
stream.send(reply)
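# The reply built above serializes (roughly) to a XEP-0065 bytestreams
# result such as:
#
#   <iq type='result' id='...' from='fallback1-proxy.localhost'>
#     <query xmlns='http://jabber.org/protocol/bytestreams'>
#       <streamhost jid='fallback1-proxy.localhost'
#                   host='127.0.0.1' port='12345'/>
#     </query>
#   </iq>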
def wait_si_and_return_proxies(q, stream):
e = q.expect('stream-iq', to='alice@localhost/Test')
bytestream, profile = create_from_si_offer(stream, q, BytestreamS5B, e.stanza,
'test@localhost/Resource')
# Alice accepts the SI
result, si = bytestream.create_si_reply(e.stanza)
stream.send(result)
e = q.expect('stream-iq', to='alice@localhost/Test')
proxies = []
for node in xpath.queryForNodes('/iq/query/streamhost', e.stanza):
if node['jid'] == 'test@localhost/Resource':
# skip our own stream hosts
continue
proxies.append((node['jid'], node['host'], node['port']))
return proxies
def check_proxies(expected, proxies):
assertEquals(set(expected), set(proxies))
def offer_dbus_tube(q, bus, conn, stream):
connect_and_announce_alice(q, bus, conn, stream)
# Offer a private D-Bus tube just to check if the proxy is present in the
# SOCKS5 offer
call_async(q, conn.Requests, 'CreateChannel', {
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_DBUS_TUBE,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_ID: 'alice@localhost',
cs.DBUS_TUBE_SERVICE_NAME: 'com.example.TestCase'})
# Proxy queries are send when creating the channel
return_event, e1, e2 = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
proxy_query_events[0], proxy_query_events[1])
send_socks5_reply(stream, e1.stanza)
send_socks5_reply(stream, e2.stanza)
path, props = return_event.value
tube_chan = bus.get_object(conn.bus_name, path)
dbus_tube_iface = dbus.Interface(tube_chan, cs.CHANNEL_TYPE_DBUS_TUBE)
dbus_tube_iface.Offer({}, cs.SOCKET_ACCESS_CONTROL_CREDENTIALS)
proxies = wait_si_and_return_proxies(q, stream)
check_proxies([('fallback2-proxy.localhost', '127.0.0.1', '6789'),
('fallback1-proxy.localhost', '127.0.0.1', '12345')], proxies)
def accept_stream_tube(q, bus, conn, stream):
connect_and_announce_alice(q, bus, conn, stream)
# Accept a stream tube, we'll need SOCKS5 proxies each time we'll connect
# on the tube socket
# Alice offers us a stream tube
message = elem('message', to='test@localhost/Resource', from_='alice@localhost/Test')(
elem(ns.TUBES, 'tube', type='stream', service='http', id='10'))
stream.send(message)
# we are interested in the 'NewChannels' announcing the tube channel
def new_chan_predicate(e):
path, props = e.args[0][0]
return props[cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAM_TUBE
# Proxy queries are send when receiving an incoming stream tube
new_chan, e1, e2 = q.expect_many(
EventPattern('dbus-signal', signal='NewChannels', predicate=new_chan_predicate),
proxy_query_events[0], proxy_query_events[1])
send_socks5_reply(stream, e1.stanza)
send_socks5_reply(stream, e2.stanza)
path, props = new_chan.args[0][0]
assert props[cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAM_TUBE
tube_chan = bus.get_object(conn.bus_name, path)
tube_iface = dbus.Interface(tube_chan, cs.CHANNEL_TYPE_STREAM_TUBE)
# connect to the socket so a SOCKS5 bytestream will be created
address = tube_iface.Accept(cs.SOCKET_ADDRESS_TYPE_IPV4,
cs.SOCKET_ACCESS_CONTROL_LOCALHOST, 0, byte_arrays=True)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(address)
proxies = wait_si_and_return_proxies(q, stream)
check_proxies([('fallback2-proxy.localhost', '127.0.0.1', '6789'),
('fallback1-proxy.localhost', '127.0.0.1', '12345')], proxies)
def send_file_to_alice(q, conn):
call_async(q, conn.Requests, 'CreateChannel', {
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_ID: 'alice@localhost',
cs.FT_FILENAME: 'test.txt',
cs.FT_CONTENT_TYPE: 'text/plain',
cs.FT_SIZE: 10})
def send_file(q, bus, conn, stream):
connect_and_announce_alice(q, bus, conn, stream)
# Send a file; proxy queries are send when creating the FT channel
send_file_to_alice(q, conn)
return_event, e1, e2 = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
proxy_query_events[0], proxy_query_events[1])
send_socks5_reply(stream, e1.stanza)
send_socks5_reply(stream, e2.stanza)
# ensure that the same proxy is not queried more than once
q.forbid_events(proxy_query_events)
proxies = wait_si_and_return_proxies(q, stream)
check_proxies([('fallback2-proxy.localhost', '127.0.0.1', '6789'),
('fallback1-proxy.localhost', '127.0.0.1', '12345')], proxies)
def double_server(q, bus, conn, stream):
# For some reason the 2 proxies are actually the same. Check that we don't
# set them twice in the SOCKS5 init stanza
connect_and_announce_alice(q, bus, conn, stream)
send_file_to_alice(q, conn)
return_event, e1, e2 = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
proxy_query_events[0], proxy_query_events[1])
send_socks5_reply(stream, e1.stanza)
# send the same reply for the second stanza with with a different port
send_socks5_reply(stream, e2.stanza, 'fallback1-proxy.localhost', '127.0.0.1', '6789')
proxies = wait_si_and_return_proxies(q, stream)
# check that the proxy has been set only once
check_proxies([('fallback1-proxy.localhost', '127.0.0.1', '6789')], proxies)
def cache_full(q, bus, conn, stream):
# Test how Gabble manages the proxy cache once it's full
connect_and_announce_alice(q, bus, conn, stream)
send_file_to_alice(q, conn)
# 3 proxies are queried (NB_MIN_SOCKS5_PROXIES)
return_event, e1, e2, e3 = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS))
send_socks5_reply(stream, e1.stanza)
send_socks5_reply(stream, e2.stanza)
send_socks5_reply(stream, e3.stanza)
proxies = wait_si_and_return_proxies(q, stream)
assertLength(3, set(proxies))
oldest_proxy = proxies[2]
# send another file, one more proxy is queried
send_file_to_alice(q, conn)
return_event, e1, = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS))
send_socks5_reply(stream, e1.stanza)
proxies = wait_si_and_return_proxies(q, stream)
assertLength(4, set(proxies))
# the new proxy is the head of the list
assertEquals(e1.stanza['to'], proxies[0][0])
# send another file, one more proxy is queried
send_file_to_alice(q, conn)
return_event, e1, = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS))
send_socks5_reply(stream, e1.stanza)
proxies = wait_si_and_return_proxies(q, stream)
assertLength(5, set(proxies))
# the new proxy is the head of the list
assertEquals(e1.stanza['to'], proxies[0][0])
# send another file, one more proxy is queried
send_file_to_alice(q, conn)
return_event, e1, = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS))
send_socks5_reply(stream, e1.stanza)
proxies = wait_si_and_return_proxies(q, stream)
# we reached the max size of the cache (FALLBACK_PROXY_CACHE_SIZE) so the
# oldest proxy has been removed
assertLength(5, set(proxies))
# the new proxy is the head of the list
assertEquals(e1.stanza['to'], proxies[0][0])
# the oldest proxy has been removed
assertDoesNotContain(oldest_proxy, proxies)
    # send another file; we already queried all the proxies so the list is recycled
send_file_to_alice(q, conn)
# the oldest proxy is re-requested first
return_event, e1, = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', to=oldest_proxy[0], iq_type='get', query_ns=ns.BYTESTREAMS))
def proxy_error(q, bus, conn, stream):
# Test if another proxy is queried if a query failed
connect_and_announce_alice(q, bus, conn, stream)
send_file_to_alice(q, conn)
return_event, e1, e2, e3 = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS))
# Return errors for all the requests; the bugged proxies shouldn't be queried again
q.forbid_events([EventPattern('stream-iq', to=e1.stanza['to'],
iq_type='get', query_ns=ns.BYTESTREAMS)])
send_error_reply(stream, e1.stanza)
# the fourth proxy is queried
q.expect('stream-iq', iq_type='get', query_ns=ns.BYTESTREAMS)
q.forbid_events([EventPattern('stream-iq', to=e2.stanza['to'],
iq_type='get', query_ns=ns.BYTESTREAMS)])
send_error_reply(stream, e2.stanza)
sync_stream(q, stream)
q.forbid_events([EventPattern('stream-iq', to=e3.stanza['to'],
iq_type='get', query_ns=ns.BYTESTREAMS)])
send_error_reply(stream, e3.stanza)
sync_stream(q, stream)
def proxies_telepathy_im(q, bus, conn, stream):
# Test if proxies.telepathy.im is properly used when no fallback proxies
# are passed to Gabble
connect_and_announce_alice(q, bus, conn, stream)
send_file_to_alice(q, conn)
# Gabble asks for a proxy list to our server
return_event, e, = q.expect_many(
EventPattern('dbus-return', method='CreateChannel'),
EventPattern('stream-iq', to='proxies.telepathy.im', iq_type='get', query_ns=ns.DISCO_ITEMS))
# reply with 2 servers
reply = make_result_iq(stream, e.stanza)
query = xpath.queryForNodes('/iq/query', reply)[0]
item = query.addElement((None, 'item'))
item['jid'] = 'proxy1.localhost'
item = query.addElement((None, 'item'))
item['jid'] = 'proxy2.localhost'
stream.send(reply)
# These servers are queried
e1, e2 = q.expect_many(
EventPattern('stream-iq', to='proxy1.localhost', iq_type='get', query_ns=ns.BYTESTREAMS),
EventPattern('stream-iq', to='proxy2.localhost', iq_type='get', query_ns=ns.BYTESTREAMS))
if __name__ == '__main__':
params = {'fallback-socks5-proxies': ['fallback1-proxy.localhost', 'fallback2-proxy.localhost']}
exec_test(offer_dbus_tube, params=params)
exec_test(accept_stream_tube, params=params)
exec_test(send_file, params=params)
exec_test(double_server, params=params)
params6 = {'fallback-socks5-proxies': ['fallback1-proxy.localhost', 'fallback2-proxy.localhost',
'fallback3-proxy.localhost', 'fallback4-proxy.localhost', 'fallback5-proxy.localhost',
'fallback6-proxy.localhost']}
exec_test(cache_full, params=params6)
params4 = {'fallback-socks5-proxies': ['fallback1-proxy.localhost', 'fallback2-proxy.localhost',
'fallback3-proxy.localhost', 'fallback4-proxy.localhost']}
exec_test(proxy_error, params=params4)
exec_test(proxies_telepathy_im, params={})
|
lgpl-2.1
| 4,001,548,555,548,118,000 | 35.814208 | 102 | 0.66684 | false |
madprime/django_celery_fileprocess_example
|
django_celery_fileprocess_example/settings.py
|
1
|
2311
|
"""
Django settings for django_celery_fileprocess_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f=g23hlj8z2(-p%6=p)&bv1(ca9nib-(s4=*mh6k5-=j@28ay+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'file_process',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_celery_fileprocess_example.urls'
WSGI_APPLICATION = 'django_celery_fileprocess_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
MEDIA_URL = '/media/'
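# With MEDIA_ROOT/MEDIA_URL as above, an upload field such as
# (illustrative model, not part of this project):
#
#     class Upload(models.Model):
#         file = models.FileField(upload_to='uploads/')
#
# stores files under <BASE_DIR>/media/uploads/ and serves them at
# /media/uploads/<name> when the dev server is configured to serve media.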
|
apache-2.0
| -7,383,880,656,827,946,000 | 24.966292 | 79 | 0.727391 | false |
bdorney/CMS_GEM_Analysis_Framework
|
python/reparam1DObs2Gain.py
|
1
|
3162
|
import sys, os
from ROOT import gROOT, TClass, TFile, TH1F, TIter, TKey
if __name__ == "__main__":
#Import Options
from AnalysisOptions import *
from Utilities import getall, PARAMS_GAIN
#Specific Options
parser.add_option("--ident", type="string", dest="strIdent",
help="Substring within a TObject's TName that corresponds to the divider current value",
metavar="strIdent")
#Get input options
(options, args) = parser.parse_args()
#Check to make sure user specified an input file name
if options.filename is None:
#print "options.filename = " + str(options.filename)
print "You must specify the physical filepath of a Framework output file"
print "Use option -f or --file"
exit(1)
pass
#Check to make sure the user specified a gain curve
if options.gain_P0 is None or options.gain_P1 is None:
print "For G(x) = exp(P0 * x + P1)"
print "P0 = " + str(options.gain_P0)
print "P0_Err = " + str(options.gain_P0_Err)
print "P1 = " + str(options.gain_P1)
print "P1_Err = " + str(options.gain_P1_Err)
print "You must specify values for P0 & P1"
exit(1)
pass
#Check to make sure the user specified detector parameters
#if options.det_name is None:
# print "You must specify the detector name"
# print "Use option -n or --name"
# exit(1)
# pass
#Define the gain parameters
params_gain = PARAMS_GAIN(gain_p0=options.gain_P0,
gain_p0_err=options.gain_P0_Err,
gain_p1=options.gain_P1,
gain_p1_err=options.gain_P1_Err)
#Load the input file
file_Input = TFile(str(options.filename),"READ","",1)
#Loop over all TObjects in this File
list_IndepVar = []
list_DepVar = []
list_DepVar_Err = []
for strPath, obj in getall(file_Input, ""):
#Print the class name and path pair to user
if options.debug:
print obj.ClassName(), strPath
#Skip all objects that are not TH1's or their daughters
if obj.InheritsFrom("TH1") == False:
print "Skipping Current Object"
continue
#Get this histogram & store the values of interest
hInput = file_Input.Get(strPath)
list_DepVar.append(hInput.GetMean())
list_DepVar_Err.append(hInput.GetRMS())
#Get the current values
strName = hInput.GetName()
list_NameFields = strName.split('_')
list_NameFields = [x for x in list_NameFields if options.strIdent in x ]
if options.debug:
            print list_NameFields
#Store the current
list_IndepVar.append(float(list_NameFields[0].replace(options.strIdent, ".")))
#Reparameterize independent variable into gain & print data to user
print "VAR_INDEP,VAR_DEP,VAR_DEP_ERR"
for i in range(0,len(list_IndepVar)):
list_IndepVar[i]=params_gain.calcGain(list_IndepVar[i])
print str(list_IndepVar[i]) + "," + str(list_DepVar[i]) + "," + str(list_DepVar_Err[i])
print "Finished"
|
gpl-3.0
| 4,429,647,154,765,510,700 | 34.133333 | 96 | 0.608476 | false |
txomon/SpockBot
|
spock/plugins/helpers/entities.py
|
1
|
6342
|
"""
An entity tracker
"""
import logging
from spock.plugins.base import PluginBase
from spock.utils import Info, pl_announce
logger = logging.getLogger('spock')
class MCEntity(Info):
eid = 0
status = 0
nbt = None
class ClientPlayerEntity(MCEntity):
metadata = None
class MovementEntity(MCEntity):
x = 0
y = 0
z = 0
yaw = 0
pitch = 0
on_ground = True
class PlayerEntity(MovementEntity):
uuid = 0
current_item = 0
metadata = None
class ObjectEntity(MovementEntity):
obj_type = 0
obj_data = 0
speed_x = 0
speed_y = 0
speed_z = 0
class MobEntity(MovementEntity):
mob_type = 0
head_pitch = 0
head_yaw = 0
velocity_x = 0
velocity_y = 0
velocity_z = 0
metadata = None
class PaintingEntity(MCEntity):
title = ""
location = {
'x': 0,
'y': 0,
'z': 0,
}
direction = 0
class ExpEntity(MCEntity):
x = 0
y = 0
z = 0
count = 0
class GlobalEntity(MCEntity):
global_type = 0
x = 0
y = 0
z = 0
class EntityCore(object):
def __init__(self):
self.client_player = ClientPlayerEntity()
self.entities = {}
self.players = {}
self.mobs = {}
self.objects = {}
self.paintings = {}
self.exp_orbs = {}
self.global_entities = {}
@pl_announce('Entities')
class EntityPlugin(PluginBase):
    requires = ('Event',)
events = {
'PLAY<Join Game': 'handle_join_game',
'PLAY<Spawn Player': 'handle_spawn_player',
'PLAY<Spawn Object': 'handle_spawn_object',
'PLAY<Spawn Mob': 'handle_spawn_mob',
'PLAY<Spawn Painting': 'handle_spawn_painting',
'PLAY<Spawn Experience Orb': 'handle_spawn_experience_orb',
'PLAY<Destroy Entities': 'handle_destroy_entities',
'PLAY<Entity Equipment': 'handle_unhandled',
'PLAY<Entity Velocity': 'handle_set_dict',
'PLAY<Entity Relative Move': 'handle_relative_move',
'PLAY<Entity Look': 'handle_set_dict',
'PLAY<Entity Look and Relative Move': 'handle_relative_move',
'PLAY<Entity Teleport': 'handle_set_dict',
'PLAY<Entity Head Look': 'handle_set_dict',
'PLAY<Entity Status': 'handle_set_dict',
'PLAY<Entity Metadata': 'handle_set_dict',
'PLAY<Entity Effect': 'handle_unhandled',
'PLAY<Remove Entity Effect': 'handle_unhandled',
'PLAY<Entity Properties': 'handle_unhandled',
'PLAY<Spawn Global Entity': 'handle_spawn_global_entity',
'PLAY<Update Entity NBT': 'handle_set_dict',
}
def __init__(self, ploader, settings):
super(EntityPlugin, self).__init__(ploader, settings)
self.ec = EntityCore()
ploader.provides('Entities', self.ec)
# TODO: Implement all these things
def handle_unhandled(self, event, packet):
pass
def handle_join_game(self, event, packet):
self.ec.client_player.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = self.ec.client_player
def handle_spawn_player(self, event, packet):
entity = PlayerEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.players[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_object(self, event, packet):
entity = ObjectEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.objects[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_mob(self, event, packet):
entity = MobEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.mobs[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_painting(self, event, packet):
entity = PaintingEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.paintings[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_experience_orb(self, event, packet):
entity = ExpEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.exp_orbs[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_global_entity(self, event, packet):
entity = GlobalEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.global_entities[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_destroy_entities(self, event, packet):
for eid in packet.data['eids']:
if eid in self.ec.entities:
entity = self.ec.entities[eid]
del self.ec.entities[eid]
if eid in self.ec.players:
del self.ec.players[eid]
elif eid in self.ec.objects:
del self.ec.objects[eid]
elif eid in self.ec.mobs:
del self.ec.mobs[eid]
elif eid in self.ec.paintings:
del self.ec.paintings[eid]
elif eid in self.ec.exp_orbs:
del self.ec.exp_orbs[eid]
elif eid in self.ec.global_entities:
del self.ec.global_entities[eid]
self.event.emit('entity_destroy', {'entity': entity})
def handle_relative_move(self, event, packet):
if packet.data['eid'] in self.ec.entities:
entity = self.ec.entities[packet.data['eid']]
old_pos = [entity.x, entity.y, entity.z]
entity.set_dict(packet.data)
entity.x = entity.x + packet.data['dx']
entity.y = entity.y + packet.data['dy']
entity.z = entity.z + packet.data['dz']
self.event.emit('entity_move',
{'entity': entity, 'old_pos': old_pos})
def handle_set_dict(self, event, packet):
if packet.data['eid'] in self.ec.entities:
self.ec.entities[packet.data['eid']].set_dict(packet.data)
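# A consuming plugin can react to the events emitted above; sketch only
# (plugin and handler names are illustrative, not part of SpockBot):
class _ExampleWatcher(PluginBase):
    requires = ('Entities', 'Event')
    events = {'entity_spawn': 'handle_spawn'}

    def handle_spawn(self, name, data):
        entity = data['entity']
        if isinstance(entity, PlayerEntity):
            logger.info('player entity %s spawned', entity.eid)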
|
mit
| -2,425,411,795,611,821,600 | 29.936585 | 70 | 0.584831 | false |
ohduran/CrowdFinanceInfographic
|
tests/test_basic.py
|
1
|
2207
|
"""Test cases for the project."""
import sys
import unittest
# Import modules from sample
sys.path.insert(0, 'sample')
import core
import helpers
# Set up the data that we are using in tests.
data = helpers.open_json_as_dict('jsons/bigsample.json')
class BasicTest(unittest.TestCase):
"""Basic Test functionality."""
def test_data_is_not_None(self):
"""Data dictionary brought by the set up is valid."""
# Test data is displayed as dictionary
self.assertIsNotNone(data) # is not None
self.assertIsInstance(data, list) # is a list of dictionaries
for i in range(len(data)):
            self.assertIsInstance(data[i], dict)  # entries are dictionaries
# Test structure of the data
self.assertIsInstance(data[i]['url'], unicode)
self.assertIsInstance(data[i]['status'], unicode)
self.assertIsInstance(data[i]['goal_fx']['GBP'], float)
self.assertIsInstance(data[i]['start_time'], unicode)
self.assertIsInstance(data[i]['end_time'], unicode)
            self.assertIsInstance(data[i]['goal_fx'], dict)
# It is either an integer or a float
self.assertTrue(
isinstance(data[i]['goal_fx']['GBP'], int) or
isinstance(data[i]['goal_fx']['GBP'], float))
            self.assertIsInstance(data[i]['raised_fx'], dict)
# It is either an integer or a float
self.assertTrue(
isinstance(data[i]['raised_fx']['GBP'], int) or
isinstance(data[i]['raised_fx']['GBP'], float))
# Some doesn't include concepts.
try:
                self.assertIsInstance(data[i]['concepts'], list)
for j in range(len(data[i]['concepts'])):
self.assertIsInstance(
data[i]['concepts'][j]['start'], int)
self.assertIsInstance(
data[i]['concepts'][j]['end'], int)
self.assertIsInstance(
data[i]['concepts'][j]['concept'], unicode)
except KeyError:
pass
if __name__ == '__main__':
unittest.main()
|
mit
| -7,129,107,591,919,053,000 | 38.410714 | 78 | 0.561849 | false |
chrisspen/django-feeds
|
djangofeeds/conf.py
|
1
|
5159
|
from datetime import timedelta
from django.conf import settings
try:
from celery import conf as celeryconf
DEFAULT_ROUTING_KEY = celeryconf.DEFAULT_ROUTING_KEY
except ImportError:
DEFAULT_ROUTING_KEY = "celery"
DEFAULT_DEFAULT_POST_LIMIT = 20
DEFAULT_NUM_POSTS = -1
DEFAULT_CACHE_MIN = 30
DEFAULT_ENTRY_WORD_LIMIT = 100
DEFAULT_FEED_TIMEOUT = 10
DEFAULT_REFRESH_EVERY = 3 * 60 * 60 # 3 hours
DEFAULT_FEED_LOCK_EXPIRE = 60 * 3 # lock expires in 3 minutes.
DEFAULT_MIN_REFRESH_INTERVAL = timedelta(seconds=60 * 20)
DEFAULT_FEED_LOCK_CACHE_KEY_FMT = "djangofeeds.import_lock.%s"
""" .. data:: STORE_ENCLOSURES
Keep post enclosures.
Default: False
Taken from: ``settings.DJANGOFEEDS_STORE_ENCLOSURES``.
"""
STORE_ENCLOSURES = getattr(settings, "DJANGOFEEDS_STORE_ENCLOSURES", False)
""" .. data:: STORE_CATEGORIES
Keep feed/post categories
Default: False
Taken from: ``settings.DJANGOFEEDS_STORE_CATEGORIES``.
"""
STORE_CATEGORIES = getattr(settings, "DJANGOFEEDS_STORE_CATEGORIES", False)
"""
.. data:: MIN_REFRESH_INTERVAL
Feed should not be refreshed if it was last refreshed within this time.
(in seconds)
Default: 20 minutes
Taken from: ``settings.DJANGOFEEDS_MIN_REFRESH_INTERVAL``.
"""
MIN_REFRESH_INTERVAL = getattr(settings, "DJANGOFEEDS_MIN_REFRESH_INTERVAL",
DEFAULT_MIN_REFRESH_INTERVAL)
"""
.. data:: FEED_TIMEOUT
Timeout in seconds for the feed to refresh.
Default: 10 seconds
Taken from: ``settings.DJANGOFEEDS_FEED_TIMEOUT``.
"""
FEED_TIMEOUT = getattr(settings, "DJANGOFEEDS_FEED_TIMEOUT",
DEFAULT_FEED_TIMEOUT)
def _interval(interval):
if isinstance(interval, int):
return timedelta(seconds=interval)
return interval
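# _interval normalizes integer-second settings into timedeltas, e.g.:
#
#     _interval(1200)                  -> timedelta(seconds=1200)
#     _interval(timedelta(minutes=20)) -> returned unchanged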
# Make sure MIN_REFRESH_INTERVAL is a timedelta object.
MIN_REFRESH_INTERVAL = _interval(MIN_REFRESH_INTERVAL)
""" .. data:: DEFAULT_POST_LIMIT
The default number of posts to import.
Taken from: ``settings.DJANGOFEEDS_DEFAULT_POST_LIMIT``.
"""
DEFAULT_POST_LIMIT = getattr(settings, "DJANGOFEEDS_DEFAULT_POST_LIMIT",
DEFAULT_DEFAULT_POST_LIMIT)
""" .. data:: REFRESH_EVERY
Interval in seconds between feed refreshes.
Default: 3 hours
Taken from: ``settings.DJANGOFEEDS_REFRESH_EVERY``.
"""
REFRESH_EVERY = getattr(settings, "DJANGOFEEDS_REFRESH_EVERY",
DEFAULT_REFRESH_EVERY)
"""".. data:: FEED_LAST_REQUESTED_REFRESH_LIMIT
the maximum amount of time a feed can be unused
before stopping refreshing it. Used by opal-feed.
"""
FEED_LAST_REQUESTED_REFRESH_LIMIT = getattr(settings,
"FEED_LAST_REQUESTED_REFRESH_LIMIT", None)
""" .. data:: ROUTING_KEY_PREFIX
Prefix for AMQP routing key.
Default: ``celery.conf.AMQP_PUBLISHER_ROUTING_KEY``.
Taken from: ``settings.DJANGOFEEDS_ROUTING_KEY_PREFIX``.
"""
ROUTING_KEY_PREFIX = getattr(settings, "DJANGOFEEDS_ROUTING_KEY_PREFIX",
DEFAULT_ROUTING_KEY)
""" .. data:: FEED_LOCK_CACHE_KEY_FMT
Format used for feed cache lock. Takes one argument: the feeds URL.
Default: "djangofeeds.import_lock.%s"
Taken from: ``settings.DJANGOFEEDS_FEED_LOCK_CACHE_KEY_FMT``.
"""
FEED_LOCK_CACHE_KEY_FMT = getattr(settings,
"DJANGOFEEDS_FEED_LOCK_CACHE_KEY_FMT",
DEFAULT_FEED_LOCK_CACHE_KEY_FMT)
""" .. data:: FEED_LOCK_EXPIRE
Time in seconds which after the feed lock expires.
Default: 3 minutes
Taken from: ``settings.DJANGOFEEDS_FEED_LOCK_EXPIRE``.
"""
FEED_LOCK_EXPIRE = getattr(settings,
"DJANGOFEEDS_FEED_LOCK_EXPIRE",
DEFAULT_FEED_LOCK_EXPIRE)
POST_STORAGE_BACKEND = getattr(settings,
"DJANGOFEEDS_POST_STORAGE_BACKEND",
"djangofeeds.backends.database.DatabaseBackend")
REDIS_POST_HOST = getattr(settings,
"DJANGOFEEDS_REDIS_POST_HOST",
"localhost")
REDIS_POST_PORT = getattr(settings,
"DJANGOFEEDS_REDIS_POST_PORT",
None)
REDIS_POST_DB = getattr(settings,
"DJANGOFEEDS_REDIS_POST_DB",
"djangofeeds:post")
FSCK_ON_UPDATE = getattr(settings,
"DJANGOFEEDS_FSCK_ON_UPDATE",
False)
GET_ARTICLE_CONTENT = getattr(settings,
"DJANGOFEEDS_GET_ARTICLE_CONTENT",
True)
GET_ARTICLE_CONTENT_ONLY_MIME_TYPES = getattr(settings,
"DJANGOFEEDS_GET_ARTICLE_CONTENT_ONLY_MIME_TYPES",
# There's no point in attempting to extract text from a gif/jpeg/etc.
    ['text/html', 'text/plain', 'application/xhtml+xml', 'text/xml', 'application/xml'])
ALLOW_ADMIN_FEED_LOOKUPS = getattr(settings,
"DJANGOFEEDS_ALLOW_ADMIN_FEED_LOOKUPS",
False)
# Some feeds wrap all URLs in a tracking link.
# These regexes explain how to find the true URL in the post's given URL.
# e.g. [re.compile(r'&url=([^&$]+)')]
LINK_URL_REGEXES = getattr(settings,
"DJANGOFEEDS_LINK_URL_REGEXES",
[])
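# A consumer of LINK_URL_REGEXES would unwrap wrapped tracking links
# roughly like this (sketch; the real extraction code lives in the
# feed-import path, and the helper name here is illustrative):
def _example_resolve_link(url, regexes=LINK_URL_REGEXES):
    """Illustrative only: return the unwrapped URL, or the input as-is."""
    import urllib
    for regex in regexes:
        match = regex.search(url)
        if match:
            return urllib.unquote(match.group(1))  # true URL in group 1
    return url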
|
bsd-2-clause
| -471,363,245,432,641,150 | 29.347059 | 86 | 0.647606 | false |
wangyum/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/bernoulli_test.py
|
1
|
10847
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Bernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.special
from tensorflow.contrib.distributions.python.ops import bernoulli
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_bernoulli(batch_shape, dtype=dtypes.int32):
p = np.random.uniform(size=list(batch_shape))
p = constant_op.constant(p, dtype=dtypes.float32)
return bernoulli.Bernoulli(probs=p, dtype=dtype)
def entropy(p):
q = 1. - p
return -q * np.log(q) - p * np.log(p)
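# entropy(p) above is the Bernoulli entropy in nats,
# H(p) = -p*log(p) - (1-p)*log(1-p); e.g. entropy(0.5) == np.log(2)
# ~= 0.6931, which is what dist.entropy() is compared against below.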
class BernoulliTest(test.TestCase):
def testP(self):
p = [0.2, 0.4]
dist = bernoulli.Bernoulli(probs=p)
with self.test_session():
self.assertAllClose(p, dist.probs.eval())
def testLogits(self):
logits = [-42., 42.]
dist = bernoulli.Bernoulli(logits=logits)
with self.test_session():
self.assertAllClose(logits, dist.logits.eval())
with self.test_session():
self.assertAllClose(scipy.special.expit(logits), dist.probs.eval())
p = [0.01, 0.99, 0.42]
dist = bernoulli.Bernoulli(probs=p)
with self.test_session():
self.assertAllClose(scipy.special.logit(p), dist.logits.eval())
def testInvalidP(self):
invalid_ps = [1.01, 2.]
for p in invalid_ps:
with self.test_session():
with self.assertRaisesOpError("probs has components greater than 1"):
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
dist.probs.eval()
invalid_ps = [-0.01, -3.]
for p in invalid_ps:
with self.test_session():
with self.assertRaisesOpError("Condition x >= 0"):
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
dist.probs.eval()
valid_ps = [0.0, 0.5, 1.0]
for p in valid_ps:
with self.test_session():
dist = bernoulli.Bernoulli(probs=p)
self.assertEqual(p, dist.probs.eval()) # Should not fail
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_bernoulli(batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape.as_list())
self.assertAllEqual([], dist.event_shape_tensor().eval())
def testDtype(self):
dist = make_bernoulli([])
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dist.mean().dtype)
self.assertEqual(dist.probs.dtype, dist.variance().dtype)
self.assertEqual(dist.probs.dtype, dist.stddev().dtype)
self.assertEqual(dist.probs.dtype, dist.entropy().dtype)
self.assertEqual(dist.probs.dtype, dist.prob(0).dtype)
self.assertEqual(dist.probs.dtype, dist.log_prob(0).dtype)
dist64 = make_bernoulli([], dtypes.int64)
self.assertEqual(dist64.dtype, dtypes.int64)
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
self.assertEqual(dist64.dtype, dist64.mode().dtype)
def _testPmf(self, **kwargs):
dist = bernoulli.Bernoulli(**kwargs)
with self.test_session():
# pylint: disable=bad-continuation
xs = [
0,
[1],
[1, 0],
[[1, 0]],
[[1, 0], [1, 1]],
]
expected_pmfs = [
[[0.8, 0.6], [0.7, 0.4]],
[[0.2, 0.4], [0.3, 0.6]],
[[0.2, 0.6], [0.3, 0.4]],
[[0.2, 0.6], [0.3, 0.4]],
[[0.2, 0.6], [0.3, 0.6]],
]
# pylint: enable=bad-continuation
for x, expected_pmf in zip(xs, expected_pmfs):
self.assertAllClose(dist.prob(x).eval(), expected_pmf)
self.assertAllClose(dist.log_prob(x).eval(), np.log(expected_pmf))
def testPmfCorrectBroadcastDynamicShape(self):
with self.test_session():
p = array_ops.placeholder(dtype=dtypes.float32)
dist = bernoulli.Bernoulli(probs=p)
event1 = [1, 0, 1]
event2 = [[1, 0, 1]]
self.assertAllClose(
dist.prob(event1).eval({
p: [0.2, 0.3, 0.4]
}), [0.2, 0.7, 0.4])
self.assertAllClose(
dist.prob(event2).eval({
p: [0.2, 0.3, 0.4]
}), [[0.2, 0.7, 0.4]])
def testPmfInvalid(self):
p = [0.1, 0.2, 0.7]
with self.test_session():
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
with self.assertRaisesOpError("must be non-negative."):
dist.prob([1, 1, -1]).eval()
with self.assertRaisesOpError("is not less than or equal to 1."):
dist.prob([2, 0, 1]).eval()
def testPmfWithP(self):
p = [[0.2, 0.4], [0.3, 0.6]]
self._testPmf(probs=p)
self._testPmf(logits=scipy.special.logit(p))
def testBroadcasting(self):
with self.test_session():
p = array_ops.placeholder(dtypes.float32)
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(np.log(0.5), dist.log_prob(1).eval({p: 0.5}))
self.assertAllClose(
np.log([0.5, 0.5, 0.5]), dist.log_prob([1, 1, 1]).eval({
p: 0.5
}))
self.assertAllClose(
np.log([0.5, 0.5, 0.5]), dist.log_prob(1).eval({
p: [0.5, 0.5, 0.5]
}))
def testPmfShapes(self):
with self.test_session():
p = array_ops.placeholder(dtypes.float32, shape=[None, 1])
dist = bernoulli.Bernoulli(probs=p)
self.assertEqual(2, len(dist.log_prob(1).eval({p: [[0.5], [0.5]]}).shape))
with self.test_session():
dist = bernoulli.Bernoulli(probs=0.5)
self.assertEqual(2, len(dist.log_prob([[1], [1]]).eval().shape))
with self.test_session():
dist = bernoulli.Bernoulli(probs=0.5)
self.assertEqual((), dist.log_prob(1).get_shape())
self.assertEqual((1), dist.log_prob([1]).get_shape())
self.assertEqual((2, 1), dist.log_prob([[1], [1]]).get_shape())
with self.test_session():
dist = bernoulli.Bernoulli(probs=[[0.5], [0.5]])
self.assertEqual((2, 1), dist.log_prob(1).get_shape())
def testBoundaryConditions(self):
with self.test_session():
dist = bernoulli.Bernoulli(probs=1.0)
self.assertAllClose(np.nan, dist.log_prob(0).eval())
self.assertAllClose([np.nan], [dist.log_prob(1).eval()])
def testEntropyNoBatch(self):
p = 0.2
dist = bernoulli.Bernoulli(probs=p)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), entropy(p))
def testEntropyWithBatch(self):
p = [[0.1, 0.7], [0.2, 0.6]]
dist = bernoulli.Bernoulli(probs=p, validate_args=False)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [[entropy(0.1), entropy(0.7)],
[entropy(0.2), entropy(0.6)]])
def testSampleN(self):
with self.test_session():
p = [0.2, 0.6]
dist = bernoulli.Bernoulli(probs=p)
n = 100000
samples = dist.sample(n)
samples.set_shape([n, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertTrue(np.all(sample_values >= 0))
self.assertTrue(np.all(sample_values <= 1))
# Note that the standard error for the sample mean is ~ sqrt(p * (1 - p) /
# n). This means that the tolerance is very sensitive to the value of p
# as well as n.
self.assertAllClose(p, np.mean(sample_values, axis=0), atol=1e-2)
self.assertEqual(set([0, 1]), set(sample_values.flatten()))
# In this test we're just interested in verifying there isn't a crash
# owing to mismatched types. b/30940152
dist = bernoulli.Bernoulli(np.log([.2, .4]))
self.assertAllEqual((1, 2), dist.sample(1, seed=42).get_shape().as_list())
def testSampleActsLikeSampleN(self):
with self.test_session() as sess:
p = [0.2, 0.6]
dist = bernoulli.Bernoulli(probs=p)
n = 1000
seed = 42
self.assertAllEqual(
dist.sample(n, seed).eval(), dist.sample(n, seed).eval())
n = array_ops.placeholder(dtypes.int32)
      sample1, sample2 = sess.run(
          [dist.sample(n, seed), dist.sample(n, seed)], feed_dict={n: 1000})
      self.assertAllEqual(sample1, sample2)
def testMean(self):
with self.test_session():
p = np.array([[0.2, 0.7], [0.5, 0.4]], dtype=np.float32)
dist = bernoulli.Bernoulli(probs=p)
self.assertAllEqual(dist.mean().eval(), p)
def testVarianceAndStd(self):
var = lambda p: p * (1. - p)
with self.test_session():
p = [[0.2, 0.7], [0.5, 0.4]]
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(
dist.variance().eval(),
np.array(
[[var(0.2), var(0.7)], [var(0.5), var(0.4)]], dtype=np.float32))
self.assertAllClose(
dist.stddev().eval(),
np.array(
[[np.sqrt(var(0.2)), np.sqrt(var(0.7))],
[np.sqrt(var(0.5)), np.sqrt(var(0.4))]],
dtype=np.float32))
def testBernoulliWithSigmoidProbs(self):
p = np.array([8.3, 4.2])
dist = bernoulli.BernoulliWithSigmoidProbs(logits=p)
with self.test_session():
self.assertAllClose(math_ops.sigmoid(p).eval(), dist.probs.eval())
def testBernoulliBernoulliKL(self):
with self.test_session() as sess:
batch_size = 6
a_p = np.array([0.5] * batch_size, dtype=np.float32)
b_p = np.array([0.4] * batch_size, dtype=np.float32)
a = bernoulli.Bernoulli(probs=a_p)
b = bernoulli.Bernoulli(probs=b_p)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = sess.run(kl)
kl_expected = (a_p * np.log(a_p / b_p) + (1. - a_p) * np.log(
(1. - a_p) / (1. - b_p)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
if __name__ == "__main__":
test.main()
|
apache-2.0
| 3,666,041,075,142,069,000 | 35.156667 | 80 | 0.609754 | false |
quartzmo/gcloud-ruby
|
google-cloud-dialogflow/synth.py
|
1
|
2614
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v2_library = gapic.ruby_library(
'dialogflow', 'v2',
config_path='/google/cloud/dialogflow/artman_dialogflow_v2.yaml',
artman_output_name='google-cloud-ruby/google-cloud-dialogflow'
)
s.copy(v2_library / 'lib')
s.copy(v2_library / 'test')
s.copy(v2_library / 'README.md')
s.copy(v2_library / 'LICENSE')
s.copy(v2_library / '.gitignore')
s.copy(v2_library / '.yardopts')
s.copy(v2_library / 'google-cloud-dialogflow.gemspec', merge=ruby.merge_gemspec)
# https://github.com/googleapis/gapic-generator/issues/2232
s.replace(
[
'lib/google/cloud/dialogflow/v2/agents_client.rb',
'lib/google/cloud/dialogflow/v2/entity_types_client.rb',
'lib/google/cloud/dialogflow/v2/intents_client.rb'
],
'\n\n(\\s+)class OperationsClient < Google::Longrunning::OperationsClient',
'\n\n\\1# @private\n\\1class OperationsClient < Google::Longrunning::OperationsClient')
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/dialogflow/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
|
apache-2.0
| 2,215,669,220,090,041,600 | 32.512821 | 116 | 0.685157 | false |
pombredanne/PyGithub
|
github/RepositoryKey.py
|
1
|
5564
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Srijan Choudhary <srijan4@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class RepositoryKey(github.GithubObject.CompletableGithubObject):
"""
This class represents RepositoryKeys. The reference can be found here http://developer.github.com/v3/repos/keys/
"""
def __init__(self, requester, headers, attributes, completed, repoUrl):
github.GithubObject.CompletableGithubObject.__init__(self, requester, headers, attributes, completed)
self.__repoUrl = repoUrl
@property
def __customUrl(self):
return self.__repoUrl + "/keys/" + str(self.id)
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def key(self):
"""
:type: string
"""
self._completeIfNotSet(self._key)
return self._key.value
@property
def title(self):
"""
:type: string
"""
self._completeIfNotSet(self._title)
return self._title.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def verified(self):
"""
:type: bool
"""
self._completeIfNotSet(self._verified)
return self._verified.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.__customUrl
)
def edit(self, title=github.GithubObject.NotSet, key=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
:param title: string
:param key: string
:rtype: None
"""
assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
assert key is github.GithubObject.NotSet or isinstance(key, (str, unicode)), key
post_parameters = dict()
if title is not github.GithubObject.NotSet:
post_parameters["title"] = title
if key is not github.GithubObject.NotSet:
post_parameters["key"] = key
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.__customUrl,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._id = github.GithubObject.NotSet
self._key = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._verified = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "key" in attributes: # pragma no branch
self._key = self._makeStringAttribute(attributes["key"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "verified" in attributes: # pragma no branch
self._verified = self._makeBoolAttribute(attributes["verified"])
|
gpl-3.0
| -5,146,577,369,882,301,000 | 40.522388 | 116 | 0.516894 | false |
leiferikb/bitpop
|
build/third_party/buildbot_8_4p1/buildbot/config.py
|
2
|
4429
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.util import safeTranslate
class MasterConfig(object):
"""
Namespace for master configuration values. An instance of this class is
available at C{master.config}.
@ivar changeHorizon: the current change horizon
"""
changeHorizon = None
class BuilderConfig:
"""
Used in config files to specify a builder - this can be subclassed by users
to add extra config args, set defaults, or whatever. It is converted to a
dictionary for consumption by the buildmaster at config time.
"""
def __init__(self,
name=None,
slavename=None,
slavenames=None,
builddir=None,
slavebuilddir=None,
factory=None,
category=None,
nextSlave=None,
nextBuild=None,
nextSlaveAndBuild=None,
locks=None,
env=None,
properties=None,
mergeRequests=None):
# name is required, and can't start with '_'
if not name or type(name) not in (str, unicode):
raise ValueError("builder's name is required")
if name[0] == '_':
raise ValueError("builder names must not start with an "
"underscore: " + name)
self.name = name
# factory is required
if factory is None:
raise ValueError("builder's factory is required")
self.factory = factory
# slavenames can be a single slave name or a list, and should also
# include slavename, if given
if type(slavenames) is str:
slavenames = [ slavenames ]
if slavenames:
if type(slavenames) is not list:
raise TypeError("slavenames must be a list or a string")
else:
slavenames = []
if slavename:
if type(slavename) != str:
raise TypeError("slavename must be a string")
slavenames = slavenames + [ slavename ]
if not slavenames:
raise ValueError("at least one slavename is required")
self.slavenames = slavenames
# builddir defaults to name
if builddir is None:
builddir = safeTranslate(name)
self.builddir = builddir
# slavebuilddir defaults to builddir
if slavebuilddir is None:
slavebuilddir = builddir
self.slavebuilddir = slavebuilddir
# remainder are optional
assert category is None or isinstance(category, str)
self.category = category
self.nextSlave = nextSlave
self.nextBuild = nextBuild
self.nextSlaveAndBuild = nextSlaveAndBuild
self.locks = locks
self.env = env
self.properties = properties
self.mergeRequests = mergeRequests
def getConfigDict(self):
rv = {
'name': self.name,
'slavenames': self.slavenames,
'factory': self.factory,
'builddir': self.builddir,
'slavebuilddir': self.slavebuilddir,
}
if self.category:
rv['category'] = self.category
if self.nextSlave:
rv['nextSlave'] = self.nextSlave
if self.nextBuild:
rv['nextBuild'] = self.nextBuild
if self.nextSlaveAndBuild:
rv['nextSlaveAndBuild'] = self.nextSlaveAndBuild
if self.locks:
rv['locks'] = self.locks
if self.env:
rv['env'] = self.env
if self.properties:
rv['properties'] = self.properties
if self.mergeRequests:
rv['mergeRequests'] = self.mergeRequests
return rv
|
gpl-3.0
| -5,187,915,017,246,403,000 | 33.333333 | 79 | 0.600587 | false |
updownlife/multipleK
|
src/select_sub.py
|
1
|
3215
|
import random
def sequentialy(qset):
rset = []
for i in range(0, len(qset)):
rset.append(qset[i])
return rset
def randomly(_qset):
qset = list(_qset)
rset = []
while qset:
sel = random.choice(qset)
rset.append(sel)
qset.remove(sel)
return rset
def fullBFS(qset):
    # Reorder qset so the two endpoints come first, followed by the
    # midpoints of the remaining index range in breadth-first order.
    if len(qset) <= 2:
return qset
rset = []
rset.append(qset[0])
rset.append(qset[-1])
end = len(qset) - 2
queue = []
    cur = Node((1 + end + 1) / 2, 1, end)
queue.append(cur)
while len(queue) != 0 :
cur = queue.pop(0)
rset.append(qset[cur.val])
if cur.left < cur.val:
val = (cur.left + cur.val) / 2
leftSon = Node(val, cur.left, cur.val - 1)
queue.append(leftSon)
if cur.val < cur.right:
val = (cur.right + cur.val +1) / 2
rightSon = Node(val, cur.val+1, cur.right)
queue.append(rightSon)
return rset
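
# Illustrative result of fullBFS (computed from the logic above; any
# indexable sequence works as qset):
#   fullBFS(range(1, 8))  ->  [1, 7, 4, 3, 5, 2, 6]
# i.e. both endpoints first, then interval midpoints in BFS order.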
##################### below is abandoned #######################
def binary(qset, num):
if num > len(qset) or num <= 0:
print "subquery amount overflow! num = " + str(num)
return qset
rset = []
rset.append(qset[0])
if num == 1:
return rset
end = len(qset) - 1
rset.append(qset[end])
count = []
count.append(num - 2) # minus 2: head and tail element
bs_helper(qset, rset, 1, end-1, count)
return rset
def bs_helper(qset, rset, left, right, count):
if left > right or count[0] <= 0:
return
mid = (left + right)/2
# print qset[mid]
rset.append(qset[mid])
count[0] -= 1
bs_helper(qset, rset, left, mid - 1, count)
bs_helper(qset, rset, mid + 1, right, count)
def skip(qset, num):
if num > len(qset) or num <= 0:
print "subquery amount overflow! num = " + str(num)
return qset
visited = [False] * len(qset)
rset = []
rset.append(qset[0])
visited[0] = True
if num == 1:
return rset
end = len(qset) - 1
rset.append(qset[end])
visited[end] = True
num -= 2
step = len(qset) / 2
while num > 0 and step > 0:
cur = 0
while cur < end:
if not visited[cur]:
rset.append(qset[cur])
visited[cur] = True
cur += step
step /= 2
return rset
class Node(object):
def __init__(self, val, left, right):
self.val = val
self.left = left
self.right = right
def BTLT(qset, num):
    # Select up to num elements: the two endpoints first, then interval
    # midpoints in breadth-first order, skipping already-chosen indices.
if num >= len(qset) or num <= 0:
# print "subquery amount overflow! num = " + str(num)
return qset
rset = []
rset.append(qset[0])
if num == 1:
return rset
end = len(qset) - 1
rset.append(qset[end])
if num == 2:
return rset
visited = [False] * len(qset)
visited[0] = True
visited[1] = True
num -= 2
queue = []
    cur = Node(len(qset) / 2, 0, end)
queue.append(cur)
while len(queue) != 0 and num > 0:
cur = queue.pop(0)
if not visited[cur.val]:
rset.append(qset[cur.val])
visited[cur.val] = True
num -= 1
leftVal = (cur.left + cur.val) / 2
leftSon = Node(leftVal, cur.left, cur.val)
rightVal = (cur.right + cur.val) / 2
rightSon = Node(rightVal, cur.val, cur.right)
queue.append(leftSon)
queue.append(rightSon)
return rset
|
gpl-2.0
| 5,703,938,021,751,284,000 | 20.013072 | 64 | 0.553033 | false |
BeagleInc/PyReadableDiff
|
setup.py
|
1
|
1652
|
import setuptools
import pydiff
NAME = 'PyReadableDiff'
DESCRIPTION = 'Intuitive human-readable diff for text'
# Use .rst markup for the long description in order to provide
# the link to the repository, since PyPI doesn't support .md markup,
# so we can't use the content of README.md for this purpose.
LONG_DESCRIPTION = 'For more detailed information about the library please ' \
'visit `the official repository ' \
'<https://github.com/iuliux/PyReadableDiff>`_.'
AUTHOR = 'Gevorg Davoian, Iulius Curt, Kevin Decker ' \
'(the author of the original jsdiff library) and others'
URL = 'https://github.com/iuliux/PyReadableDiff'
VERSION = pydiff.__version__
PACKAGES = setuptools.find_packages()
KEYWORDS = ['python', 'text', 'diff', 'pydiff']
CLASSIFIERS = [
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules"
]
LICENSE = 'Apache-2.0'
setuptools.setup(
name=NAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
url=URL,
version=VERSION,
packages=PACKAGES,
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
license=LICENSE
)
|
apache-2.0
| 2,248,738,196,769,096,400 | 27.982456 | 78 | 0.671308 | false |
upcFrost/SDR2_translate
|
rpErrorHandler.py
|
1
|
26686
|
#------------------------------------------------------------------------------#
# #
# Run-time error handler for Rapyd projects #
# #
#------------------------------------------------------------------------------#
# This module handles run-time errors in order to report error locations in
# terms of project module, form and line number. When your project is being run
# from within Rapid this module facilitates the process whereby you can jump
# directly to any project line shown in the error traceback.
#
# The file <projectname>.dec, which was generated when your project was built, is
# required by this module in order to report error locations properly.
from Tkinter import *
import os
import os.path
import sys
import time
import traceback
def D(*Stuff):
"""=u
This simply prints the arguments.
    The point of using this, rather than 'print', is that once we're done debugging
we can easily search for and remove the 'D' statements.
"""
for T in Stuff:
print T,
print
def Grabber(Widget,Whine=0):
"""=u
A persistent grabber
    For unknown reasons it sometimes takes a while before the grab is successful;
perhaps you have to wait until the window has finished appearing. In
any case this persistent grabber loops for up to 0.3 seconds waiting for
the grab to take before giving up.
"""
for J in range(3):
time.sleep(0.1)
try:
Widget.grab_set()
return
except TclError:
pass
if Whine:
print 'Grab failed'
def PythonModuleFind(Name):
"""
Walk the Python search path looking for a module.
    Given a simple file name, eg "Tkinter.py", we look for the first occurrence in the Python module
search path and return as the result the full path to the file including the file itself,
eg "/usr/lib/python2.3/lib-tk/Tkinter.py".
If we don't find a matching file we return None.
"""
for Path in sys.path:
if Path:
try:
for FileName in os.listdir(Path):
if FileName == Name:
Result = Path+os.path.sep+Name
if os.path.isfile(Result):
return Result
except OSError:
                #Uh-oh - no such directory
pass
return None
def ExtractName(Path):
"""
Extract just the bare filename from a path.
eg given "/spam/parrot/stringettes.py" we return "stringettes"
"""
return os.path.splitext(os.path.split(Path)[1])[0]
class CallWrapper:
"""
Call wrapper so we can display errors which occur in a callback in a
Rapyd friendly manner.
"""
def __init__(self, func, subst, widget):
self.func = func
self.subst = subst
self.widget = widget
# Calling back from Tk into python.
def __call__(self, *args):
try:
if self.subst:
args = apply(self.subst, args)
return apply(self.func, args)
except SystemExit, msg:
raise SystemExit, msg
except:
#_reporterror(self.func, args)
RunError(func=self.func,args=args)
EventNames = {
2 : 'KeyPress', 15 : 'VisibilityNotify', 28 : 'PropertyNotify',
3 : 'KeyRelease', 16 : 'CreateNotify', 29 : 'SelectionClear',
4 : 'ButtonPress', 17 : 'DestroyNotify', 30 : 'SelectionRequest',
5 : 'ButtonRelease', 18 : 'UnmapNotify', 31 : 'SelectionNotify',
6 : 'MotionNotify', 19 : 'MapNotify', 32 : 'ColormapNotify',
7 : 'EnterNotify', 20 : 'MapRequest', 33 : 'ClientMessage',
8 : 'LeaveNotify', 21 : 'ReparentNotify', 34 : 'MappingNotify',
9 : 'FocusIn', 22 : 'ConfigureNotify', 35 : 'VirtualEvents',
10 : 'FocusOut', 23 : 'ConfigureRequest', 36 : 'ActivateNotify',
11 : 'KeymapNotify', 24 : 'GravityNotify', 37 : 'DeactivateNotify',
12 : 'Expose', 25 : 'ResizeRequest', 38 : 'MouseWheelEvent',
13 : 'GraphicsExpose', 26 : 'CirculateNotify',
14 : 'NoExpose', 27 : 'CirculateRequest',
}
class RunError(Toplevel):
"""
Dialog to handle error when user runs their project
This dialog can be invoked in four different ways:
o By direct call from inside Rapyd if a syntax error was encountered when
attempting to compile a user project prior to running it.
This is mode: 0
How we know: Rapyd passes us a locator dict as keyword argument
"Info".
o From the users project which has been spawned as it's own process
from Rapyd.
This is mode: 1.
Now we know: Environment contains a variable named FROMRAPYD whose
value is '*'.
o From the users project which has been run standalone.
This is mode: 2.
How we know: No "Info" argument and no FROMRAPYD environment variable.
o From Rapyd, using execfile. Rapyd does this in order to check for
syntax errors which would have prevented the project from running.
If there are syntax-class errors then control passes back to rapyd
which then invokes as described above for mode 0. However if, while
being invoked via execfile, the main program itself is syntax error
free but a run-time error is encountered or a syntax error happens
in an imported file then we get control. As part of doing the execfile,
Rapyd puts LocatorDict in our namespace, so it's just *there* without
our having to do anything special
This is mode: 3.
How we know: Environment contains a variable named FROMRAPYD whose
value is the path to the decoder file.
At startup we dig around and set self.mode per the above values.
We need to get the locator dictionary so we can translate from python line numbers
to Rapyd project line numbers. In Mode-0 Rapyd passes us the argument "Info"
which contains the dictionary. In other modes we go looking for the file ----.dec
(where ---- is the name of the project) and we build the locator dictionary from
that file which was itself written when Rapyd built the project.
In locator dictionary the key is the name of each generated module of the
current project and the data is a locator list where each element is a 3-tuple:
o [0] The name of the form or '-Main-'
o [1] The offset in lines to where the first line from this form appears
in the generated file.
o [2] The number of lines from this form which appear in the generated form
at this spot.
    If called directly from within Rapyd (mode 0) then our result is returned in self.Result:
o If user clicks "Dismiss" we return None
o If user clicks "Pass error to Tkinter" we return 1
o If user clicks on a line to go to we return a 3-list:
        [0] The name of the module of the error
[1] The name of the form of the error or '-Main-'
[2] The line number of the error
    When run from inside Rapyd we have access to the Rapyd help module, so a user
    request for help is handled locally.
    If run as a distinct process by Rapyd (mode 1) or if run while Rapyd was doing an
"execfile" on the project (mode 3) then we return a result by writing to a
file named "----.result" where ---- is the name of the project.
o If the user clicks on "Dismiss" we exit without creating any file.
o If the user clicks on a line to go to then ----.result consists of
three elements:
- The name of the module
- The name of the form, or -Main-
- The line number
    If the user clicks on Help then ----.result consists of "!HELP!".
If run as a stand-alone process (mode 2) then there is nobody to return a result to,
there is no help and error lines are not displayed as links because the user
has no way to go directly to the code editor.
"""
def __init__(self,Master=None,**kw):
#
# Get and save error information before any other error can occur
#
self.ErrorDescription = str(sys.exc_info()[1])
#Each element of the traceback list is a tuple:
# o [0] Path and filename. Path may be an absolute path or may be relative
# to the current working directory.
# o [1] Line number
# o [2] Function
# o [3] Text
self.TracebackList = traceback.extract_tb(sys.exc_info()[2])
self.TypeOfError = str(sys.exc_info()[0])
#D('ErrorDescription=%s'%self.ErrorDescription)
#D('TracebackList=%s'%self.TracebackList)
#D('TypeOfError=%s'%self.TypeOfError)
if '.' in self.TypeOfError:
self.TypeOfError = self.TypeOfError.split('.')[1]
if self.TypeOfError[-2:] == "'>":
#Whereas previously Python would report the type of error as something like
# "NameError" as of around 2.5 it reports it as "<type 'exceptions.NameError'>"
# hence this code to shoot off the trailing crud.
self.TypeOfError = self.TypeOfError[:-2]
#D('TypeOfError=%s'%self.TypeOfError)
#
# Any strings placed in ErrorNotes are displayed to the user prior to the error
# traceback.
#
ErrorNotes = []
#
# Look for callback related information
#
if kw.has_key('func'):
#Looks like we were invoked from a callback
self.func = kw['func']
self.args = kw['args']
del kw['func']
del kw['args']
else:
self.func = None
self.args = None
#
# Get the locator dictionary and figure out what mode we are.
#
#D('Environ=%s'%os.environ)
if kw.has_key('Info'):
#We were called directly by Rapyd
self.LocatorDict = kw['Info']
del kw['Info']
self.ProjectDirectory = kw['ProjectDirectory']
del kw['ProjectDirectory']
self.ResultPath = None
self.Mode = 0
elif os.environ.has_key('FROMRAPYD') and os.environ['FROMRAPYD'] <> '*':
#We were invoked from within the project while Rapyd was doing an execfile
# on the project. In this case FROMRAPYD is the full path to the decoder
# file.
FileName = os.environ['FROMRAPYD']
self.ProjectDirectory = os.path.dirname(FileName) + os.path.sep
self.ResultPath = os.path.splitext(FileName)[0] + '.result'
self.Mode = 3
else:
#We are running as a program on our own.
self.ProjectDirectory = os.path.dirname(sys.argv[0])
if self.ProjectDirectory <> '':
self.ProjectDirectory += os.path.sep
FileName = os.path.splitext(sys.argv[0])[0]+'.dec'
if os.environ.has_key('FROMRAPYD'):
#But that program was invoked by Rapyd
self.ResultPath = os.path.splitext(FileName)[0] + '.result'
self.Mode = 1
else:
#We are totally stand-alone
self.ResultPath = None
self.Mode = 2
if self.Mode <> 0:
#In mode 0 we are handed LocatorDict on a silver platter. For all other modes we
# have to go fetch the decoder file and turn it into LocatorDict. How we get
# the path to the decoder file varies by mode, but at this point it should be
# in "FileName".
self.LocatorDict = {}
#D('About to look for decoder file at: %s'%FileName)
if os.path.isfile(FileName):
try:
F = open(FileName)
Temp = F.readlines()
F.close()
for Line in Temp:
Line = Line.rstrip()
if len(Line) == 0:
raise Exception, 'Empty line'
if Line[0] <> ' ':
ModuleName = Line
self.LocatorDict[ModuleName] = []
else:
Line = Line.strip().split()
self.LocatorDict[ModuleName].append((Line[0], int(Line[1]), int(Line[2])))
except:
self.LocatorDict = 'Error reading "%s"'%FileName
ErrorNotes.append('Unable to display module/form information due to error reading "%s"'%FileName)
else:
ErrorNotes.append('Unable to display module/form information; file "%s" not found.'%FileName)
self.LocatorDict = 'Not found'
##D('Mode=%s, LocatorDict=%s, ProjectDirectory=%s'%(self.Mode,self.LocatorDict,self.ProjectDirectory))
apply(Toplevel.__init__,(self,Master),kw)
self.title('%s in project'%self.TypeOfError)
#Place the dialog in the center of the screen.
Width, Height = (750,400)
ScreenWidth = self.winfo_screenwidth()
ScreenHeight = self.winfo_screenheight()
Factor = 0.6
Width = int(round(ScreenWidth * Factor))
Height = int(round(ScreenHeight * Factor))
X = (ScreenWidth-Width)/2
Y = (ScreenHeight-Height)/2
self.geometry('%sx%s+%s+%s'%(Width,Height,X,Y))
#self.geometry('+%s+%s'%(Width,X,Y))
self.Result = None
#
# Text widget for the traceback
#
self.T = Text(self,height=25)
self.T.pack(expand=YES,fill=BOTH)
self.T.tag_configure('Link', foreground='#009000')
self.T.tag_bind('Link','<ButtonRelease-1>',self.on_HyperLink)
if self.TypeOfError in ('IndentationError','SyntaxError'):
#Syntax-class errors are the poor cousin of Python errors. For all other types of error
# we get an entry in the traceback list but for syntax errors all we get is
# the error description the form "invalid syntax (filename, line n)". Here we
# extract the filename and line number from the error description so we can add a
# standard-form entry to the tracback list.
#In the code that follows:
# Line is the line number of the offending line
# Path is the path to the file that contains the offending line
# TheLine is the text of the offending line
#Extract filename and line (as in integer)
Filename,Line = self.ExtractStuff(self.ErrorDescription)
#Filename is just that, a filename with no path. It may be part of our project or it may
# be a file that our project includes. The locator dict keys mention all modules in
# our project so we scan them to see if the file is part of our project.
if type(self.LocatorDict) <> type({}):
#We were not able to read the locator dictionary
TheLine = '??????'
Path = Filename
elif Filename[:1] == '?':
#Some versions of python (eg 2.2 under windows) do not tell us the filename.
Path = Filename
TheLine = "<<Python did not report the name of the file in which the error was found.>>"
else:
#We have the locator dictionary
for P in self.LocatorDict.keys():
if Filename == P:
#We found it; it's one of us.
Path = self.ProjectDirectory + Filename
break
else:
#We didn't find it; walk the python module path looking for it.
Path = PythonModuleFind(Filename)
#Note that if we didn't find it in the PythonModule path either then
# Path will be None at this point.
if Path:
#We think we have a valid path
try:
F = open(Path,'r')
FileText = F.readlines()
F.close()
except:
FileText = ['[Unable to display line. An error happened while attempting to open "%s"]'%Path]
if len(FileText) < Line:
#The line is off the end; give them the last line
TheLine = FileText[-1].strip()
elif Line < 1:
print 'rpErrorHandler: Line is unexpectedly %s'%Line
TheLine = '<unavailable>'
else:
TheLine = FileText[Line-1].strip()
else:
#No valid path
Path = Filename
TheLine = '?????'
self.TracebackList.append((Path,Line,'?',TheLine))
#Having extracted and made use of the filename and line-number from the Error Description,
# we now trim them off so as not to be showing them twice.
self.ErrorDescription = self.ErrorDescription.split('(')[0]
#If there were any error notes, display them first
for Note in ErrorNotes:
self.T.insert(INSERT,'Note: %s\n'%Note)
#
# Report possible callback information
#
if self.func:
self.T.insert(INSERT,'Exception in Tk callback\n')
self.T.insert(INSERT,' Function: %s (type: %s)\n'%(repr(self.func), type(self.func)))
self.T.insert(INSERT,' Args: %s\n'% str(self.args))
#Figure out if the argument was an event
EventArg = type(self.args)==type(()) and len(self.args) > 0 and hasattr(self.args[0],'type')
#Display the traceback list
LinkCount = 0
self.T.insert(INSERT,'Traceback (most recent call last):\n')
#For some modes the first traceback entry is the call from Rapyd which not really
# of much use so we delete it.
if self.Mode in (0,1,2):
self.TracebackList = self.TracebackList[1:]
for File,Line,Func,LineText in self.TracebackList:
Module, Form,FormLine = self.Convert(File,Line)
self.T.insert(INSERT,' Module %s, Form %s, Line %s in %s (File %s, line %s)\n'
%(Module,Form,FormLine,Func,File,Line))
if self.Mode in (0,1,3) and not ((Module[0] in '?<') or (File[0]=='?')):
#We have been invoked from Rapyd and the subject line is in our project.
#Set tags so the text will be a clickable link.
Tags = 'Link =%s:%s:%s'%(Module,Form,FormLine)
print '<%s>'%LineText
self.T.config(cursor='hand2')
LinkCount += 1
else:
#This text is not jumptoable
Tags = None
if LineText == 'pass #---end-of-form---':
LineText = '(error detected at end-of-form)'
self.T.insert(INSERT,' %s\n'%LineText,Tags)
        #Display the error type and description.
self.T.insert(INSERT,'%s: %s\n'%(self.TypeOfError,self.ErrorDescription))
#
#If we were able to display lines as links, give the user a heads up.
#
if LinkCount > 0:
if LinkCount == 1:
Msg = "\n(click on the green line above to go to that line in the corresponding code editor)\n"
else:
Msg = "\n(click on a green line above to go to that line in the corresponding code editor)\n"
self.T.insert(INSERT,Msg)
#
# If we have an event display some information about it
#
self.T.insert(INSERT,'\n')
if EventArg:
EventNum = int(self.args[0].type)
if EventNum in EventNames.keys():
self.T.insert(INSERT,'Event type: %s (type num: %s). Event content:\n'
%(EventNames[EventNum], EventNum))
Keys = self.args[0].__dict__.keys()
Keys.sort()
for Key in Keys:
self.T.insert(INSERT,' %s: %s\n'%(Key, self.args[0].__dict__[Key]))
#
# Button bar
#
self.Buttons = Frame(self)
self.Buttons.pack(side=BOTTOM)
Button(self.Buttons,text='Dismiss',command=self.on_OK).pack(side=LEFT,padx=10,pady=5)
if self.Mode in (0,1,3):
Button(self.Buttons,text='Help',command=self.on_Help).pack(side=LEFT,padx=10,pady=5)
self.bind('<Return>',self.on_OK)
self.bind('<Escape>',self.on_OK)
self.bind('<F1>',self.on_Help)
#
#be modal
#
self.focus_set()
Grabber(self)
self.wait_window()
def on_OK(self,Event=None):
"""
User clicked on Dismiss
"""
self.Result = None
if self.Mode == 3:
#We have to pass the result back via a file
try:
F = open(self.ResultPath,'w')
F.write('!HANDLED!')
F.close()
except:
print 'rpErrorHandler: Error writing result file'
self.destroy()
def on_Help(self,Event=None):
"""
User asked for help.
"""
if self.Mode == 0:
Help('run-error-dialog')
elif self.Mode in (1,3):
#We have to pass the result back via a file
try:
F = open(self.ResultPath,'w')
F.write('!HELP!')
F.close()
except:
print 'rpErrorHandler: Error writing result file'
if self.Mode == 1:
#If we are an actual process spawned by Rapyd then wrap up our process.
self.quit()
self.destroy()
def on_HyperLink(self,Event=None):
"""
User clicked on an error line which was rendered as a clickable link
"""
#Get tags associated with the cursor position
Tags = Event.widget.tag_names(CURRENT)
for T in Tags:
#look for a tag that starts with equal-sign. The rest of the tag will
# be "module:form:line"
if T[0:1] == '=':
self.Result = T[1:].split(':')
self.Result[2] = int(self.Result[2])
if self.Mode in (1,3):
#We have to pass the result back via a file
try:
F = open(self.ResultPath,'w')
F.write('%s %s %s'%tuple(self.Result))
F.close()
except:
print 'rpErrorHandler: Error writing result file'
if self.Mode == 1:
self.quit()
self.destroy()
def ExtractStuff(self,Line):
"""
Extract some stuff from a line
Given a line of the form:
"syntax error (ffff, line nnnn)"
Return ['ffff',nnnn]
Note: Some versions of Python (eg python2.2 under windows) return a line of the form
"syntax error (line nnnn)" without giving us the file. Without the file it's a
tad hard to fetch the offending line, but that's the way it is. In that case,
we return "????" in lieu of the filename as ffff.
"""
T = Line.split('(')[1] #ffff, line nnnn)
print 'T=%s'%T
T = T.split(',') #['ffff',' line nnnn)']
print 'T=%s'%T
if len(T) == 2:
#a file was specified
ffff = T[0]
T = T[1].strip() #'line nnnn)'
else:
#no file was specified
T = T[0]
ffff = "????"
T = T.split(' ')[1] # 'nnnn)'
nnnn = T[:-1]
return [ffff,int(nnnn)]
def Convert(self,Filename,LineNumber):
"""
Convert a generated file reference to a rapyd reference.
Filename is a path to the generated python file.
Linenumber is the line number in the generated file as returned by
python in origin-1
The result is a 3-list giving:
        o [0] The name of the module of the Rapyd project.
        o [1] The form (or '-Main-') in the module.
        o [2] The line number in the said form, in origin-1.
If the file is not part of our project we return ('<None>','<None>',0)
"""
if type(self.LocatorDict) <> type({}):
#We don't have a valid locator dict
return ['?????','?????',0]
Filename = os.path.split(Filename)[1]
##D('Convert: Filename=%s, LineNumber=%s'%(Filename,LineNumber))
try:
Locator = self.LocatorDict[Filename]
except:
return ['<None>','<None>',0]
MainChunkOffset = 0
ModuleName = os.path.splitext(Filename)[0]
##D('=== seeking %s in %s'%(LineNumber,Filename))
for Form,Offset,Length in Locator:
##D('Form=%s Offset=%s Length=%s MainChunkOffset=%s'%(Form,Offset,Length,MainChunkOffset))
if Offset+Length > LineNumber-1:
Result = [ModuleName, Form, LineNumber-Offset]
if Form == '-Main-':
Result[2] += MainChunkOffset
return Result
if Form == '-Main-' and MainChunkOffset == 0:
#This little dance with MainChunkOffset is necessary because the
# -Main- code is split up into two chunks with the form
# code in between them.
MainChunkOffset = Length
#They asked for a line off the end. Give them the last line.
return [ModuleName, Form, Length+Offset]
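    # Illustrative conversion (hypothetical locator data):
    #   with self.LocatorDict == {'MyModule.py': [('-Main-', 0, 5),
    #                                             ('Form1', 5, 20)]}
    #   self.Convert('MyModule.py', 8)  ->  ['MyModule', 'Form1', 3]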
|
gpl-3.0
| -1,601,429,762,813,613,000 | 42.39187 | 117 | 0.539871 | false |
sbinet-staging/pyrame
|
chkpyr/chkpyr.py
|
1
|
1163
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Frédéric Magniette, Miguel Rubio-Roy
# This file is part of Pyrame.
#
# Pyrame is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrame is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrame. If not, see <http://www.gnu.org/licenses/>
import socket
import sys
import bindpyrame
try:
sock = bindpyrame.open_socket(sys.argv[1],int(sys.argv[2]))
except Exception as e:
sys.stderr.write("[ERROR] %s\n" % e)
sys.exit(1)
sock.send(sys.argv[3]+"\n")
retcode,res = bindpyrame.get_cmd_result(sock)
print("retcode={0} res={1}".format(retcode,res))
sock.close()
if retcode==1:
sys.exit(0)
else:
sys.exit(1)
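
# Illustrative invocation (hypothetical host, port and Pyrame command):
#   ./chkpyr.py localhost 9212 "getapi_cmod"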
|
lgpl-3.0
| -3,458,277,395,945,562,000 | 27.317073 | 77 | 0.724376 | false |
lbouma/Cyclopath
|
pyserver/util_/strutil.py
|
1
|
4807
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.

import re
import string
# MAC_RE = re.compile(r'^(mc|mac|de|van)(.)(.*)')
#
# def capitalize_surname(name):
# '''Return a semi-intelligently capitalized version of name.'''
# name = name.lower()
# # capitalize letter after Mac/Mc/etc
# m = re.search(MAC_RE, name)
# if (m is not None):
# name = m.group(1) + m.group(2).upper() + m.group(3)
# # capitalize first letter, leave others alone
# return (name[:1].upper() + name[1:])
# ***
#
def sql_in_integers(int_list):
'''Convert an array of integers into a string suitable for SQL.'''
return "(%s)" % ','.join([str(some_int) for some_int in int_list])
# ***
#
def phonetic_crush(names):
'''Given a list of names: for each name foo, remove all following
phonetically similar names. Return the new list, in arbitrary order.'''
d = dict()
for name in reversed(names):
name = name[:-1] # strip trailing newline
d[caverphone(name)] = name.lower()
return d.values()
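
# Illustrative example (note: each input name is expected to carry a
# trailing newline, which the function strips):
#   phonetic_crush(['Smith\n', 'Smyth\n', 'Jones\n'])
#   -> ['smith', 'jones'] in arbitrary order ('Smyth' collapses into 'Smith')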
# This function is taken from the AdvaS library http://advas.sourceforge.net/
# by Frank Hofmann et al. and is GPL2.
def caverphone(term):
"returns the language key using the caverphone algorithm 2.0"
# Developed at the University of Otago, New Zealand.
# Project: Caversham Project (http://caversham.otago.ac.nz)
# Developer: David Hood, University of Otago, New Zealand
# Contact: caversham@otago.ac.nz
# Project Technical Paper: http://caversham.otago.ac.nz/files/working/ctp150804.pdf
# Version 2.0 (2004-08-15)
code = ""
i = 0
term_length = len(term)
if (term_length == 0):
# empty string ?
return code
# end if
# convert to lowercase
code = string.lower(term)
# remove anything not in the standard alphabet (a-z)
code = re.sub(r'[^a-z]', '', code)
# remove final e
if code.endswith("e"):
code = code[:-1]
# if the name starts with cough, rough, tough, enough or trough -> cou2f (rou2f, tou2f, enou2f, trough)
code = re.sub(r'^([crt]|(en)|(tr))ough', r'\1ou2f', code)
# if the name starts with gn -> 2n
code = re.sub(r'^gn', r'2n', code)
# if the name ends with mb -> m2
code = re.sub(r'mb$', r'm2', code)
# replace cq -> 2q
code = re.sub(r'cq', r'2q', code)
# replace c[i,e,y] -> s[i,e,y]
code = re.sub(r'c([iey])', r's\1', code)
# replace tch -> 2ch
code = re.sub(r'tch', r'2ch', code)
# replace c,q,x -> k
code = re.sub(r'[cqx]', r'k', code)
# replace v -> f
code = re.sub(r'v', r'f', code)
# replace dg -> 2g
code = re.sub(r'dg', r'2g', code)
# replace ti[o,a] -> si[o,a]
code = re.sub(r'ti([oa])', r'si\1', code)
# replace d -> t
code = re.sub(r'd', r't', code)
# replace ph -> fh
code = re.sub(r'ph', r'fh', code)
# replace b -> p
code = re.sub(r'b', r'p', code)
# replace sh -> s2
code = re.sub(r'sh', r's2', code)
# replace z -> s
code = re.sub(r'z', r's', code)
# replace initial vowel [aeiou] -> A
code = re.sub(r'^[aeiou]', r'A', code)
# replace all other vowels [aeiou] -> 3
code = re.sub(r'[aeiou]', r'3', code)
# replace j -> y
code = re.sub(r'j', r'y', code)
# replace an initial y3 -> Y3
code = re.sub(r'^y3', r'Y3', code)
# replace an initial y -> A
code = re.sub(r'^y', r'A', code)
# replace y -> 3
code = re.sub(r'y', r'3', code)
# replace 3gh3 -> 3kh3
code = re.sub(r'3gh3', r'3kh3', code)
# replace gh -> 22
code = re.sub(r'gh', r'22', code)
# replace g -> k
code = re.sub(r'g', r'k', code)
# replace groups of s,t,p,k,f,m,n by its single, upper-case equivalent
for single_letter in ["s", "t", "p", "k", "f", "m", "n"]:
otherParts = re.split(single_letter + "+", code)
code = string.join(otherParts, string.upper(single_letter))
# replace w[3,h3] by W[3,h3]
code = re.sub(r'w(h?3)', r'W\1', code)
# replace final w with 3
code = re.sub(r'w$', r'3', code)
# replace w -> 2
code = re.sub(r'w', r'2', code)
# replace h at the beginning with an A
code = re.sub(r'^h', r'A', code)
# replace all other occurrences of h with a 2
code = re.sub(r'h', r'2', code)
# replace r3 with R3
code = re.sub(r'r3', r'R3', code)
# replace final r -> 3
code = re.sub(r'r$', r'3', code)
# replace r with 2
code = re.sub(r'r', r'2', code)
# replace l3 with L3
code = re.sub(r'l3', r'L3', code)
# replace final l -> 3
code = re.sub(r'l$', r'3', code)
# replace l with 2
code = re.sub(r'l', r'2', code)
# remove all 2's
code = re.sub(r'2', r'', code)
# replace the final 3 -> A
code = re.sub(r'3$', r'A', code)
# remove all 3's
code = re.sub(r'3', r'', code)
# extend the code by 10 '1' (one)
code += '1' * 10
# take the first 10 characters
caverphoneCode = code[:10]
# return caverphone code
return caverphoneCode
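# Illustrative hand-traces of the rules above (worth re-checking against
# the code):
#   caverphone("Smith")  ->  "SMT1111111"
#   caverphone("Smyth")  ->  "SMT1111111"   # hence the collapse in phonetic_crush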
# ***
if (__name__ == '__main__'):
pass
|
apache-2.0
| -3,592,574,386,759,142,400 | 23.155779 | 104 | 0.59871 | false |
Arundhatii/erpnext
|
erpnext/buying/doctype/request_for_quotation/request_for_quotation.py
|
1
|
12193
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, cint
from frappe.utils.user import get_user_fullname
from frappe.utils.print_format import download_pdf
from frappe.desk.form.load import get_attachments
from frappe.core.doctype.communication.email import make
from erpnext.accounts.party import get_party_account_currency, get_party_details
from erpnext.stock.doctype.material_request.material_request import set_missing_values
from erpnext.controllers.buying_controller import BuyingController
from erpnext.buying.utils import validate_for_items
STANDARD_USERS = ("Guest", "Administrator")
class RequestforQuotation(BuyingController):
def validate(self):
self.validate_duplicate_supplier()
self.validate_supplier_list()
validate_for_items(self)
self.update_email_id()
def validate_duplicate_supplier(self):
supplier_list = [d.supplier for d in self.suppliers]
if len(supplier_list) != len(set(supplier_list)):
frappe.throw(_("Same supplier has been entered multiple times"))
def validate_supplier_list(self):
for d in self.suppliers:
prevent_rfqs = frappe.db.get_value("Supplier", d.supplier, 'prevent_rfqs')
if prevent_rfqs:
standing = frappe.db.get_value("Supplier Scorecard",d.supplier, 'status')
frappe.throw(_("RFQs are not allowed for {0} due to a scorecard standing of {1}").format(d.supplier, standing))
warn_rfqs = frappe.db.get_value("Supplier", d.supplier, 'warn_rfqs')
if warn_rfqs:
standing = frappe.db.get_value("Supplier Scorecard",d.supplier, 'status')
frappe.msgprint(_("{0} currently has a {1} Supplier Scorecard standing, and RFQs to this supplier should be issued with caution.").format(d.supplier, standing), title=_("Caution"), indicator='orange')
def update_email_id(self):
for rfq_supplier in self.suppliers:
if not rfq_supplier.email_id:
rfq_supplier.email_id = frappe.db.get_value("Contact", rfq_supplier.contact, "email_id")
def validate_email_id(self, args):
if not args.email_id:
frappe.throw(_("Row {0}: For supplier {0} Email Address is required to send email").format(args.idx, args.supplier))
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
for supplier in self.suppliers:
supplier.email_sent = 0
supplier.quote_status = 'Pending'
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def send_to_supplier(self):
for rfq_supplier in self.suppliers:
if rfq_supplier.send_email:
self.validate_email_id(rfq_supplier)
# make new user if required
update_password_link = self.update_supplier_contact(rfq_supplier, self.get_link())
self.update_supplier_part_no(rfq_supplier)
self.supplier_rfq_mail(rfq_supplier, update_password_link, self.get_link())
rfq_supplier.email_sent = 1
rfq_supplier.save()
def get_link(self):
# RFQ link for supplier portal
return get_url("/rfq/" + self.name)
def update_supplier_part_no(self, args):
self.vendor = args.supplier
for item in self.items:
item.supplier_part_no = frappe.db.get_value('Item Supplier',
{'parent': item.item_code, 'supplier': args.supplier}, 'supplier_part_no')
def update_supplier_contact(self, rfq_supplier, link):
'''Create a new user for the supplier if not set in contact'''
update_password_link = ''
if frappe.db.exists("User", rfq_supplier.email_id):
user = frappe.get_doc("User", rfq_supplier.email_id)
else:
user, update_password_link = self.create_user(rfq_supplier, link)
self.update_contact_of_supplier(rfq_supplier, user)
return update_password_link
def update_contact_of_supplier(self, rfq_supplier, user):
if rfq_supplier.contact:
contact = frappe.get_doc("Contact", rfq_supplier.contact)
else:
contact = frappe.new_doc("Contact")
contact.first_name = rfq_supplier.supplier_name or rfq_supplier.supplier
contact.append('links', {
'link_doctype': 'Supplier',
'link_name': rfq_supplier.supplier
})
if not contact.email_id and not contact.user:
contact.email_id = user.name
contact.user = user.name
contact.save(ignore_permissions=True)
def create_user(self, rfq_supplier, link):
user = frappe.get_doc({
'doctype': 'User',
'send_welcome_email': 0,
'email': rfq_supplier.email_id,
'first_name': rfq_supplier.supplier_name or rfq_supplier.supplier,
'user_type': 'Website User',
'redirect_url': link
})
user.save(ignore_permissions=True)
update_password_link = user.reset_password()
return user, update_password_link
def supplier_rfq_mail(self, data, update_password_link, rfq_link):
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'update_password_link': update_password_link,
'message': frappe.render_template(self.message_for_supplier, data.as_dict()),
'rfq_link': rfq_link,
'user_fullname': full_name
}
subject = _("Request for Quotation")
template = "templates/emails/request_for_quotation.html"
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
message = frappe.get_template(template).render(args)
attachments = self.get_attachments()
self.send_email(data, sender, subject, message, attachments)
def send_email(self, data, sender, subject, message, attachments):
make(subject = subject, content=message,recipients=data.email_id,
sender=sender,attachments = attachments, send_email=True,
doctype=self.doctype, name=self.name)["name"]
frappe.msgprint(_("Email sent to supplier {0}").format(data.supplier))
def get_attachments(self):
attachments = [d.name for d in get_attachments(self.doctype, self.name)]
attachments.append(frappe.attach_print(self.doctype, self.name, doc=self))
return attachments
def update_rfq_supplier_status(self, sup_name=None):
for supplier in self.suppliers:
if sup_name == None or supplier.supplier == sup_name:
if supplier.quote_status != _('No Quote'):
quote_status = _('Received')
for item in self.items:
sqi_count = frappe.db.sql("""
SELECT
COUNT(sqi.name) as count
FROM
`tabSupplier Quotation Item` as sqi,
`tabSupplier Quotation` as sq
WHERE sq.supplier = %(supplier)s
AND sqi.docstatus = 1
AND sqi.request_for_quotation_item = %(rqi)s
AND sqi.parent = sq.name""",
{"supplier": supplier.supplier, "rqi": item.name}, as_dict=1)[0]
if (sqi_count.count) == 0:
quote_status = _('Pending')
supplier.quote_status = quote_status
@frappe.whitelist()
def send_supplier_emails(rfq_name):
check_portal_enabled('Request for Quotation')
rfq = frappe.get_doc("Request for Quotation", rfq_name)
if rfq.docstatus==1:
rfq.send_to_supplier()
def check_portal_enabled(reference_doctype):
if not frappe.db.get_value('Portal Menu Item',
{'reference_doctype': reference_doctype}, 'enabled'):
frappe.throw(_("Request for Quotation is disabled to access from portal, for more check portal settings."))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["show_sidebar"] = True
return list_context
def get_supplier_contacts(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(""" select `tabContact`.name from `tabContact`, `tabDynamic Link`
where `tabDynamic Link`.link_doctype = 'Supplier' and (`tabDynamic Link`.link_name = %(name)s
or `tabDynamic Link`.link_name like %(txt)s) and `tabContact`.name = `tabDynamic Link`.parent
limit %(start)s, %(page_len)s""", {"start": start, "page_len":page_len, "txt": "%%%s%%" % txt, "name": filters.get('supplier')})
# This method is used to make a supplier quotation from the material request form.
@frappe.whitelist()
def make_supplier_quotation(source_name, for_supplier, target_doc=None):
def postprocess(source, target_doc):
target_doc.supplier = for_supplier
args = get_party_details(for_supplier, party_type="Supplier", ignore_permissions=True)
target_doc.currency = args.currency or get_party_account_currency('Supplier', for_supplier, source.company)
target_doc.buying_price_list = args.buying_price_list or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Request for Quotation", source_name, {
"Request for Quotation": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Quotation Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "request_for_quotation_item",
"parent": "request_for_quotation"
},
}
}, target_doc, postprocess)
return doclist
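# Illustrative whitelisted call (hypothetical document names), e.g. from a
# client script or the bench console:
#   make_supplier_quotation("RFQ-00042", for_supplier="_Test Supplier")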
# This method is used to make a supplier quotation from the supplier's portal.
@frappe.whitelist()
def create_supplier_quotation(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
try:
sq_doc = frappe.get_doc({
"doctype": "Supplier Quotation",
"supplier": doc.get('supplier'),
"terms": doc.get("terms"),
"company": doc.get("company"),
"currency": doc.get('currency') or get_party_account_currency('Supplier', doc.get('supplier'), doc.get('company')),
"buying_price_list": doc.get('buying_price_list') or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
})
add_items(sq_doc, doc.get('supplier'), doc.get('items'))
sq_doc.flags.ignore_permissions = True
sq_doc.run_method("set_missing_values")
sq_doc.save()
frappe.msgprint(_("Supplier Quotation {0} created").format(sq_doc.name))
return sq_doc.name
except Exception:
return None
def add_items(sq_doc, supplier, items):
for data in items:
if data.get("qty") > 0:
if isinstance(data, dict):
data = frappe._dict(data)
create_rfq_items(sq_doc, supplier, data)
def create_rfq_items(sq_doc, supplier, data):
sq_doc.append('items', {
"item_code": data.item_code,
"item_name": data.item_name,
"description": data.description,
"qty": data.qty,
"rate": data.rate,
"supplier_part_no": frappe.db.get_value("Item Supplier", {'parent': data.item_code, 'supplier': supplier}, "supplier_part_no"),
"warehouse": data.warehouse or '',
"request_for_quotation_item": data.name,
"request_for_quotation": data.parent
})
@frappe.whitelist()
def get_pdf(doctype, name, supplier_idx):
doc = get_rfq_doc(doctype, name, supplier_idx)
if doc:
download_pdf(doctype, name, doc=doc)
def get_rfq_doc(doctype, name, supplier_idx):
if cint(supplier_idx):
doc = frappe.get_doc(doctype, name)
args = doc.get('suppliers')[cint(supplier_idx) - 1]
doc.update_supplier_part_no(args)
return doc
@frappe.whitelist()
def get_item_from_material_requests_based_on_supplier(source_name, target_doc = None):
mr_items_list = frappe.db.sql("""
SELECT
mr.name, mr_item.item_code
FROM
`tabItem` as item,
`tabItem Supplier` as item_supp,
`tabMaterial Request Item` as mr_item,
`tabMaterial Request` as mr
WHERE item_supp.supplier = %(supplier)s
AND item.name = item_supp.parent
AND mr_item.parent = mr.name
AND mr_item.item_code = item.name
AND mr.status != "Stopped"
AND mr.material_request_type = "Purchase"
AND mr.docstatus = 1
AND mr.per_ordered < 99.99""", {"supplier": source_name}, as_dict=1)
material_requests = {}
for d in mr_items_list:
material_requests.setdefault(d.name, []).append(d.item_code)
for mr, items in material_requests.items():
target_doc = get_mapped_doc("Material Request", mr, {
"Material Request": {
"doctype": "Request for Quotation",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"],
}
},
"Material Request Item": {
"doctype": "Request for Quotation Item",
"condition": lambda row: row.item_code in items,
"field_map": [
["name", "material_request_item"],
["parent", "material_request"],
["uom", "uom"]
]
}
}, target_doc)
return target_doc
|
gpl-3.0
| 6,958,179,286,313,263,000 | 35.28869 | 204 | 0.700238 | false |
becomejapan/yahooads-python-lib
|
examples/RetargetingListService/RetargetingListService_mutate_SET.py
|
1
|
11527
|
# Copyright 2017 Become Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for
Service : RetargetingListService
Operation: mutate(SET)
API Reference: https://github.com/yahoojp-marketing/sponsored-search-api-documents/blob/201901/docs/en/api_reference/services/RetargetingListService.md
Generated by 'api_reference_example_generator.py' using code template 'examples/sample_template.py.template'
"""
import logging
import json
from yahooads import promotionalads
logging.basicConfig(level=logging.INFO)
# logging.getLogger('suds.client').setLevel(logging.DEBUG)
# logging.getLogger('suds.transport').setLevel(logging.DEBUG)
SERVICE = 'RetargetingListService'
OPERATION = 'mutate(SET)'
OPERAND = {
"operator": "SET",
"accountId": "SAMPLE-ACCOUNT-ID",
"operand": [
{
"xsi_type": "DefaultTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"targetListId": "2222",
"targetListType": "DEFAULT",
"targetListName": "TargetList Default",
"targetListDescription": "TargetList Default",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180"
},
{
"xsi_type": "RuleBaseTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"targetListId": "12222",
"targetListType": "RULE",
"targetListName": "TargetList RULE",
"targetListDescription": "TargetList RULE",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180",
"rules": [
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "EQUALS",
"value": "http://yahoo.co.jp",
"urlKey": "URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_EQUAL",
"value": "http://not.equal.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "CONTAINS",
"value": "http://contains.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_CONTAIN",
"value": "http://not.contain.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "STARTS_WITH",
"value": "http://starts.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_START_WITH",
"value": "http://not.start.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "ENDS_WITH",
"value": "http://ends.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_END_WITH",
"value": "http://not.end.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
}
],
"isAllVisitor": "TRUE",
"isDateSpecific": "FALSE"
},
{
"xsi_type": "LogicalTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"targetListId": "912222",
"targetListType": "LOGICAL",
"targetListName": "TargetList LOGICAL",
"targetListDescription": "TargetList LOGICAL",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180",
"logicalGroup": [
{
"condition": "AND",
"logicalOperand": [
{
"targetListId": "1111111"
},
{
"targetListId": "1111112"
},
{
"targetListId": "1111113"
}
]
},
{
"condition": "OR",
"logicalOperand": [
{
"targetListId": "2111111"
},
{
"targetListId": "2111112"
}
]
},
{
"condition": "NOT",
"logicalOperand": {
"targetListId": "3111111"
}
}
]
}
]
}
"""
SAMPLE RESPONSE = {
"rval": {
"ListReturnValue.Type": "RetargetingListReturnValue",
"Operation.Type": "SET",
"values": [
{
"operationSucceeded": "true",
"targetList": {
"xsi_type": "DefaultTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"owner": "OWNER",
"retargetingAccountStatus": {
"agreeDate": "20170529",
"reviewStatus": "APPROVED"
},
"targetListId": "2222",
"targetListTrackId": "3333",
"targetListType": "DEFAULT",
"targetListName": "TargetList Default",
"targetListDescription": "TargetList Default",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180",
"reach": "0",
"tag": ""
}
},
{
"operationSucceeded": "true",
"targetList": {
"xsi_type": "RuleBaseTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"owner": "SHARED",
"retargetingAccountStatus": {
"agreeDate": "20170629",
"reviewStatus": "APPROVED"
},
"targetListId": "12222",
"targetListTrackId": "13333",
"targetListType": "RULE",
"targetListName": "TargetList RULE",
"targetListDescription": "TargetList RULE",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180",
"reach": "0",
"rules": [
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "EQUALS",
"value": "http://yahoo.co.jp",
"urlKey": "URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_EQUAL",
"value": "http://not.equal.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "CONTAINS",
"value": "http://contains.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_CONTAIN",
"value": "http://not.contain.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "STARTS_WITH",
"value": "http://starts.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_START_WITH",
"value": "http://not.start.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "ENDS_WITH",
"value": "http://ends.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
},
{
"ruleItems": {
"xsi_type": "UrlRuleItem",
"ruleType": "URL_RULE",
"operator": "NOT_END_WITH",
"value": "http://not.end.with.yahoo.co.jp",
"urlKey": "REFFER_URL"
}
}
],
"isAllVisitor": "TRUE",
"isDateSpecific": "FALSE"
}
},
{
"operationSucceeded": "true",
"targetList": {
"xsi_type": "LogicalTargetList",
"accountId": "SAMPLE-ACCOUNT-ID",
"owner": "SHARED",
"retargetingAccountStatus": {
"agreeDate": "20170629",
"reviewStatus": "APPROVED"
},
"targetListId": "912222",
"targetListTrackId": "913333",
"targetListType": "LOGICAL",
"targetListName": "TargetList LOGICAL",
"targetListDescription": "TargetList LOGICAL",
"reachStorageStatus": "OPEN",
"reachStorageSpan": "180",
"reach": "0",
"logicalGroup": [
{
"condition": "AND",
"logicalOperand": [
{
"targetListId": "1111111"
},
{
"targetListId": "1111112"
},
{
"targetListId": "1111113"
}
]
},
{
"condition": "OR",
"logicalOperand": [
{
"targetListId": "2111111"
},
{
"targetListId": "2111112"
}
]
},
{
"condition": "NOT",
"logicalOperand": {
"targetListId": "3111111"
}
}
]
}
}
]
}
}
"""
def main():
client = promotionalads.PromotionalAdsClient.LoadFromConfiguration()
service = client.GetService(SERVICE)
print("REQUEST : {}.{}\n{}".format(SERVICE, OPERATION, json.dumps(OPERAND, indent=2)))
try:
if OPERATION == "get":
response = service.get(OPERAND)
elif OPERATION.startswith("get"):
get_method = getattr(service, OPERATION)
response = get_method(OPERAND)
elif OPERATION.startswith("mutate"):
response = service.mutate(OPERAND)
else:
raise("Unknown Operation '{}'".format(OPERATION))
print("RESPONSE :\n{}".format(response))
except Exception as e:
print("Exception at '{}' operations \n{}".format(SERVICE, e))
raise e
if __name__ == '__main__':
main()
|
apache-2.0
| 6,747,473,012,270,576,000 | 29.018229 | 151 | 0.452937 | false |
Jumpscale/core9
|
setup.py
|
1
|
2646
|
from setuptools import setup, find_packages
from distutils.sysconfig import get_python_lib
from setuptools.command.install import install as _install
from setuptools.command.develop import develop as _develop
import os
def _post_install(libname, libpath):
from JumpScale9 import j # here its still the boostrap JumpScale9
# remove leftovers
for item in j.sal.fs.find("/usr/local/bin/", fileregex="js9*"):
j.sal.fs.remove("/usr/local/bin/%s" % item)
j.tools.executorLocal.initEnv()
j.tools.jsloader.generate()
class install(_install):
def run(self):
_install.run(self)
libname = self.config_vars['dist_name']
libpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), libname)
self.execute(_post_install, (libname, libpath), msg="Running post install task")
class develop(_develop):
def run(self):
_develop.run(self)
libname = self.config_vars['dist_name']
libpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), libname)
self.execute(_post_install, (libname, libpath), msg="Running post install task")
long_description = ""
try:
from pypandoc import convert
long_description = convert('README.md', 'rst')
except ImportError:
long_description = ""
setup(
name='JumpScale9',
version='9.4.0',
description='Automation framework for cloud workloads',
long_description=long_description,
url='https://github.com/Jumpscale/core9',
author='GreenItGlobe',
author_email='info@gig.tech',
license='Apache',
packages=find_packages(),
    # IF YOU CHANGE ANYTHING HERE, LET DESPIEGK KNOW (DO NOT INSTALL ANYTHING WHICH NEEDS TO COMPILE)
install_requires=[
'GitPython>=2.1.3',
'click>=6.7',
'colored_traceback',
'colorlog>=2.10.0',
'httplib2>=0.10.3',
'ipython>=6.0.0',
'libtmux>=0.7.1',
'netaddr>=0.7.19',
'path.py>=10.3.1',
'pystache>=0.5.4',
'python-dateutil>=2.6.0',
'pytoml>=0.1.12',
'toml',
'redis>=2.10.5',
'requests>=2.13.0',
'future>=0.16.0',
'watchdog',
'msgpack-python',
'npyscreen',
'pyyaml',
        'pyserial>=3.0',
'docker>=3',
'fakeredis',
'ssh2-python',
'parallel_ssh>=1.4.0',
'psutil>=5.4.3',
'Unidecode>=1.0.22',
],
cmdclass={
'install': install,
'develop': develop,
'developement': develop
},
scripts=[
'cmds/js9',
'cmds/js9_code',
'cmds/js9_docker',
'cmds/js9_doc',
],
)
|
apache-2.0
| 2,619,547,504,338,792,000 | 26 | 100 | 0.592215 | false |
skirsdeda/djangocms-blog
|
djangocms_blog/cms_toolbars.py
|
1
|
3587
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from django.core.urlresolvers import reverse
from django.utils.translation import override, ugettext_lazy as _
from .settings import get_setting
@toolbar_pool.register
class BlogToolbar(CMSToolbar):
def populate(self):
if (not self.is_current_app and not get_setting('ENABLE_THROUGH_TOOLBAR_MENU')) or \
not self.request.user.has_perm('djangocms_blog.add_post'):
return # pragma: no cover
admin_menu = self.toolbar.get_or_create_menu('djangocms_blog', _('Blog'))
with override(self.current_lang):
url = reverse('admin:djangocms_blog_post_changelist')
admin_menu.add_modal_item(_('Post list'), url=url)
url = reverse('admin:djangocms_blog_post_add')
admin_menu.add_modal_item(_('Add post'), url=url)
current_config = getattr(self.request, get_setting('CURRENT_NAMESPACE'), None)
if current_config:
url = reverse('admin:djangocms_blog_blogconfig_change', args=(current_config.pk,))
admin_menu.add_modal_item(_('Edit configuration'), url=url)
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if current_post and self.request.user.has_perm('djangocms_blog.change_post'): # pragma: no cover # NOQA
admin_menu.add_modal_item(_('Edit Post'), reverse(
'admin:djangocms_blog_post_change', args=(current_post.pk,)),
active=True)
def add_publish_button(self):
"""
Adds the publish button to the toolbar if the current post is unpublished
"""
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if (self.toolbar.edit_mode and current_post and
not current_post.publish and
self.request.user.has_perm('djangocms_blog.change_post')
): # pragma: no cover # NOQA
classes = ['cms-btn-action', 'blog-publish']
title = _('Publish {0} now').format(current_post.app_config.object_name)
url = admin_reverse('djangocms_blog_publish_article', args=(current_post.pk,))
self.toolbar.add_button(title, url=url, extra_classes=classes, side=self.toolbar.RIGHT)
def post_template_populate(self):
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if current_post and self.request.user.has_perm('djangocms_blog.change_post'): # pragma: no cover # NOQA
# removing page meta menu, if present, to avoid confusion
try: # pragma: no cover
import djangocms_page_meta # NOQA
menu = self.request.toolbar.get_or_create_menu('page')
pagemeta = menu.get_or_create_menu('pagemeta', 'meta')
menu.remove_item(pagemeta)
except ImportError:
pass
# removing page tags menu, if present, to avoid confusion
try: # pragma: no cover
import djangocms_page_tags # NOQA
menu = self.request.toolbar.get_or_create_menu('page')
pagetags = menu.get_or_create_menu('pagetags', 'tags')
menu.remove_item(pagetags)
except ImportError:
pass
self.add_publish_button()
|
bsd-3-clause
| 7,367,875,757,228,863,000 | 48.819444 | 117 | 0.618344 | false |
Guts/DicoGIS
|
dicogis/georeaders/Infos_DXF.py
|
1
|
8858
|
#! python3 # noqa: E265
# ----------------------------------------------------------------------------
# Name: Infos DXF
# Purpose: Use GDAL/OGR and dxfgrabber to read AutoCAD exchanges file format.
#
# Author: Julien Moura (https://github.com/Guts/)
#
# ----------------------------------------------------------------------------
# ############################################################################
# ######### Libraries #############
# #################################
# Standard library
import logging
from collections import OrderedDict
from os import chdir, path
from time import localtime, strftime
# 3rd party libraries
import dxfgrabber
try:
from osgeo import gdal
except ImportError:
import gdal
# custom submodules
try:
from .gdal_exceptions_handler import GdalErrorHandler
from .geo_infos_generic import GeoInfosGenericReader
from .geoutils import Utils
except (ImportError, ValueError):  # relative imports fail when run as a script
from gdal_exceptions_handler import GdalErrorHandler
from geo_infos_generic import GeoInfosGenericReader
from geoutils import Utils
# ############################################################################
# ######### Globals ############
# ##############################
gdal_err = GdalErrorHandler()
georeader = GeoInfosGenericReader()
logger = logging.getLogger(__name__)
youtils = Utils()
# ##############################################################################
# ########## Classes #############
# ################################
class ReadDXF:
def __init__(self, source_path, dico_dataset, tipo, txt=""):
"""Uses OGR functions to extract basic informations about
geographic vector file (handles shapefile or MapInfo tables)
and store into dictionaries.
source_path = path to the DXF file
dico_dataset = dictionary for global informations
tipo = format
text = dictionary of text in the selected language
"""
# handling ogr specific exceptions
errhandler = gdal_err.handler
gdal.PushErrorHandler(errhandler)
gdal.UseExceptions()
self.alert = 0
# changing working directory to layer folder
chdir(path.dirname(source_path))
# opening DXF
try:
# driver_dxf = ogr.GetDriverByName(str("DXF"))
# dxf = driver_dxf.Open(source_path, 0)
src = gdal.OpenEx(source_path, 0)
except Exception as err:
logging.error(err)
youtils.erratum(dico_dataset, source_path, "err_corrupt")
self.alert = self.alert + 1
return None
# raising incompatible files
if not src:
"""if file is not compatible"""
self.alert += 1
dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
youtils.erratum(dico_dataset, source_path, "err_nobjet")
return None
else:
layer = src.GetLayer() # get the layer
pass
# DXF name and parent folder
try:
dico_dataset["name"] = path.basename(src.GetName())
dico_dataset["folder"] = path.dirname(src.GetName())
except AttributeError as err:
logger.warning(err)
dico_dataset["name"] = path.basename(source_path)
dico_dataset["folder"] = path.dirname(source_path)
# specific AutoDesk informations
douxef = dxfgrabber.readfile(source_path)
dico_dataset["version_code"] = douxef.dxfversion
# see: http://dxfgrabber.readthedocs.org/en/latest/#Drawing.dxfversion
if douxef.dxfversion == "AC1009":
dico_dataset["version_name"] = "AutoCAD R12"
elif douxef.dxfversion == "AC1015":
dico_dataset["version_name"] = "AutoCAD R2000"
elif douxef.dxfversion == "AC1018":
dico_dataset["version_name"] = "AutoCAD R2004"
elif douxef.dxfversion == "AC1021":
dico_dataset["version_name"] = "AutoCAD R2007"
elif douxef.dxfversion == "AC1024":
dico_dataset["version_name"] = "AutoCAD R2010"
elif douxef.dxfversion == "AC1027":
dico_dataset["version_name"] = "AutoCAD R2013"
else:
dico_dataset["version_name"] = "douxef.dxfversion"
# layers count and names
dico_dataset["layers_count"] = src.GetLayerCount()
li_layers_names = []
li_layers_idx = []
dico_dataset["layers_names"] = li_layers_names
dico_dataset["layers_idx"] = li_layers_idx
# dependencies and total size
dependencies = youtils.list_dependencies(source_path, "auto")
dico_dataset["dependencies"] = dependencies
dico_dataset["total_size"] = youtils.sizeof(source_path, dependencies)
# global dates
crea, up = path.getctime(source_path), path.getmtime(source_path)
dico_dataset["date_crea"] = strftime("%Y/%m/%d", localtime(crea))
dico_dataset["date_actu"] = strftime("%Y/%m/%d", localtime(up))
# total fields count
total_fields = 0
dico_dataset["total_fields"] = total_fields
# total objects count
total_objs = 0
dico_dataset["total_objs"] = total_objs
# parsing layers
for layer_idx in range(src.GetLayerCount()):
# dictionary where will be stored informations
dico_layer = OrderedDict()
dico_layer["src_name"] = dico_dataset.get("name")
# getting layer object
layer = src.GetLayerByIndex(layer_idx)
# layer globals
li_layers_names.append(layer.GetName())
dico_layer["title"] = georeader.get_title(layer)
li_layers_idx.append(layer_idx)
# features
layer_feat_count = layer.GetFeatureCount()
dico_layer["num_obj"] = layer_feat_count
if layer_feat_count == 0:
"""if layer doesn't have any object, return an error"""
dico_layer["error"] = "err_nobjet"
self.alert = self.alert + 1
else:
pass
# fields
layer_def = layer.GetLayerDefn()
dico_layer["num_fields"] = layer_def.GetFieldCount()
dico_layer["fields"] = georeader.get_fields_details(layer_def)
# geometry type
dico_layer["type_geom"] = georeader.get_geometry_type(layer)
# SRS
srs_details = georeader.get_srs_details(layer, txt)
dico_layer["srs"] = srs_details[0]
dico_layer["epsg"] = srs_details[1]
dico_layer["srs_type"] = srs_details[2]
# spatial extent
extent = georeader.get_extent_as_tuple(layer)
dico_layer["xmin"] = extent[0]
dico_layer["xmax"] = extent[1]
dico_layer["ymin"] = extent[2]
dico_layer["ymax"] = extent[3]
# storing layer into the GDB dictionary
dico_dataset[
"{0}_{1}".format(layer_idx, dico_layer.get("title"))
] = dico_layer
# summing fields number
total_fields += dico_layer.get("num_fields", 0)
# summing objects number
total_objs += dico_layer.get("num_obj", 0)
# deleting dictionary to ensure having cleared space
del dico_layer
        # storing fields and objects sums
dico_dataset["total_fields"] = total_fields
dico_dataset["total_objs"] = total_objs
# warnings messages
if self.alert:
dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
else:
pass
# clean exit
del src
# ############################################################################
# ##### Stand alone program ######
# ################################
if __name__ == "__main__":
"""standalone execution for tests. Paths are relative considering a test
within the official repository (https://github.com/Guts/DicoGIS/)"""
# test text dictionary
textos = OrderedDict()
textos["srs_comp"] = "Compound"
textos["srs_geoc"] = "Geocentric"
textos["srs_geog"] = "Geographic"
textos["srs_loca"] = "Local"
textos["srs_proj"] = "Projected"
textos["srs_vert"] = "Vertical"
textos["geom_point"] = "Point"
textos["geom_ligne"] = "Line"
textos["geom_polyg"] = "Polygon"
# searching for DX Files
num_folders = 0
li_dxf = [r"..\..\test\datatest\cao\dxf\paris_transports_ed.dxf"]
# recipient datas
dico_dataset = OrderedDict()
# read DXF
for source_path in li_dxf:
dico_dataset.clear()
source_path = path.abspath(source_path)
if path.isfile(source_path):
ReadDXF(source_path, dico_dataset, "AutoCAD DXF", textos)
else:
pass
|
gpl-3.0
| -2,622,093,644,684,382,700 | 35.155102 | 82 | 0.546963 | false |
Ex-sabagostar/atom-shell
|
script/lib/util.py
|
1
|
2645
|
#!/usr/bin/env python
import atexit
import contextlib
import errno
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib2
import os
import zipfile
def tempdir(prefix=''):
directory = tempfile.mkdtemp(prefix=prefix)
atexit.register(shutil.rmtree, directory)
return directory
@contextlib.contextmanager
def scoped_cwd(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
def download(text, url, path):
with open(path, 'w') as local_file:
web_file = urllib2.urlopen(url)
file_size = int(web_file.info().getheaders("Content-Length")[0])
downloaded_size = 0
block_size = 128
ci = os.environ.get('CI') == '1'
while True:
buf = web_file.read(block_size)
if not buf:
break
downloaded_size += len(buf)
local_file.write(buf)
if not ci:
percent = downloaded_size * 100. / file_size
status = "\r%s %10d [%3.1f%%]" % (text, downloaded_size, percent)
print status,
if ci:
print "%s done." % (text)
else:
print
def extract_tarball(tarball_path, member, destination):
with tarfile.open(tarball_path) as tarball:
tarball.extract(member, destination)
def extract_zip(zip_path, destination):
if sys.platform == 'darwin':
# Use unzip command on Mac to keep symbol links in zip file work.
execute(['unzip', zip_path, '-d', destination])
else:
with zipfile.ZipFile(zip_path) as z:
z.extractall(destination)
def make_zip(zip_file_path, files, dirs):
safe_unlink(zip_file_path)
if sys.platform == 'darwin':
files += dirs
execute(['zip', '-r', '-y', zip_file_path] + files)
else:
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_DEFLATED)
for filename in files:
zip_file.write(filename, filename)
for dirname in dirs:
for root, _, filenames in os.walk(dirname):
for f in filenames:
zip_file.write(os.path.join(root, f))
zip_file.close()
def rm_rf(path):
try:
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_unlink(path):
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def execute(argv):
try:
return subprocess.check_output(argv, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print e.output
raise e
def get_atom_shell_version():
return subprocess.check_output(['git', 'describe', '--tags']).strip()
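# A minimal sketch (hedged) of how these helpers compose; the URL and file
# names below are illustrative placeholders, not real endpoints.
if __name__ == '__main__':
  scratch = tempdir('util-demo-')
  archive = os.path.join(scratch, 'archive.zip')
  download('Fetching archive', 'https://example.com/archive.zip', archive)
  extract_zip(archive, scratch)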
|
mit
| 5,613,780,633,471,564,000 | 20.680328 | 75 | 0.647637 | false |
jasonz93/python-tordatahub
|
tordatahub/tests/blob/blob_topic_sub.py
|
1
|
3267
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import time
import traceback
from tordatahub import DataHub
from tordatahub.utils import Configer
from tordatahub.models import Topic, RecordType, FieldType, RecordSchema, BlobRecord, CursorType
from tordatahub.errors import DatahubException, ObjectAlreadyExistException
configer = Configer('../tordatahub.ini')
access_id = configer.get('tordatahub', 'access_id', '')
access_key = configer.get('tordatahub', 'access_key', '')
endpoint = configer.get('tordatahub', 'endpoint', '')
project_name = configer.get('tordatahub', 'project_name', 'pydatahub_project_test')
topic_name = configer.get('tordatahub', 'topic_name', 'pydatahub_blob_topic_test')
print "======================================="
print "access_id: %s" % access_id
print "access_key: %s" % access_key
print "endpoint: %s" % endpoint
print "project_name: %s" % project_name
print "topic_name: %s" % topic_name
print "=======================================\n\n"
if not access_id or not access_key or not endpoint:
print "access_id and access_key and endpoint must be set!"
sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint)
topic = Topic(name=topic_name)
topic.project_name = project_name
topic.shard_count = 3
topic.life_cycle = 7
topic.record_type = RecordType.BLOB
try:
dh.create_topic(topic)
print "create topic success!"
print "=======================================\n\n"
except ObjectAlreadyExistException, e:
print "topic already exist!"
print "=======================================\n\n"
except Exception, e:
print traceback.format_exc()
sys.exit(-1)
try:
topic = dh.get_topic(topic_name, project_name)
print "get topic suc! topic=%s" % str(topic)
if topic.record_type != RecordType.BLOB:
print "topic type illegal!"
sys.exit(-1)
print "=======================================\n\n"
cursor = dh.get_cursor(project_name, topic_name, CursorType.OLDEST, '0')
index = 0
while True:
(record_list, record_num, next_cursor) = dh.get_records(topic, '0', cursor, 3)
for record in record_list:
with open('0_%d.png' % index, 'wb') as f:
f.write(record.blobdata)
print "create 0_%d.png suc" % index
            index += 1
if 0 == record_num:
time.sleep(1)
cursor = next_cursor
except DatahubException, e:
print traceback.format_exc()
sys.exit(-1)
else:
sys.exit(-1)
|
apache-2.0
| 311,997,725,223,724,900 | 34.51087 | 96 | 0.651668 | false |
elbeardmorez/quodlibet
|
quodlibet/tests/test_qltk_info.py
|
1
|
1263
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from quodlibet import app
from tests import TestCase, destroy_fake_app, init_fake_app
from senf import mkstemp
from quodlibet.player.nullbe import NullPlayer
from quodlibet.qltk.info import SongInfo
from quodlibet.library import SongLibrary
SOME_PATTERN = "foo\n[big]<title>[/big] - <artist>"
class FakePatternEdit(object):
@property
def text(self):
return SOME_PATTERN
class TSongInfo(TestCase):
def setUp(self):
init_fake_app()
fd, self.filename = mkstemp()
os.close(fd)
self.info = SongInfo(SongLibrary(), NullPlayer(), self.filename)
def test_save(self):
fake_edit = FakePatternEdit()
self.info._on_set_pattern(None, fake_edit, app.player)
with open(self.filename, "r") as f:
contents = f.read()
self.failUnlessEqual(contents, SOME_PATTERN + "\n")
def tearDown(self):
destroy_fake_app()
self.info.destroy()
os.unlink(self.filename)
|
gpl-2.0
| 839,891,485,500,743,700 | 26.456522 | 72 | 0.673793 | false |
mclarkelauer/AndroidAnalyzer
|
log.py
|
1
|
1320
|
__author__ = 'Matt Clarke-Lauer'
__email__ = 'matt@clarkelauer.com'
__credits__ = ['Matt Clarke-Lauer']
__date__ = '7/1/13'  # note: the original `7 / 1 / 13` was integer division (== 0)
__version__ = '0.1'
__status__ = 'Development'
from datetime import datetime
import os,sys,profile,traceback
import pickle
import inspect
logString = "[DexScope:"
logLevel = {
0:{"Type":"INFO","On":True},
1:{"Type":"DEBUG","On":True},
2:{"Type":"WARNING","On":True},
3:{"Type":"ERROR","On":True},
4:{"Type":"CRITICAL","On":True},
}
def info(message):
printLogMessage(message,level=0)
def debug(message):
printLogMessage(message,level=1)
def warning(message):
printLogMessage(message,level=2)
def error(message):
printLogMessage(message,level=3)
def critical(message):
printLogMessage(message,level=4)
def printLogMessage(message,level= 0):
frm = inspect.stack()[2]
mod = inspect.getmodule((frm[0]))
if logLevel[level]['On']:
log = logString + mod.__name__ + ":" + str(datetime.now()) + "] " + logLevel[level]["Type"] + ":" + message
print log
def saveLibrariesToPickle(Deps):
if os.path.isfile("libs.p"):
libraries = pickle.load(open("libs.p","rb"))
else:
libraries = []
for d in Deps:
libraries.append(d)
    libraries = list(set(libraries))  # de-duplicate before persisting
pickle.dump(libraries,open("libs.p","wb"))
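# Hedged usage sketch: the level helpers resolve the calling module via
# inspect.stack(), so they are intended to be imported and called from other
# modules, e.g.:
#
#     import log
#     log.info('starting analysis')
#     # -> [DexScope:<module>:<timestamp>] INFO:starting analysis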
|
bsd-3-clause
| -860,799,496,070,189,000 | 23.444444 | 116 | 0.621212 | false |
oneonestar/CloudStorage
|
src/client/rsa.py
|
1
|
3905
|
"""
Load RSA keys for encryption/decryption/sign/verify.
"""
from ldap3 import *
import json
import base64
import log
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.serialization import load_der_public_key, load_pem_private_key
from cryptography.hazmat.primitives import hashes
def get_cert(email, dump=False):
"""
Get E-cert from HKPost LDAP server
"""
# Connect to server
server = Server('ldap1.hongkongpost.gov.hk', get_info=ALL)
conn = Connection(server, auto_bind=True)
conn.start_tls()
# Get RSA cert
conn.search('o=hongkong post e-cert (personal),c=hk', '(sn='+email+'*)')
a = json.loads(conn.entries[-1].entry_to_json())['dn']
OU = a[a.find('OU=')+3:a.find('OU=')+13]
conn.search('EMAIL='+email+',OU='+str(OU)+
',o=hongkong post e-cert (personal),c=hk',
'(objectclass=*)', search_scope=LEVEL,
dereference_aliases=DEREF_BASE,
attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES])
cert = conn.entries[0].entry_get_raw_attribute("userCertificate;binary")[0]
# Cert info
if dump:
print(conn.entries[0].entry_get_dn())
print(base64.b64encode(cert))
# get x509 der public
pub_key = x509.load_der_x509_certificate(cert, default_backend()).public_key()
return pub_key
from cryptography.hazmat.primitives import serialization
def load_public_cert_from_file(filename):
"""
Load pem public key from file
"""
try:
with open(filename, "rb") as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend=default_backend()
)
return public_key
except Exception as e:
log.print_exception(e)
log.print_error("error", "failed to open file '%s'" % (r.text))
return False
def load_private_cert_from_file(filename):
'''
    Load private cert from file
'''
with open(filename, 'rb') as f:
pem_data = f.read()
key = load_pem_private_key(pem_data, password=None, backend=default_backend())
return key
def encrypt_rsa(message, pub_key):
cipher = pub_key.encrypt(message, padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
return cipher
#openssl x509 -in cert -inform der -noout -pubkey > a.pem
def decrypt_rsa(data, key):
plain = key.decrypt(data, padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
return plain
def sign_rsa(data, private_key):
signer = private_key.signer(
padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH),
hashes.SHA256())
signer.update(data)
signature = signer.finalize()
return signature
def verify_rsa(data, signature, public_key):
verifier = public_key.verifier(
signature,
padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH),
hashes.SHA256())
verifier.update(data)
try:
verifier.verify()
except:
return False
return True
if __name__ == "__main__":
'''
For testing
'''
# Test encryption
public_key = get_cert("oneonestar@gmail.com")
cipher = encrypt_rsa(b'test', public_key)
private_key = load_private_cert_from_file("/home/star/.ssh/me.key.pem2")
print(decrypt_rsa(cipher, private_key))
# Test signature
data = b'abcdefg'
signature = sign_rsa(data, private_key)
print("signature: ", signature)
data = b'abc'
print("verify: ", verify_rsa(data, signature, public_key))
|
gpl-3.0
| 7,988,215,588,631,458,000 | 31.815126 | 98 | 0.626633 | false |
dkopecek/amplify
|
third-party/quex-0.65.2/quex/engine/analyzer/state/drop_out.py
|
1
|
1840
|
from quex.engine.analyzer.commands.core import CommandList, \
IfPreContextSetPositionAndGoto
def get_CommandList(TheAccepter, TheTerminalRouter):
"""If there is no stored acceptance involved, then one can directly
conclude from the pre-contexts to the acceptance_id. Then the drop-
out action can be described as a sequence of checks
# [0] Check [1] Position and Goto Terminal
if pre_context_32: input_p = x; goto terminal_893;
elif pre_context_32: goto terminal_893;
elif pre_context_32: input_p = x; goto terminal_893;
elif pre_context_32: goto terminal_893;
Such a configuration is considered trivial. No restore is involved.
RETURNS: None -- if not trivial
list((pre_context_id, TerminalRouterElement)) -- if trivial
"""
# If the 'last_acceptance' is not determined in this state, then it
    # must be derived from previous storages. We cannot simplify here.
if TheAccepter is None:
return CommandList(TheTerminalRouter)
elif not TheAccepter.content.has_acceptance_without_pre_context():
# If no pre-context is met, then 'last_acceptance' needs to be
# considered.
return CommandList(TheAccepter, TheTerminalRouter)
def router_element(TerminalRouter, AcceptanceId):
for x in TerminalRouter:
if x.acceptance_id == AcceptanceId: return x
assert False # There MUST be an element for each acceptance_id!
router = TheTerminalRouter.content
return CommandList.from_iterable(
IfPreContextSetPositionAndGoto(check.pre_context_id,
router_element(router, check.acceptance_id))
for check in TheAccepter.content
)
|
gpl-2.0
| -5,333,320,043,203,639,000 | 41.790698 | 83 | 0.65 | false |
cslarsen/wpm
|
wpm/record.py
|
1
|
1397
|
# -*- encoding: utf-8 -*-
"""
This file is part of the wpm software.
Copyright 2017, 2018 Christian Stigen Larsen
Distributed under the GNU Affero General Public License (AGPL) v3 or later. See
the file LICENSE.txt for the full license text. This software makes use of open
source software.
The quotes database is *not* covered by the AGPL!
"""
import collections
class Recorder(object):
"""Class for recording keystrokes."""
def __init__(self):
self.reset()
def add(self, elapsed, key, position, incorrect):
"""Adds a time stamp."""
self.elapsed.append(elapsed)
self.keys.append(key)
self.position.append((position, incorrect))
def reset(self):
"""Destroys all time stamps."""
self.elapsed = collections.deque()
self.keys = collections.deque()
self.position = collections.deque()
def __getitem__(self, index):
elapsed = self.elapsed[index]
key = self.keys[index]
position, incorrect = self.position[index]
return elapsed, key, position, incorrect
def __len__(self):
return len(self.elapsed)
class Playback(object):
def __init__(self, recorder):
self.recorder = recorder
self.index = 0
def next(self):
values = self.recorder[self.index]
self.index = (self.index + 1) % len(self.recorder)
return values
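if __name__ == "__main__":
    # Minimal sketch (hedged): record a few keystrokes, then replay them in
    # order; the timings and keys below are illustrative.
    recorder = Recorder()
    for t, key, pos in ((0.10, "h", 0), (0.25, "i", 1), (0.40, "!", 2)):
        recorder.add(t, key, pos, incorrect=0)
    playback = Playback(recorder)
    for _ in range(len(recorder)):
        print(playback.next())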
|
agpl-3.0
| 5,263,892,724,204,249,000 | 26.94 | 79 | 0.636364 | false |
tdjordan/tortoisegit
|
gitgtk/backout.py
|
1
|
4443
|
#
# backout.py - TortoiseHg's dialog for backing out changeset
#
# Copyright (C) 2008 Steve Borho <steve@borho.org>
# Copyright (C) 2007 TK Soh <teekaysoh@gmail.com>
#
import os
import sys
import gtk
import pango
from dialog import *
from hgcmd import CmdDialog
import histselect
class BackoutDialog(gtk.Window):
""" Backout effect of a changeset """
def __init__(self, root='', rev=''):
""" Initialize the Dialog """
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self.root = root
self.set_title('Backout changeset - ' + rev)
self.set_default_size(600, 400)
self.notify_func = None
self.tbar = gtk.Toolbar()
self.tips = gtk.Tooltips()
sep = gtk.SeparatorToolItem()
sep.set_expand(True)
sep.set_draw(False)
tbuttons = [
self._toolbutton(gtk.STOCK_GO_BACK, 'Backout',
self._backout_clicked,
'Backout selected changeset'),
sep,
self._toolbutton(gtk.STOCK_CLOSE, 'Close',
self._close_clicked,
'Close Window')
]
for btn in tbuttons:
self.tbar.insert(btn, -1)
vbox = gtk.VBox()
self.add(vbox)
vbox.pack_start(self.tbar, False, False, 2)
# From: combo box
self.reventry = gtk.Entry()
self.reventry.set_text(rev)
self.browse = gtk.Button("Browse...")
self.browse.connect('clicked', self._btn_rev_clicked)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label('Revision to backout:'), False, False, 4)
hbox.pack_start(self.reventry, True, True, 4)
hbox.pack_start(self.browse, False, False, 4)
vbox.pack_start(hbox, False, False, 4)
self.logview = gtk.TextView(buffer=None)
self.logview.set_editable(True)
self.logview.modify_font(pango.FontDescription("Monospace"))
buffer = self.logview.get_buffer()
buffer.set_text('Backed out changeset: ' + rev)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_shadow_type(gtk.SHADOW_ETCHED_IN)
scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolledwindow.add(self.logview)
scrolledwindow.set_border_width(4)
frame = gtk.Frame('Backout commit message')
frame.set_border_width(4)
frame.add(scrolledwindow)
self.tips.set_tip(frame,
'Commit message text for new changeset that reverses the'
' effect of the change being backed out.')
vbox.pack_start(frame, True, True, 4)
def _close_clicked(self, toolbutton, data=None):
self.destroy()
def set_notify_func(self, func, *args):
self.notify_func = func
self.notify_args = args
def _btn_rev_clicked(self, button):
""" select revision from history dialog """
rev = histselect.select(self.root)
if rev is not None:
self.reventry.set_text(rev)
buffer = self.logview.get_buffer()
buffer.set_text('Backed out changeset: ' + rev)
def _toolbutton(self, stock, label, handler, tip):
tbutton = gtk.ToolButton(stock)
tbutton.set_label(label)
tbutton.set_tooltip(self.tips, tip)
tbutton.connect('clicked', handler)
return tbutton
def _backout_clicked(self, button):
buffer = self.logview.get_buffer()
start, end = buffer.get_bounds()
cmdline = ['hg', 'backout', '--rev', self.reventry.get_text(),
'--message', buffer.get_text(start, end)]
dlg = CmdDialog(cmdline)
dlg.show_all()
dlg.run()
dlg.hide()
if self.notify_func:
self.notify_func(self.notify_args)
def run(root='', **opts):
# This dialog is intended to be launched by the changelog browser
# It's not expected to be used from hgproc or the command line. I
# leave this path in place for testing purposes.
dialog = BackoutDialog(root, 'tip')
dialog.show_all()
dialog.connect('destroy', gtk.main_quit)
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
gtk.main()
gtk.gdk.threads_leave()
if __name__ == "__main__":
import sys
opts = {}
opts['root'] = len(sys.argv) > 1 and sys.argv[1] or os.getcwd()
run(**opts)
|
gpl-2.0
| 521,526,216,457,904,060 | 33.44186 | 77 | 0.586766 | false |
haarcuba/testix
|
setup.py
|
1
|
1689
|
import setuptools
from distutils.core import setup
LONG_DESCRIPTION =\
"""
Testix is a Mocking framework for Python, meant to be used with [pytest](https://docs.pytest.org/en/latest/).
read the full docs at the [project's homepage](https://github.com/haarcuba/testix).
Testix is special because it allows you to specify what your mock objects do,
and it then enforces your specifications automatically. It also reduces mock
setup boilerplate, although it does not eliminate it entirely. Other frameworks
usually have a flow like this:
* setup mock
* let code do something with mock
* assert mock used in correct way
Testix flow is a bit different
* setup "top level" mock objects (`sock` in the following example)
* specify exactly what should happen to them using a scenario
And that's it.
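
A rough sketch of that flow (hedged: the names here, including the `reader`
function under test, are illustrative only -- see the project homepage for the
authoritative API):

    sock = Fake('sock')
    with Scenario() as s:
        s.sock.recv(4096) >> 'the data'
        assert reader(sock) == 'the data'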
"""
requires = [ 'pytest~=4.3.0', ]
tests_require = [ 'hypothesis~=4.7.19', 'pytest-asyncio' ]
setup(
name="testix",
packages = ["testix",],
version='5.0.1',
description = "Mocking framework Python with *exact* Scenarios",
author = "Yoav Kleinberger",
author_email = "haarcuba@gmail.com",
url = "https://github.com/haarcuba/testix",
keywords = ["mock", "mocking", "unittest", "python", "unit testing"],
install_requires=requires,
long_description = LONG_DESCRIPTION,
extras_require={
'testing': tests_require,
},
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Testing",
]
)
|
mit
| 7,304,769,653,540,143,000 | 32.78 | 109 | 0.675548 | false |
jan25/code_sorted
|
adventofcode/2020/4/2.py
|
1
|
2176
|
passports, lines = [], []
while True:
try:
line = input().strip()
if line == "":
passports.append(' '.join(lines))
lines = []
else:
lines.append(line)
except Exception:
if lines:
passports.append(' '.join(lines))
break
'''
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
'''
def ok(kv):
req = {'iyr', 'byr', 'pid', 'hcl', 'ecl', 'hgt', 'eyr'}
for k, _ in kv.items():
if k != 'cid':
if k not in req: return False
req.remove(k)
if req: return False
def chk(k, l, mini, maxi, val=None):
digs = val or kv[k]
if not digs.isdigit() or len(digs) != l: return False
val = int(digs)
return mini <= val <= maxi
if not chk('byr', 4, 1920, 2002): return False
if not chk('iyr', 4, 2010, 2020): return False
if not chk('eyr', 4, 2020, 2030): return False
hgt, unit = kv['hgt'][:-2], kv['hgt'][-2:]
if unit not in {'cm', 'in'}: return False
if unit == 'cm' and not chk(None, 3, 150, 193, hgt): return False
if unit == 'in' and not chk(None, 2, 59, 76, hgt): return False
pid = kv['pid']
if not pid.isdigit() or len(pid) != 9: return False
ecl = kv['ecl']
if ecl not in set('amb blu brn gry grn hzl oth'.split()): return False
hcl = kv['hcl']
    if len(hcl) != 7 or hcl[0] != '#': return False
    # isalnum() would also accept g-z and uppercase, contradicting the rule above
    if any(c not in '0123456789abcdef' for c in hcl[1:]): return False
return True
valid = 0
for p in passports:
kvs = dict()
for kv in p.split():
k, v = kv.split(':')
kvs[k] = v
if ok(kvs): valid += 1
print(valid)
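# Hedged sanity check: one hand-written passport that satisfies every rule in
# the docstring above; the field values are illustrative, not puzzle input.
_sample = dict(kv.split(':') for kv in
               'byr:1980 iyr:2012 eyr:2025 hgt:170cm hcl:#123abc ecl:brn pid:012345678'.split())
assert ok(_sample)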
|
unlicense
| -457,575,433,495,296,060 | 29.661972 | 76 | 0.568474 | false |
Duality4Y/py-bool-logic
|
LogicTests.py
|
1
|
19036
|
import baseLogic as logic
from LogicUtils import itot
from LogicUtils import ttoi
from LogicUtils import getRandomInts
from LogicUtils import invertTuple
from LogicUtils import states
from LogicUtils import appendTuple
def checkNot(gate):
    # renamed parameter: `logic` shadowed the imported baseLogic module
    state1 = gate(0)
    state2 = gate(1)
print("0 : %d" % (state1))
print("1 : %d" % (state2))
print("")
def tableCheck(gate):
# table to check against two input signal.
table = [(0, 0), (0, 1), (1, 0), (1, 1)]
for state in table:
state1, state2 = state
output = gate(state)
print("%d %d : %d" % (state1, state2, output))
print("")
def printTestLogic():
print("Not:")
checkNot(logic.Not)
print("Or: ")
tableCheck(logic.Or)
print("And: ")
tableCheck(logic.And)
print("Xor: ")
tableCheck(logic.Xor)
print("Nor: ")
tableCheck(logic.Nor)
print("Nand: ")
tableCheck(logic.Nand)
print("Ior: ")
tableCheck(logic.Xnor)
def testMultiInputLogic():
length = 2
print("Input:\t Or: And: Nor: Nand: Xored: Xnored:")
for i in range(0, 2**length):
state = itot(i, length)
ored = logic.Or(state)
anded = logic.And(state)
nored = logic.Nor(state)
nanded = logic.Nand(state)
Xored = logic.Xor(state)
Xnored = logic.Xnor(state)
fmt = (state, ored, anded, nored, nanded, Xored, Xnored)
fmtstr = ("%s:\t\t%s\t%s\t%s\t%s\t%s\t%s" % fmt)
print(fmtstr)
length = 4
print("Input: Or: And: Nor: Nand: Xored: Xnored:")
for i in range(0, 2**length):
state = itot(i, length)
ored = logic.Or(state)
anded = logic.And(state)
nored = logic.Nor(state)
nanded = logic.Nand(state)
Xored = logic.Xor(state)
Xnored = logic.Xnor(state)
fmt = (state, ored, anded, nored, nanded, Xored, Xnored)
fmtstr = ("%s:\t%s\t%s\t%s\t%s\t%s\t%s" % fmt)
print(fmtstr)
def testHalfAdder():
from Circuits import HalfAdder
h1 = HalfAdder()
print("Halfadder: ")
print(" A |B | Co|S ")
for i in range(0, 4):
state = itot(i, 2)
h1.setinput(state)
print("%s-->%s" % (state, h1.getoutput()))
print("")
def testFullAdder():
from Circuits import FullAdder
f1 = FullAdder()
print("Fulladder: ")
print(" A |B |Ci| Co|S ")
# create a state and test on it.
for i in range(0, 8):
# generate four states
state = itot(i, 3)
f1.setinput(state)
print("%s-->%s" % (state, f1.getoutput()))
print("")
def testFourBitAdder():
from Circuits import FourBitAdder
adder = FourBitAdder()
bitlength = 4
print("FourBitadder: Addition")
for i in range(0, bitlength):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
adder.setinput(state1, state2, 0)
output = adder.getoutput()
if output[1]:
overflow = True
else:
overflow = False
answer = ttoi(output[0])
check = (answer == (left+right))
fmt = (ttoi(state1), ttoi(state2),
answer, check)
if overflow:
fmtstr = "%s + %s = %s :check:%s number overflow"
else:
fmtstr = "%s + %s = %s :check:%s"
print(fmtstr % fmt)
print("")
def testXBitAdder():
from Circuits import XBitAdder
bitlength = 8
print("max integer size: %d" % (bitlength))
adder = XBitAdder(bitlength)
print("Xbitadder: ")
    # run 6 randomized tests
for i in range(0, 6):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
adder.setinput(state1, state2, 0)
answer = ttoi(adder.getoutput()[0])
fmt = (ttoi(state1), ttoi(state2),
answer, (answer == (left+right)))
if adder.getoutput()[1]:
print("%s + %s = %s :check:%s integer overflow" % fmt)
else:
print("%s + %s = %s :check:%s" % fmt)
print("")
def testXBitSubtractor():
from Circuits import XBitSubtractor
bitlength = 8
print("integer size: %s" % bitlength)
subtractor = XBitSubtractor(bitlength)
print("XBitSubtractor unsigned: ")
for i in range(0, 6):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
subtractor.setinput(state1, state2, 0)
answer = ttoi(subtractor.getoutput()[0])
fmt = (ttoi(state1), ttoi(state2), answer)
print("%s - %s = %s" % fmt)
print("signed: ")
for i in range(0, 6):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
subtractor.setinput(state1, state2, 0)
output = subtractor.getoutput()
if output[1]:
answer = -(ttoi(invertTuple(output[0]))+1)
else:
answer = ttoi(output[0])
fmt = (ttoi(state1), ttoi(state2), answer)
print("%s - %s = %s " % fmt)
print("")
def testSubtractor():
import Circuits as cir
subtractor = cir.FourBitSubtractor()
bitlength = 4
print("FourBitSubtractor: ")
print("printing signed representation:")
for i in range(0, 5):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
subtractor.setinput(state1, state2, 0)
output = subtractor.getoutput()
# check signednes
if output[1]:
# if signed do this for the right negative number
# because if you don't you get to deal with unsigned number.
# and thus have overflow, and thus not really good to check for
# human readers, unless you like to think about it ofcourse .
answer = -(ttoi(invertTuple(output[0]))+1)
else:
answer = (ttoi(output[0]))
fmt = (left, right, answer)
fmtstr = "%s - %s = %s" % fmt
print(fmtstr)
print("printing unsigned representation: ")
for i in range(0, 5):
left, right = getRandomInts(bitlength)
state1 = itot(left, bitlength)
state2 = itot(right, bitlength)
subtractor.setinput(state1, state2, 0)
output = subtractor.getoutput()
answer = ttoi(output[0])
fmt = (left, right, answer)
fmtstr = "%s - %s = %s" % fmt
print(fmtstr)
print("")
def testLatch():
from Circuits import Latch
latch = Latch()
print("s-r latch: ")
while(True):
answer = raw_input("Input (S)et (R)eset (Q)uit:\n").lower()
if answer == "q":
break
elif answer == "s":
latch.setinput((1, 0))
print(latch.getoutput())
elif answer == "r":
latch.setinput((0, 1))
print(latch.getoutput())
def testGatedLatch():
from Circuits import GatedLatch
latch = GatedLatch()
enabled = 0
print("gated s-r latch: ")
while(True):
answer = raw_input("Input (S)et (R)eset (E)nable (Q)uit: \n").lower()
if answer == "q":
break
elif answer == "s":
latch.setinput((1, 0, enabled))
print(latch.getoutput())
elif answer == "r":
latch.setinput((0, 1, enabled))
print(latch.getoutput())
elif answer == "e":
enabled = logic.Not(enabled)
def testDataLatch():
from Circuits import DataLatch
latch = DataLatch()
enabled = 0
data = 0
print("Data latch: ")
while(True):
answer = raw_input("input (D)ata (E)nable (Q)uit: \n").lower()
if answer == "q":
break
elif answer == "d":
answer = raw_input("input 1 or 0 for data: \n").lower()
data = eval(answer)
latch.setinput((data, enabled))
print(latch.getoutput())
data = logic.Not(data)
elif answer == "e":
enabled = logic.Not(enabled)
def testPiPoRegister():
from Circuits import PiPoRegister
register = PiPoRegister()
print("Paralel in paralel out register.")
enabled = 0
# once and zero's alternating
data = 170
print("data:%s disabled:" % (itot(data, register.length),))
signal = (itot(data, register.length), enabled)
register.setinput(signal)
print(register.getoutput())
enabled = 1
print("data:%s enabled:" % (itot(data, register.length),))
signal = (itot(data, register.length), enabled)
register.setinput(signal)
print(register.getoutput())
enabled = 0
data = 0xF0
print("data:%s disabled: " % (itot(data, register.length),))
signal = (itot(data, register.length), enabled)
register.setinput(signal)
print(register.getoutput())
print("data:%s enabled:" % (itot(data, register.length),))
enabled = 1
signal = (itot(data, register.length), enabled)
register.setinput(signal)
print(register.getoutput())
print("")
def testXBitPiPoRegister():
from Circuits import XBitPiPoRegister
from getch import getch
import time
import sys
bitlength = 4
register = XBitPiPoRegister(length=bitlength)
print("\nvariable length parallel in parallel out register:")
data = itot(0, bitlength)
clock = 0
char = ''
while(char != u'q'):
if char >= u'0' and char <= u'9':
intdata = ttoi(data)
shifted = (ord(char) - ord(u'0') - 1)
intdata ^= (1 << shifted)
data = itot(intdata, bitlength)
elif char == u'c':
clock = logic.Not(clock)
signal = (data, clock)
register.setinput(signal)
output = register.getoutput()
fmt = (clock, data, output, time.time())
fmtstr = "Clock:%s Input:%s Output:%s %s\r" % fmt
sys.stdout.write(fmtstr)
char = getch()
def d_latch_vs_dms_latch():
from getch import getch
import sys
from Circuits import DataLatch, MSDataLatch
latch = DataLatch()
latch2 = MSDataLatch()
print("\ndifference between latch, and flipflop")
data, enabled = 1, 0
char = ' '
while(char != u'q'):
if char == u'2':
enabled = logic.Not(enabled)
elif char == u'1':
data = logic.Not(data)
latch.setinput((data, enabled))
latch2.setinput((data, enabled))
fmt = (data, enabled, latch.getoutput(), latch2.getoutput())
fmtstr = "\rdata:%s enabled:%s D-Latch:%s MSD-Latch:%s"
sys.stdout.write(fmtstr % fmt)
char = getch()
def testJKFlipflop():
from Circuits import JKFlipFlop
from getch import getch
import sys
flipflop = JKFlipFlop()
j, k, clock = 0, 0, 0
print("\nJK-flipflop")
print("")
char = ""
while(char != u'q'):
if(char == u'j'):
j = logic.Not(j)
elif(char == u'k'):
k = logic.Not(k)
elif(char == u'c'):
clock = logic.Not(clock)
signal = (j, k, clock)
flipflop.setinput(signal)
q, qn, = flipflop.getoutput()
fmt = (j, k, clock, q, qn)
fmtstr = "\rJ:%s K:%s clock:%s Q:%s Qn:%s"
sys.stdout.write(fmtstr % fmt)
char = getch()
def testTFlipflop():
from Circuits import TFlipFlop
from getch import getch
import sys
flipflop = TFlipFlop()
t, clock = 0, 0
print("\nToggle FlipFlop")
print("")
char = ""
while(char != u'q'):
if(char == u't'):
t = logic.Not(t)
elif(char == u'c'):
clock = logic.Not(clock)
signal = (t, clock)
flipflop.setinput(signal)
q, qn, = flipflop.getoutput()
fmt = (t, clock, q, qn)
fmtstr = "\rT:%s clock:%s q:%s qn:%s"
sys.stdout.write(fmtstr % fmt)
char = getch()
def testCounter():
from Circuits import Counter
from getch import getch
import sys
counter = Counter(length=8)
print("\ncounter:")
print("")
clock = 0
enabled = 1
char = ""
while(char != u'q'):
if(char == u'e'):
enabled = logic.Not(enabled)
elif(char == u'c'):
clock = logic.Not(clock)
signal = (clock, enabled)
counter.setinput(signal)
count = counter.getoutput()
fmt = (enabled, clock, count)
fmtstr = "\rEnabled:%s Clock:%s Count:%s"
sys.stdout.write(fmtstr % fmt)
char = getch()
def testXBitSiPoRegister():
from Circuits import XBitSiPoRegister
from getch import getch
import sys
register = XBitSiPoRegister(length=4)
print("XBit SiPo register:")
clock = 0
data = 0
char = ""
while(char != u'q'):
if(char == u'c'):
clock = logic.Not(clock)
elif(char == u'd'):
data = logic.Not(data)
signal = (data, clock)
register.setinput(signal)
output = register.getoutput()
fmt = (clock, data, output)
fmtstr = "\rClock:%s Data:%s Output:%s"
sys.stdout.write(fmtstr % fmt)
char = getch()
def sipoTesting():
from Circuits import SiPoRegister
register = SiPoRegister()
print("serial in parallel out")
print("")
    # shift in the bit pattern 1, 0, 1, 0 one clock pulse at a time
    for data in (1, 0, 1, 0):
        register.setinput((data, 1))   # clock high: latch the serial bit
        register.getoutput()
        register.setinput((data, 0))   # clock low
        print(register.getoutput())
def testOneBitMagnitudeComparator():
from Circuits import OneBitMagnitudeComparator as comp
comparator = comp()
length = 2
print("magnitude comparator test:")
print(" Ai|Bi Go|Eo|Lo")
for i in range(2**length):
state = itot(i, length)
comparator.setinput(state)
output = comparator.getoutput()
fmt = (state, output)
fmtstr = "%s %s" % fmt
print(fmtstr)
def testCascadeMagnitudeComparator():
from Circuits import CascadeOneBitMagnitudeComparator as comp
comparator = comp()
length = 5
print("cascade magnitude comparator:")
print(" Ai|Bi|Gi|Ei|Li Go|Eo|Lo")
for state in states(length):
comparator.setinput(state)
output = comparator.getoutput()
fmt = (state, output)
fmtstr = "%s %s" % fmt
print(fmtstr)
def testFourBitMagnitudeComparator():
from Circuits import FourBitMagnitudeComparator as Comp
comparator = Comp()
length = 4
print("Four bit magnitude comparator:")
for i in range(0, length*10):
left, right = getRandomInts(4)
state1 = itot(left, 4)
state2 = itot(right, 4)
comparator.setinput(state1, state2, (0, 0, 0))
output = comparator.getoutput()
print(left, right, output)
print("")
def testencoder4to2():
from Circuits import Encoder4to2
encoder = Encoder4to2()
inputs = 4
print("Encoder4to2: ")
for i in range(0, inputs):
inputed = (1 << i)
state = itot(inputed, inputs)
encoder.setinput(state)
output = encoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testencoder8to3():
from Circuits import Encoder8to3
encoder = Encoder8to3()
inputs = 8
print("Encoder8to3: ")
for i in range(0, inputs):
inputed = (1 << i)
state = itot(inputed, inputs)
encoder.setinput(state)
output = encoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testencoder():
from Circuits import Encoder
encoder = Encoder()
print("Encoder 8 to 3 with cascade input: ")
table = [(1, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 1, 1, 1, 1, 1, 1, 1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 1),
(0, 0, 0, 0, 0, 0, 0, 1, 1),
(0, 0, 0, 0, 0, 0, 1, 1, 1),
(0, 0, 0, 0, 0, 1, 1, 1, 1),
(0, 0, 0, 0, 1, 1, 1, 1, 1),
(0, 0, 0, 1, 1, 1, 1, 1, 1),
(0, 0, 1, 1, 1, 1, 1, 1, 1), ]
for state in table:
encoder.setinput(state)
output = encoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testdecoder2to4():
from Circuits import Decoder2to4
decoder = Decoder2to4()
# 2 plus enable
inputs = 3
print("Decoder2to4: ")
for state in states(inputs):
decoder.setinput(state)
output = decoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testdecoder3to8():
from Circuits import Decoder3to8
decoder = Decoder3to8()
# 3 inputs plus enable
inputs = 4
print("Decoder3to8: ")
for state in states(inputs):
decoder.setinput(state)
output = decoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testdecoder4to16():
from Circuits import Decoder4to16
decoder = Decoder4to16()
# 4 inputs plus enable
inputs = 5
print("Decoder4to16: ")
for state in states(inputs):
decoder.setinput(state)
output = decoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testdecoder5to32():
from Circuits import Decoder5to32
decoder = Decoder5to32()
# 5 inputs plus enabled
inputs = 6
print("Decoder5to32: ")
for state in states(inputs):
decoder.setinput(state)
output = decoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def testdecoder6to64():
from Circuits import Decoder6to64
decoder = Decoder6to64()
# 6 inputs plus enable
inputs = 7
print("Decoder6to64: ")
for state in states(inputs):
decoder.setinput(state)
output = decoder.getoutput()
fmt = (state, output)
fmtstr = "%s : %s" % fmt
print(fmtstr)
print("")
def runTests():
printTestLogic()
testHalfAdder()
testFullAdder()
testFourBitAdder()
testXBitAdder()
testPiPoRegister()
sipoTesting()
d_latch_vs_dms_latch()
testJKFlipflop()
testTFlipflop()
testCounter()
testOneBitMagnitudeComparator()
testCascadeMagnitudeComparator()
testFourBitMagnitudeComparator()
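if __name__ == '__main__':
    # Hedged addition: the module defines runTests() but never invoked it;
    # this guard assumes it is meant to be executed directly (note that some
    # of the later tests are interactive and read from the keyboard).
    runTests()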
|
gpl-2.0
| 7,347,099,429,959,772,000 | 27.118168 | 77 | 0.563774 | false |
tanghaibao/jcvi
|
jcvi/annotation/stats.py
|
1
|
12645
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Collect gene statistics based on gff file:
Exon length, Intron length, Gene length, Exon count
"""
import os.path as op
import sys
import logging
from jcvi.utils.cbook import SummaryStats, percentage, human_size
from jcvi.utils.range import range_interleave
from jcvi.utils.table import tabulate
from jcvi.formats.fasta import Fasta
from jcvi.formats.gff import make_index
from jcvi.formats.base import DictFile, must_open
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, need_update
metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
class GeneStats(object):
def __init__(self, feat, conf_class, transcript_sizes, exons):
self.fid = feat.id
self.conf_class = conf_class
self.num_exons = len(exons)
self.num_transcripts = len(transcript_sizes)
self.locus_size = feat.stop - feat.start + 1
self.cum_transcript_size = sum(transcript_sizes)
self.cum_exon_size = sum((stop - start + 1) for (c, start, stop) in exons)
def __str__(self):
return "\t".join(
str(x)
for x in (
self.fid,
self.conf_class,
self.num_exons,
self.num_transcripts,
self.locus_size,
self.cum_transcript_size,
self.cum_exon_size,
)
)
def main():
actions = (
("stats", "collect gene statistics based on gff file"),
("statstable", "print gene statistics table based on output of stats"),
("histogram", "plot gene statistics based on output of stats"),
# summary tables of various styles
("genestats", "print detailed gene statistics"),
("summary", "print detailed gene/exon/intron statistics"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def gc(seqs):
gc = total = 0
for s in seqs:
s = s.upper()
gc += s.count("G") + s.count("C")
total += sum(s.count(x) for x in "ACGT")
return percentage(gc, total, precision=0, mode=-1)
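# Hedged example: gc() takes an iterable of sequence strings, e.g.
# gc(["ACGT", "GGCC"]) counts 6 G/C bases out of 8 A/C/G/T bases and formats
# the ratio via percentage(); ambiguous bases (non-ACGT) are excluded from
# the denominator.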
def summary(args):
"""
%prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
"""
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC
for f in g.features_of_type("gene"):
fid = f.id
fseq = s.sequence({"chr": f.chrom, "start": f.start, "stop": f.stop})
geneseqs.append(fseq)
exons = set(
(c.chrom, c.start, c.stop)
for c in g.children(fid, 2)
if c.featuretype == "exon"
)
exons = list(exons)
for chrom, start, stop in exons:
fseq = s.sequence({"chr": chrom, "start": start, "stop": stop})
exonseqs.append(fseq)
introns = range_interleave(exons)
for chrom, start, stop in introns:
fseq = s.sequence({"chr": chrom, "start": start, "stop": stop})
intronseqs.append(fseq)
r = {} # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)):
tsizes = [len(x) for x in tseqs]
tsummary = SummaryStats(tsizes, dtype="int")
r[t, "Number"] = tsummary.size
r[t, "Average size (bp)"] = tsummary.mean
r[t, "Median size (bp)"] = tsummary.median
r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb")
r[t, "% of genome"] = percentage(
tsummary.sum, s.totalsize, precision=0, mode=-1
)
r[t, "% GC"] = gc(tseqs)
print(tabulate(r), file=sys.stderr)
def genestats(args):
"""
%prog genestats gffile
Print summary stats, including:
- Number of genes
- Number of single-exon genes
- Number of multi-exon genes
- Number of distinct exons
- Number of genes with alternative transcript variants
- Number of predicted transcripts
- Mean number of distinct exons per gene
- Mean number of transcripts per gene
- Mean gene locus size (first to last exon)
- Mean transcript size (UTR, CDS)
- Mean exon size
Stats modeled after barley genome paper Table 1.
A physical, genetic and functional sequence assembly of the barley genome
"""
p = OptionParser(genestats.__doc__)
p.add_option("--groupby", default="conf_class", help="Print separate stats groupby")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(gff_file,) = args
gb = opts.groupby
g = make_index(gff_file)
tf = gff_file + ".transcript.sizes"
if need_update(gff_file, tf):
fw = open(tf, "w")
for feat in g.features_of_type("mRNA"):
fid = feat.id
conf_class = feat.attributes.get(gb, "all")
tsize = sum(
(c.stop - c.start + 1)
for c in g.children(fid, 1)
if c.featuretype == "exon"
)
print("\t".join((fid, str(tsize), conf_class)), file=fw)
fw.close()
tsizes = DictFile(tf, cast=int)
conf_classes = DictFile(tf, valuepos=2)
logging.debug("A total of {0} transcripts populated.".format(len(tsizes)))
genes = []
for feat in g.features_of_type("gene"):
fid = feat.id
transcripts = [c.id for c in g.children(fid, 1) if c.featuretype == "mRNA"]
if len(transcripts) == 0:
continue
transcript_sizes = [tsizes[x] for x in transcripts]
exons = set(
(c.chrom, c.start, c.stop)
for c in g.children(fid, 2)
if c.featuretype == "exon"
)
conf_class = conf_classes[transcripts[0]]
gs = GeneStats(feat, conf_class, transcript_sizes, exons)
genes.append(gs)
r = {} # Report
distinct_groups = set(conf_classes.values())
for g in distinct_groups:
num_genes = num_single_exon_genes = num_multi_exon_genes = 0
num_genes_with_alts = num_transcripts = num_exons = max_transcripts = 0
cum_locus_size = cum_transcript_size = cum_exon_size = 0
for gs in genes:
if gs.conf_class != g:
continue
num_genes += 1
if gs.num_exons == 1:
num_single_exon_genes += 1
else:
num_multi_exon_genes += 1
num_exons += gs.num_exons
if gs.num_transcripts > 1:
num_genes_with_alts += 1
if gs.num_transcripts > max_transcripts:
max_transcripts = gs.num_transcripts
num_transcripts += gs.num_transcripts
cum_locus_size += gs.locus_size
cum_transcript_size += gs.cum_transcript_size
cum_exon_size += gs.cum_exon_size
mean_num_exons = num_exons * 1.0 / num_genes
mean_num_transcripts = num_transcripts * 1.0 / num_genes
mean_locus_size = cum_locus_size * 1.0 / num_genes
mean_transcript_size = cum_transcript_size * 1.0 / num_transcripts
mean_exon_size = cum_exon_size * 1.0 / num_exons if num_exons != 0 else 0
r[("Number of genes", g)] = num_genes
r[("Number of single-exon genes", g)] = percentage(
num_single_exon_genes, num_genes, mode=1
)
r[("Number of multi-exon genes", g)] = percentage(
num_multi_exon_genes, num_genes, mode=1
)
r[("Number of distinct exons", g)] = num_exons
r[("Number of genes with alternative transcript variants", g)] = percentage(
num_genes_with_alts, num_genes, mode=1
)
r[("Number of predicted transcripts", g)] = num_transcripts
r[("Mean number of distinct exons per gene", g)] = mean_num_exons
r[("Mean number of transcripts per gene", g)] = mean_num_transcripts
r[("Max number of transcripts per gene", g)] = max_transcripts
r[("Mean gene locus size (first to last exon)", g)] = mean_locus_size
r[("Mean transcript size (UTR, CDS)", g)] = mean_transcript_size
r[("Mean exon size", g)] = mean_exon_size
fw = must_open(opts.outfile, "w")
print(tabulate(r), file=fw)
fw.close()
def statstable(args):
"""
%prog statstable *.gff
Print gene statistics table.
"""
p = OptionParser(statstable.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
gff_files = args
for metric in metrics:
logging.debug("Parsing files in `{0}`..".format(metric))
table = {}
for x in gff_files:
pf = op.basename(x).split(".")[0]
numberfile = op.join(metric, pf + ".txt")
ar = [int(x.strip()) for x in open(numberfile)]
sum = SummaryStats(ar).todict().items()
keys, vals = zip(*sum)
keys = [(pf, x) for x in keys]
table.update(dict(zip(keys, vals)))
print(tabulate(table), file=sys.stderr)
def histogram(args):
"""
%prog histogram *.gff
Plot gene statistics based on output of stats. For each gff file, look to
see if the metrics folder (i.e. Exon_Length) contains the data and plot
them.
"""
from jcvi.graphics.histogram import histogram_multiple
p = OptionParser(histogram.__doc__)
p.add_option(
"--bins",
dest="bins",
default=40,
type="int",
help="number of bins to plot in the histogram",
)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
gff_files = args
# metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
colors = ("red", "green", "blue", "black")
vmaxes = (1000, 1000, 4000, 20)
xlabels = ("bp", "bp", "bp", "number")
for metric, color, vmax, xlabel in zip(metrics, colors, vmaxes, xlabels):
logging.debug("Parsing files in `{0}`..".format(metric))
numberfiles = [
op.join(metric, op.basename(x).split(".")[0] + ".txt") for x in gff_files
]
histogram_multiple(
numberfiles,
0,
vmax,
xlabel,
metric,
bins=opts.bins,
facet=True,
fill=color,
prefix=metric + ".",
)
def stats(args):
"""
%prog stats infile.gff
    Collect gene statistics based on gff file. There are some terminology
    issues here: what we normally call "gene" is actually mRNA, and sometimes
    "exon" is actually CDS, but both are configurable.
    The numbers are written to text files in four separate folders,
    corresponding to the four metrics:
    Exon length, Intron length, Gene length, Exon count
    With the data written to disk, you can then run %prog histogram
"""
p = OptionParser(stats.__doc__)
p.add_option("--gene", default="mRNA", help="The gene type")
p.add_option("--exon", default="CDS", help="The exon type")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(gff_file,) = args
g = make_index(gff_file)
exon_lengths = []
intron_lengths = []
gene_lengths = []
exon_counts = []
for feat in g.features_of_type(opts.gene):
exons = []
for c in g.children(feat.id, 1):
if c.featuretype != opts.exon:
continue
exons.append((c.chrom, c.start, c.stop))
introns = range_interleave(exons)
feat_exon_lengths = [(stop - start + 1) for (chrom, start, stop) in exons]
feat_intron_lengths = [(stop - start + 1) for (chrom, start, stop) in introns]
exon_lengths += feat_exon_lengths
intron_lengths += feat_intron_lengths
gene_lengths.append(sum(feat_exon_lengths))
exon_counts.append(len(feat_exon_lengths))
a = SummaryStats(exon_lengths)
b = SummaryStats(intron_lengths)
c = SummaryStats(gene_lengths)
d = SummaryStats(exon_counts)
for x, title in zip((a, b, c, d), metrics):
x.title = title
print(x, file=sys.stderr)
prefix = gff_file.split(".")[0]
for x in (a, b, c, d):
dirname = x.title
mkdir(dirname)
txtfile = op.join(dirname, prefix + ".txt")
x.tofile(txtfile)
if __name__ == "__main__":
main()
|
bsd-2-clause
| 5,609,852,184,259,907,000 | 31.844156 | 88 | 0.573507 | false |
globocom/tapioca
|
tests/acceptance/test_rest_api.py
|
1
|
13821
|
import re
import logging
from json import loads, dumps
from xml.etree import ElementTree
from unittest import TestCase
import tornado.web
from tornado.testing import AsyncHTTPTestCase
from tapioca import TornadoRESTful, ResourceHandler, \
ResourceDoesNotExist, JsonEncoder, JsonpEncoder, HtmlEncoder
from tests.support import AsyncHTTPClientMixin, assert_response_code
FAKE_DATABASE = None
class XmlEncoder(object):
mimetype = 'text/xml'
extension = 'xml'
def __init__(self, handler):
self.handler = handler
def encode(self, resource):
data = '{}'
if type(resource) == list:
data = '<comments>{}</comments>'
return data.format('<comment id="{id}">{text}</comment>'
.format(**resource))
def decode(self, data):
doc = ElementTree.fromstring(data)
new_data = {
'text': doc.text
}
resource_id = doc.get('id', None)
if not resource_id is None:
new_data['id'] = resource_id
return new_data
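# Illustrative round trip for XmlEncoder (a sketch): encode({'id': 1, 'text': 'hi'})
# yields '<comment id="1">hi</comment>'; decode() of that XML returns
# {'text': 'hi', 'id': '1'} (note the id comes back as a string).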
class AddMoreEncodersMixin:
encoders = (JsonEncoder, JsonpEncoder, XmlEncoder, HtmlEncoder,)
class ImplementAllRequiredMethodsInApiHandler:
def _find(self, cid):
ms = [x for x in FAKE_DATABASE if x['id'] == cid]
if ms:
return ms[0]
else:
raise ResourceDoesNotExist()
def create_model(self, callback):
model = self.load_data()
model['id'] = max([int(x['id']) for x in FAKE_DATABASE]) + 1
FAKE_DATABASE.append(model)
logging.debug('created {0!s}'.format(model))
url_to_instance = '{r.protocol}://{r.host}{r.path}/{id:d}'.format(
r=self.request, id=model['id'])
callback(model, url_to_instance)
def get_collection(self, callback):
callback(FAKE_DATABASE)
def get_model(self, cid, callback, *args):
callback(self._find(int(cid)))
def update_model(self, cid, callback, *args):
model = self.load_data()
model['id'] = int(cid)
logging.debug('updating {0!s} {1!s}'.format(str(cid), str(model)))
FAKE_DATABASE[FAKE_DATABASE.index(self._find(int(cid)))] = model
url_to_instance = '{r.protocol}://{r.host}{r.path}'.format(
r=self.request)
callback(url_to_instance)
def delete_model(self, cid, callback):
logging.debug('deleting')
item = self._find(int(cid))
FAKE_DATABASE.remove(item)
callback()
class FullTestHandler(
ImplementAllRequiredMethodsInApiHandler,
AddMoreEncodersMixin,
ResourceHandler):
pass
class RespondOnlyJsonResourceHandler(
ImplementAllRequiredMethodsInApiHandler,
ResourceHandler):
pass
class BaseApiHandlerTestCase(AsyncHTTPTestCase, AsyncHTTPClientMixin):
def get_app(self):
api = TornadoRESTful(version='v1', base_url='http://api.tapioca.com')
api.add_resource('api', FullTestHandler)
application = tornado.web.Application(api.get_url_mapping())
return application
def setUp(self, *args, **kw):
super(BaseApiHandlerTestCase, self).setUp(*args, **kw)
global FAKE_DATABASE
FAKE_DATABASE = [dict(id=i, text='X' * i) for i in range(10)]
def test_get_request_to_list_all_resource_instances(self):
response = self.get('/api')
assert_response_code(response, 200)
resources = loads(response.body.decode('utf-8'))
number_of_items = len(resources)
assert number_of_items == 10, 'should return 10 resources but returned {0:d}'.format(number_of_items)
for item in resources:
assert 'id' in item, 'should have the key \'id\' in the resource instance'
assert 'text' in item, 'should have the \'text\' in the resource instance'
def test_get_a_specific_resource_using_get_request(self):
response = self.get('/api/3')
assert_response_code(response, 200)
resource = loads(response.body.decode('utf-8'))
assert 'id' in resource, 'should have the key \'id\' in the resource instance {0!s}'.format(resource)
assert 'text' in resource, 'should have the \'text\' in the resource instance {0!s}'.format(resource)
def test_get_a_resource_that_does_not_exist(self):
response = self.get('/api/30')
assert_response_code(response, 404)
def test_post_to_create_a_new_resource(self):
a_new_item = {
'text': 'this is my new item'
}
response = self.post(self.get_url('/api'), dumps(a_new_item))
assert_response_code(response, 201)
self.assertRegexpMatches(response.headers['Location'], r'http://localhost:\d+/api/\d+')
assert loads(response.body)['text'] == 'this is my new item'
def test_put_to_update_an_existing_resource(self):
response = self.get('/api/1')
assert_response_code(response, 200)
resource = loads(response.body.decode('utf-8'))
resource['comment'] = 'wow!'
response = self.put(self.get_url('/api/1'), dumps(resource))
assert_response_code(response, 204)
response = self.get('/api/1')
resource = loads(response.body.decode('utf-8'))
assert 'comment' in resource
assert resource['comment'] == 'wow!'
def test_try_to_update_a_resource_that_does_not_exist(self):
response = self.put(self.get_url('/api/30'), dumps(dict(text='not exist')))
assert_response_code(response, 404)
def test_delete_method_to_destroy_a_resource(self):
response = self.delete(self.get_url('/api/1'))
assert_response_code(response, 200)
response = self.delete(self.get_url('/api/1'))
assert_response_code(response, 404)
def test_return_resource_as_xml(self):
url = self.get_url('/api/1')
response = self._fetch(url, 'GET', headers=dict(Accept='text/xml'))
assert_response_code(response, 200)
assert 'text/xml' in response.headers['Content-Type'], 'the content-type should be text/xml but it was {0}'.format(response.headers['Content-Type'])
assert response.body == b'<comment id="1">X</comment>'
def test_choose_response_type_based_on_the_accept_header(self):
url = self.get_url('/api/1')
response = self._fetch(url, 'GET', headers={'Accept':'application/json, text/xml'})
assert_response_code(response, 200)
assert 'application/json' in response.headers['Content-Type'], 'the content-type should be application/json but it was {0}'.format(response.headers['Content-Type'])
def test_create_new_instance_of_the_resource_with_content_type_text_xml(self):
        a_new_item = '<comment>meu comentario</comment>'
response = self._fetch(self.get_url('/api'), 'POST', headers={'Content-Type': 'text/xml'}, body=a_new_item)
assert_response_code(response, 201)
# gets the new instance
response = self._fetch(response.headers['Location'], 'GET', headers={'Accept': 'text/xml'})
assert 'text/xml' in response.headers['Content-Type'], 'the content-type should be text/xml but it was {0}'.format(response.headers['Content-Type'])
doc = ElementTree.fromstring(response.body)
assert doc.tag == 'comment', 'the tag should be "comment" but it was {0}'.format(doc.tag)
assert doc.text == 'meu comentario', 'the comment text should be "meu comentario" but it was {0}'.format(doc.text)
        assert doc.get('id') == '10', 'the id should be 10 but it was {0}'.format(doc.get('id'))
def test_get_resource_with_content_type_text_xml(self):
response = self._fetch(self.get_url('/api/2'), 'GET', headers={'Accept': 'text/xml'})
assert 'text/xml' in response.headers['Content-Type'], 'the content-type should be text/xml but it was {0}'.format(response.headers['Content-Type'])
doc = ElementTree.fromstring(response.body)
assert doc.tag == 'comment', 'the tag should be "comment" but it was {0}'.format(doc.tag)
assert doc.text == 'XX', 'the comment text should be "XX" but it was {0}'.format(doc.text)
def test_update_new_instance_of_the_resource_with_content_type_text_xml(self):
        an_updated_item = '<comment id="2">meu comentario</comment>'
response = self._fetch(self.get_url('/api/2'), 'PUT', headers={'Content-Type': 'text/xml'}, body=an_updated_item)
assert_response_code(response, 204)
# get the resource to verify if it was updated
response = self._fetch(response.headers['Location'], 'GET', headers={'Accept': 'text/xml'})
assert 'text/xml' in response.headers['Content-Type'], 'the content-type should be text/xml but it was {0}'.format(response.headers['Content-Type'])
doc = ElementTree.fromstring(response.body)
assert doc.tag == 'comment', 'the tag should be "comment" but it was {0}'.format(doc.tag)
assert doc.text == 'meu comentario', 'the comment text should be "meu comentario" but it was {0}'.format(doc.text)
def test_jsonp_response_when_accept_textjavascript(self):
response = self._fetch(
self.get_url('/api/?callback=my_callback'), 'GET', headers={
'Accept': 'text/javascript'
})
assert_response_code(response, 200)
assert response.body.decode('utf-8').startswith('my_callback(')
def test_use_the_default_encoder(self):
response = self._fetch(
self.get_url('/api/?callback=my_callback'), 'GET', headers={
'Accept': 'lol/cat'
})
assert_response_code(response, 200)
def test_show_content_as_html_when_requested_by_browser(self):
CHROME_ACCEPT_HEADER = 'text/html,application/xhtml+xml,application/xm'\
'l;q=0.9,*/*;q=0.8'
response = self._fetch(
self.get_url('/api/'), 'GET', headers={
'Accept': CHROME_ACCEPT_HEADER
})
assert_response_code(response, 200)
assert '<body>' in response.body.decode('utf-8')
def test_should_return_type_json_as_specified_in_url(self):
response = self.get('/api/1.json')
assert_response_code(response, 200)
data = loads(response.body.decode('utf-8'))
        assert 'id' in data
def test_should_return_type_xml_as_specified_in_url(self):
response = self.get('/api/1.xml')
assert_response_code(response, 200)
assert '</comment>' in response.body.decode('utf-8')
def test_should_raise_404_when_extension_is_not_found(self):
response = self.get('/api/1.rb')
assert_response_code(response, 404)
    def test_should_return_type_jsonp_as_specified_in_url(self):
response = self.get('/api/1.js?callback=myCallbackFooBar')
assert_response_code(response, 200)
assert response.body.decode('utf-8').startswith('myCallbackFooBar(')
def test_should_return_the_default_callback_when_i_not_specify_in_my_request(self):
response = self.get('/api/1.js')
assert_response_code(response, 200)
assert re.match(b'^[\w_]+\(.*', response.body), response.body.decode('utf-8')
    def test_should_return_defaultCallback_when_i_not_specify_in_my_request(self):
response = self.get('/api/1.js')
assert_response_code(response, 200)
assert re.match(b'^defaultCallback\(.*', response.body), response.body.decode('utf-8')
class WithDefaultCallbackHandler(ResourceHandler):
default_callback_name = 'thePersonalizedCallback'
def get_model(self, cid, callback, *args):
callback({})
class JsonEncoderDefineAnDefaultCallbackTestCase(AsyncHTTPTestCase,\
AsyncHTTPClientMixin):
def get_app(self):
api = TornadoRESTful(cross_origin_enabled=True)
api.add_resource('api', WithDefaultCallbackHandler)
application = tornado.web.Application(api.get_url_mapping())
return application
def test_should_return_the_default_callback_when_i_not_specify_in_my_request(self):
response = self.get('/api/1.js')
assert_response_code(response, 200)
assert re.match(b'^thePersonalizedCallback\(.*', response.body), response.body.decode('utf-8')
def test_should_return_with_the_callback_name_i_choose(self):
response = self.get('/api/1.js?callback=fooBar')
assert_response_code(response, 200)
assert response.body.decode('utf-8').startswith('fooBar(')
def test_should_return_cross_origin_header(self):
response = self.get('/api/1.js?callback=fooBar')
assert_response_code(response, 200)
assert 'Access-Control-Allow-Origin' in response.headers
assert response.headers['Access-Control-Allow-Origin'] == '*'
class ResourceHandlerWithoutImplementationTestCase(AsyncHTTPTestCase,\
AsyncHTTPClientMixin):
def get_app(self):
api = TornadoRESTful()
api.add_resource('api', ResourceHandler)
application = tornado.web.Application(api.get_url_mapping())
return application
def test_try_to_create_a_resource(self):
response = self.post(self.get_url('/api'), dumps(dict(text='nice')))
assert_response_code(response, 404)
def test_try_to_list_resources(self):
response = self.get('/api')
assert_response_code(response, 404)
def test_try_to_get_instance(self):
response = self.get('/api/1')
assert_response_code(response, 404)
def test_try_to_update_a_resource(self):
response = self.put(self.get_url('/api/1'), dumps(dict(text='nice')))
assert_response_code(response, 404)
def test_try_to_delete_a_resource(self):
response = self.delete(self.get_url('/api/1'))
assert_response_code(response, 404)
|
mit
| -1,293,191,428,093,595,100 | 41.526154 | 172 | 0.638159 | false |
skidekeersmaecker/raspi-cursus
|
labo4/mqtt.py
|
1
|
2464
|
import RPi.GPIO as GPIO
import paho.mqtt.client as mqtt
import time
GPIO.setmode(GPIO.BOARD)
#leds
ledKitchen = 16
ledLivingroom = 18
GPIO.setup(ledKitchen, GPIO.OUT)
GPIO.output(ledKitchen, False)
GPIO.setup(ledLivingroom, GPIO.OUT)
GPIO.output(ledLivingroom, False)
#buttons
buttonMaster = 11
buttonKitchen = 13
buttonLivingroom = 15
GPIO.setup(buttonMaster, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(buttonKitchen, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(buttonLivingroom, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def callMaster(channel):
print("called master button")
write('master')
def callKitchen(channel):
print("called kitchen button")
write('kitchen')
def callLivingroom(channel):
print("called livingroom button")
write('livingroom')
#WIFI:
def write(data):
mqttc.publish("home/groundfloor/kitchen/lights/lightx", str(data), qos=1)
#mqttc.publish("home/groundfloor/livingroom/lights/lightx", str(data), qos=1)
def on_connect(mqttc, obj, rc):
print("connected: " + str(rc))
mqttc.subscribe("home/groundfloor/livingroom/lights/lightx")
#mqttc.subscribe("home/groundfloor/kitchen/lights/lightx")
def on_message(mqttc, obj, msg):
print("msg: " + str(msg.payload) + " at topic: " + msg.topic + " with QoS: " + str(msg.qos))
if(str(msg.payload) == "b'master'"):
GPIO.output(ledKitchen, False)
GPIO.output(ledLivingroom, False)
if(str(msg.payload) == "b'kitchen'"):
GPIO.output(ledKitchen, not GPIO.input(ledKitchen))
if(str(msg.payload) == "b'livingroom'"):
GPIO.output(ledLivingroom, not GPIO.input(ledLivingroom))
def on_publish(mqttc, obj, mid):
print("mid: " + str(mid))
GPIO.add_event_detect(buttonMaster, GPIO.FALLING, callback=callMaster, bouncetime=300)
GPIO.add_event_detect(buttonKitchen, GPIO.FALLING, callback=callKitchen, bouncetime=300)
GPIO.add_event_detect(buttonLivingroom, GPIO.FALLING, callback=callLivingroom, bouncetime=300)
def main():
global mqttc
try:
mqttc = mqtt.Client()
mqttc.on_publish = on_publish
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.username_pw_set("bxaxrkah", "1zQixURXUYuB")
mqttc.connect("m10.cloudmqtt.com", 13915)
mqttc.loop_forever()
except KeyboardInterrupt:
print("exiting program")
mqttc.loop_stop()
mqttc.disconnect()
GPIO.cleanup()
if __name__ == "__main__":
main()
|
mit
| 8,725,125,909,546,020,000 | 29.419753 | 96 | 0.689529 | false |
andrei4ka/fuel-web-redhat
|
nailgun/nailgun/objects/serializers/release.py
|
1
|
1707
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.objects.serializers.base import BasicSerializer
class ReleaseSerializer(BasicSerializer):
fields = (
"id",
"name",
"version",
"can_update_from_versions",
"description",
"operating_system",
"modes_metadata",
"roles",
"roles_metadata",
"wizard_metadata",
"state",
"attributes_metadata"
)
@classmethod
def serialize(cls, instance, fields=None):
from nailgun.objects.release import Release
release_dict = \
super(ReleaseSerializer, cls).serialize(instance, fields)
release_dict["is_deployable"] = Release.is_deployable(instance)
# we always want to get orchestrator data even it's a default one
release_dict["orchestrator_data"] = \
Release.get_orchestrator_data_dict(instance)
return release_dict
class ReleaseOrchestratorDataSerializer(BasicSerializer):
fields = (
"repo_metadata",
"puppet_manifests_source",
"puppet_modules_source"
)
|
apache-2.0
| 8,583,087,739,334,395,000 | 28.431034 | 78 | 0.650264 | false |
quesnel/pyvle
|
src/pyvle.py
|
1
|
16780
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: pyvle.py
# Author: The VLE Development Team.
# Brief: Python wrapper of VLE
#
# VLE Environment - the multimodeling and simulation environment
# This file is a part of the VLE environment (http://vle-project.org)
# Copyright (C) 2003 - 2017 The VLE Development Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## pyvalue to vlevalue
## fonction (if specified types vleTUPLE, vleTABLE, vleXML):
## bool -> BOOLEAN
## int -> INTEGER
## float -> DOUBLE
## str -> STRING
## VleXML (embeds str) -> XMLTYPE
## list -> SET
## dict (with str keys) -> MAP
## VleTuple (embeds list of float) -> TUPLE
## VleTable (embeds list of list of float) -> TABLE
## VleMatrix (embeds list of list) -> MATRIX
##
##
## BOOLEAN -> bool
## INTEGER -> int
## DOUBLE -> float
## STRING -> str
## XMLTYPE -> VleXML
## SET -> list
## MAP -> dict
## TUPLE -> VleTuple
## TABLE -> VleTable
## MATRIX -> VleMatrix
##
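## Illustrative conversions (a sketch; see to_value below):
## to_value(True) -> BOOLEAN
## to_value({'a': 1}) -> MAP (str keys)
## to_value(VleTuple([1.0, 2.0])) -> TUPLE
## to_value(VleXML('<x/>')) -> XMLTYPE
##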
import libpyvle
class VleValue:
_val_type = None
def __init__(self, x):
if self._val_type is None:
raise NotImplementedError
if isinstance(x, self._val_type):
self.val = x
else:
            raise ValueError(u'Can\'t embed type %s in %s' %
                             (type(x), self.__class__.__name__))
def __repr__(self):
return "<%s(%r)>" % (self.__class__.__name__, self.val)
class VleXML(VleValue):
_val_type = str
class VleTuple(VleValue):
_val_type = list
class VleTable(VleValue):
_val_type = list
class VleMatrix(VleValue):
_val_type = list
class Vle:
def __init__(self, file_, package = ""):
if isinstance(file_, basestring):
# assume file_ is a filename
self.vpz = libpyvle.open_pkg(package, file_)
self.filename = file_
else:
# assume file_ is a file object
if package == "":
self.vpz = libpyvle.from_buffer(file_.read())
else:
self.vpz = libpyvle.from_buffer_pkg(package, file_.read())
self.filename = file_.name if hasattr(file_, "name") else None
def save(self, file_):
if isinstance(file_, basestring):
# assume file_ is a filename
libpyvle.save(self.vpz, file_)
else:
# assume file_ is a file object
file_.write(libpyvle.save_buffer(self.vpz))
# name of experiments
def setName(self, name):
libpyvle.experiment_set_name(self.vpz, name)
# begin of experiments
def setBegin(self, date):
libpyvle.experiment_set_begin(self.vpz, date)
def getBegin(self):
return libpyvle.experiment_get_begin(self.vpz)
# duration of experiments
def setDuration(self, value):
libpyvle.experiment_set_duration(self.vpz, value)
def getDuration(self):
return libpyvle.experiment_get_duration(self.vpz)
# execution of experiments
def run(self):
return libpyvle.run(self.vpz)
def runMatrix(self):
return libpyvle.run_matrix(self.vpz)
def runManager(self):
return libpyvle.run_manager(self.vpz)
def runManagerMatrix(self):
return libpyvle.run_manager_matrix(self.vpz)
def runManagerThread(self, th):
return libpyvle.run_manager_thread(self.vpz, th)
def runManagerThreadMatrix(self, th):
return libpyvle.run_manager_thread_matrix(self.vpz, th)
def runManagerCluster(self):
return libpyvle.run_manager_cluster(self.vpz)
def runManagerClusterMatrix(self):
return libpyvle.run_manager_cluster_matrix(self.vpz)
# conditions
def listConditions(self):
return libpyvle.condition_list(self.vpz)
def listConditionPorts(self, name):
return libpyvle.condition_port_list(self.vpz, name)
def clearConditionPort(self, name, port):
libpyvle.condition_port_clear(self.vpz, name, port);
def getConditionPortValues(self, name, port):
return libpyvle.condition_show(self.vpz, name, port)
def getConditionPortValue(self, name, port, i):
return libpyvle.condition_get_value(self.vpz, name, port, i)
# conditions add
def addRealCondition(self, name, port, v):
if isinstance(v,float):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to float' % type(v))
def addIntegerCondition(self, name, port, v):
if isinstance(v,int):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to int' % type(v))
def addStringCondition(self, name, port, v):
if isinstance(v,str):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to str' % type(v))
def addBooleanCondition(self, name, port, v):
if isinstance(v,bool):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to bool' % type(v))
def addMapCondition(self, name, port, v):
if isinstance(v,dict):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to dict' % type(v))
def addSetCondition(self, name, port, v):
if isinstance(v,list):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to list' % type(v))
def addMatrixCondition(self, name, port, v):
if isinstance(v,VleMatrix):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to VleMatrix' % type(v))
def addTableCondition(self, name, port, v):
if isinstance(v,VleTable):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to VleTable' % type(v))
def addTupleCondition(self, name, port, v):
if isinstance(v,VleTuple):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to VleTuple' % type(v))
def addXMLCondition(self, name, port, v):
if isinstance(v,VleXML):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
else:
raise ValueError(u'Can\'t convert type %s to VleXML' % type(v))
def addValueCondition(self, name, port, v):
libpyvle.condition_add_value(self.vpz, name, port, to_value(v))
#################
## pyvle specific
def createCondition(self, name, **ports):
# ports is an optional list a keyworgs args :
# portname = value, ...
libpyvle.condition_create(self.vpz, name)
for portname, val in ports.iteritems():
libpyvle.condition_add_value(self.vpz, name,
portname, to_value(val))
def setConditionValue(self, name, port, value, type, i):
libpyvle.condition_set_value(self.vpz, name, port, value, type, i)
def setConditionPortValue(self, name, port, value, i):
libpyvle.condition_set_port_value(self.vpz, name, port, to_value(value), i)
def getConditionSetValue(self, name, port):
return libpyvle.condition_get_setvalue(self.vpz, name, port)
def getConditionValueType(self, name, port, i):
return libpyvle.condition_get_value_type(self.vpz, name, port, i)
def delConditionValue(self, name, port, i):
libpyvle.condition_delete_value(self.vpz, name, port, i)
def listAtomicModelConditions(self, name):
return libpyvle.atomic_model_conditions_list(self.vpz, name)
def listDynamicConditions(self, name):
return libpyvle.dynamic_conditions_list(self.vpz, name)
##
#################
# views
def listViews(self):
return libpyvle.views_list(self.vpz)
def getViewName(self, name):
return libpyvle.view_get_name(self.vpz, name)
def getViewType(self, name):
return libpyvle.view_get_type(self.vpz, name)
def getViewTimeStep(self, name):
return libpyvle.view_get_timestep(self.vpz, name)
def getViewOutput(self, name):
return libpyvle.view_get_output(self.vpz, name)
def getViewData(self, name):
return libpyvle.view_get_data(self.vpz, name)
def setViewName(self, oldname, newname):
libpyvle.view_set_name(self.vpz, oldname, newname)
def setViewType(self, name, type):
libpyvle.view_set_type(self.vpz, name, type)
def setViewTimeStep(self, name, timestep):
libpyvle.view_set_timestep(self.vpz, name, timestep)
def setViewData(self, name, data):
libpyvle.view_set_data(self.vpz, name, data)
def addEventView(self, name, output):
libpyvle.views_add_eventview(self.vpz, name, output)
def addTimedView(self, name, output, time):
libpyvle.views_add_timedview(self.vpz, name, output, time)
def addFinishView(self, name, output):
libpyvle.views_add_finishview(self.vpz, name, output)
def getOutputPlugin(self, output):
return libpyvle.output_get_plugin(self.vpz, output)
def setOutputPlugin(self, output, location, format, plugin, package):
libpyvle.output_set_plugin(self.vpz, output, location, format, plugin, package)
def getOutputLocation(self, output):
return libpyvle.output_get_location(self.vpz, output)
def listOutputs(self):
return libpyvle.outputs_list(self.vpz)
# observables
def listObservables(self):
return libpyvle.observables_list(self.vpz)
def addObservable(self, name):
libpyvle.observable_add(self.vpz, name)
def delObservable(self, name):
libpyvle.observable_del(self.vpz, name)
def existObservable(self, name):
return libpyvle.observable_exists(self.vpz, name)
def clearObservables(self):
libpyvle.observables_clear(self.vpz)
def isObservablesEmpty(self):
return libpyvle.observables_empty(self.vpz)
def getObservableName(self, name):
return libpyvle.observable_get_name(self.vpz, name)
def listObservablePorts(self, name):
return libpyvle.observable_ports_list(self.vpz, name)
def addObservablePort(self, obsname, portname):
libpyvle.observable_add_port(self.vpz, obsname, portname)
def delObservablePort(self, obsname, portname):
libpyvle.observable_del_port(self.vpz, obsname, portname)
def hasObservableView(self, obsname, viewname):
return libpyvle.observable_has_view(self.vpz, obsname, viewname)
def getObservablePortName(self, obsname, viewname):
return libpyvle.observable_get_port_name(self.vpz, obsname, viewname)
def isPermanentObservable(self, obsname):
return libpyvle.observable_is_permanent(self.vpz, obsname)
def setPermanentObservable(self, obsname, ispermanent):
libpyvle.observable_set_permanent(self.vpz, obsname, ispermanent)
def getObservablePortAttachedViews(self, obsname, portname):
return libpyvle.observable_port_attached_views(self.vpz, obsname,
portname)
def listDynamicObservables(self, name):
return libpyvle.dynamic_observables_list(self.vpz, name)
def listViewsEntries(self):
return libpyvle.list_view_entries(self.vpz)
# dynamics
def listDynamics(self):
return libpyvle.dynamics_list(self.vpz)
def getDynamicName(self, name):
return libpyvle.dynamic_get_name(self.vpz, name)
def getDynamicModel(self, name):
return libpyvle.dynamic_get_model(self.vpz, name)
def getDynamicLibrary(self, name):
return libpyvle.dynamic_get_library(self.vpz, name)
def getDynamicLanguage(self, name):
return libpyvle.dynamic_get_language(self.vpz, name)
def setDynamicModel(self, name, model):
libpyvle.dynamic_set_model(self.vpz, name, model)
def setDynamicLibrary(self, name, library):
libpyvle.dynamic_set_library(self.vpz, name, library)
def setDynamicLanguage(self, name, language):
libpyvle.dynamic_set_language(self.vpz, name, language)
def listDynamicModels(self, name):
return libpyvle.dynamic_get_model_list(self.vpz, name)
# export
def export(self, location, view, type):
return libpyvle.export(self.vpz, location, view, type)
def exportManager(self, location, view, type):
return libpyvle.export_manager(self.vpz, location, view, type)
# other
def getExperimentName(self):
return libpyvle.experiment_get_name(self.vpz)
def traceRunError(self):
return libpyvle.trace_run_error(self.vpz)
def setPackageMode(self, name):
libpyvle.set_package_mode(name)
def setNormalMode(self):
libpyvle.set_normal_mode()
def runCombination(self, comb):
return libpyvle.run_combination(self.vpz, comb)
class VlePackage:
def __init__(self, name):
self.name = name
@classmethod
def getInstalledPackages(cls):
return libpyvle.get_installed_packages()
def getVpzList(self):
return libpyvle.get_package_vpz_list(self.name)
def getVpzDirectory(self):
return libpyvle.get_package_vpz_directory(self.name)
def getDataDirectory(self):
return libpyvle.get_package_data_directory(self.name)
def getOutputDirectory(self):
return libpyvle.get_package_output_directory(self.name)
def getVpz(self, vpz):
return libpyvle.get_package_vpz(self.name, vpz)
def to_value(x):
if isinstance(x, bool):
val = libpyvle.bool_to_value(x)
elif isinstance(x, int):
val = libpyvle.int_to_value(x)
elif isinstance(x, float):
val = libpyvle.real_to_value(x)
elif isinstance(x, str):
val = libpyvle.string_to_value(x)
elif isinstance(x, dict):
val = libpyvle.create_map()
for k,v in x.iteritems():
libpyvle.add_value_to_map(val, k, to_value(v))
elif isinstance(x, list):
val = libpyvle.create_set()
for v in x:
libpyvle.add_value_to_set(val, to_value(v))
elif isinstance(x, VleTuple):
if isinstance(x.val,list):
val = libpyvle.create_tuple(len(x.val))
i = 0
for v in x.val:
if isinstance(v,float):
libpyvle.set_value_to_tuple(val, i, v)
i = i+1
else:
raise ValueError(u'Can\'t convert type %s to float' % type(v))
else:
raise ValueError(u'Can\'t convert type %s to list' % type(x.val))
elif isinstance(x, VleTable):
val = None
i = 0
for v in x.val:
if isinstance(v,list):
j = 0
for v1 in v:
if isinstance(v1,float):
                        if val is None:
val = libpyvle.create_table(len(v),len(x.val))
libpyvle.set_value_to_table(val, j,i,v1)
j = j+1
else:
raise ValueError(u'Can\'t convert type %s to float' % type(v1))
i = i+1
else:
raise ValueError(u'Can\'t convert type %s to list' % type(v))
elif isinstance(x, VleMatrix):
val = None
i = 0
for v in x.val:
if isinstance(v,list):
j = 0
for v1 in v:
                    if val is None:
val = libpyvle.create_matrix(len(v),len(x.val))
libpyvle.set_value_to_matrix(val,j,i,to_value(v1))
j = j+1
i = i+1
else:
raise ValueError(u'Can\'t convert type %s to list' % type(v))
elif isinstance(x, VleXML):
if isinstance(x.val,str):
val = libpyvle.str_to_xml(x.val)
else:
raise ValueError(u'Can\'t convert type %s to str' % type(x.val))
elif isinstance(x, libpyvle.Value):
val = x
else:
raise ValueError(u'Can\'t convert type %s in vle::value::Value' %
type(x))
return val
def __compileTestPackages():
libpyvle.__compileTestPackages()
return None
|
gpl-3.0
| -6,933,170,966,559,748,000 | 31.393822 | 87 | 0.638796 | false |
wentixiaogege/newt-2.0
|
store/views.py
|
1
|
2855
|
from newt.views import AuthJSONRestView
from common.response import json_response
from django.conf import settings
import json
from importlib import import_module
store_adapter = import_module(settings.NEWT_CONFIG['ADAPTERS']['STORES']['adapter'])
import logging
logger = logging.getLogger("newt." + __name__)
# /api/store/
class StoreRootView(AuthJSONRestView):
def get(self, request):
logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__))
return store_adapter.get_store(request)
def post(self, request):
logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__))
initial_data = request.POST.getlist("data")
return store_adapter.create_store(request, initial_data=initial_data)
# /api/store/<store_name>/
class StoreView(AuthJSONRestView):
def get(self, request, store_name):
if request.GET.get("query", False):
# Queries the store if the query parameter is set
return store_adapter.query_store(request, store_name, request.GET.get("query"))
else:
# Returns all data about the store
return store_adapter.get_store_contents(request, store_name)
def post(self, request, store_name):
if store_name in store_adapter.get_store(request):
# Updates data if the store already exists
initial_data = request.POST.get("data", None)
return store_adapter.store_insert(request, store_name, initial_data=initial_data)
else:
# Creates and adds the data if the store doesn't exist
initial_data = request.POST.getlist("data")
return store_adapter.create_store(request, store_name, initial_data=initial_data)
def delete(self, request, store_name):
return store_adapter.delete_store(request, store_name)
# /api/store/<store_name>/perms/
class StorePermView(AuthJSONRestView):
def get(self, request, store_name):
return store_adapter.get_store_perms(request, store_name)
def post(self, request, store_name):
perms = json.loads(request.POST.get("data", "[]"))
return store_adapter.update_store_perms(request, store_name, perms=perms)
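# Illustrative requests (hypothetical store name "notes"):
#   GET /api/store/notes/?query=... queries the store,
#   POST /api/store/notes/ inserts into an existing store (or creates it),
#   GET /api/store/notes/perms/ lists its permissions.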
# /api/store/<store_name>/<obj_id>/
class StoreObjView(AuthJSONRestView):
def get(self, request, store_name, obj_id):
return store_adapter.store_get_obj(request, store_name, obj_id)
def put(self, request, store_name, obj_id):
data = json.loads(request.body).get("data", None)
if not data:
return json_response(status="ERROR", status_code=400, error="No data received.")
return store_adapter.store_update(request, store_name, obj_id, data=data)
# /api/store/<query>/
class ExtraStoreView(AuthJSONRestView):
def get(self, request, query):
        return store_adapter.extras_router(request, query)
|
bsd-2-clause
| -4,546,405,815,730,923,500 | 38.666667 | 93 | 0.670053 | false |
nschank/latexml
|
stringutil.py
|
1
|
2656
|
# import re
# COMMENT_PATERN = re.compile("([^\\\\])%.*($)", flags=re.MULTILINE)
# REDACT = "%"
# COMMENT_REPLACE = "\\1{}\\2".format(REDACT)
# def strip_comments(document_contents):
# return COMMENT_PATERN.sub(COMMENT_REPLACE, document_contents)
import strip_comments
def strip_latex_comments(document_contents):
return strip_comments.strip_comments(document_contents)
test1in = """% Some comment
% Comment w/ escaped percent sign: \%
%%% Comment starting w/ multiple percents
Here's some math: $3 + 3 = 6$ % and here's a comment
% Yes.
Look a percent sign: \\%. Isn't it great?
Hard newline then comment: \\\\%this is a comment.
% p break:
\\begin{verbatim}%
% Unescaped % percent % signs are allowed here.
As are backslashes and such \\%\\\\%%% Hello. %
\\end{verbatim}% This is a line comment.
We want to keep comment env in its entirety:
\\begin{comment}% This should stay
This is inside the comment environment.
It (\\%) should stay.\\\\ % So should I.
\\end{comment} This should leave.
%"""
test1out = """Here's some math: $3 + 3 = 6$
Look a percent sign: \\%. Isn't it great?
Hard newline then comment: \\\\
\\begin{verbatim}%
% Unescaped % percent % signs are allowed here.
As are backslashes and such \\%\\\\%%% Hello. %
\\end{verbatim}
We want to keep comment env in its entirety:
\\begin{comment}% This should stay
This is inside the comment environment.
It (\\%) should stay.\\\\ % So should I.
\\end{comment}
"""
def strip_latex_comments_test():
return strip_latex_comments(test1in) == test1out
def main():
passed = strip_latex_comments_test()
print "Test {}".format("passed" if passed else "failed")
foo = """%W
X% p break:
Y%hello
\\begin{verbatim}
blah
\\end{verbatim}
"""
def debug_foo():
print "------------------------------- OLD --------------------------------"
print foo
print "------------------------------- ... --------------------------------"
out = strip_latex_comments(foo)
print "------------------------------- NEW --------------------------------"
print out
def debug_big_test():
print "------------------------------- OLD --------------------------------"
print test1in
print "------------------------------- ... --------------------------------"
out = strip_latex_comments(test1in)
print "------------------------------- NEW --------------------------------"
print out
print "------------------------------- EXP --------------------------------"
print test1out
print "--------------------------------------------------------------------"
if __name__ == '__main__':
# debug_foo()
# debug_big_test()
main()
|
mit
| 2,413,618,694,096,714,000 | 29.181818 | 80 | 0.518449 | false |
commtrack/commtrack-old-to-del
|
apps/reports/custom/grameen.py
|
1
|
7030
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import inspect
from django.template.loader import render_to_string
from django.db import connection
import settings
from xformmanager.models import Metadata, FormDefModel, ElementDefModel
from reports.models import Case, SqlReport
from reports.util import get_whereclause
from shared import monitoring_report, Mother
'''Report file for custom Grameen reports'''
# see mvp.py for an explanation of how these are used.
# temporarily "privatizing" the name because grameen doesn't
# want this report to show up in the UI
def _monitoring(request):
'''Safe Pregnancy Monitoring Report'''
safe_preg_case_name = "Grameen Safe Pregnancies"
try:
case = Case.objects.get(name=safe_preg_case_name)
except Case.DoesNotExist:
return '''Sorry, it doesn't look like the forms that this report
depends on have been uploaded.'''
return monitoring_report(request, case)
def _mother_summary(request):
'''Individual Mother Summary'''
# this is intentionally private, as it's only accessed from within other
# reports that explicitly know about it. We don't want to list it because
# we don't know what id to use.
safe_preg_case_name = "Grameen Safe Pregnancies"
try:
case = Case.objects.get(name=safe_preg_case_name)
except Case.DoesNotExist:
return '''Sorry, it doesn't look like the forms that this report
depends on have been uploaded.'''
if not "case_id" in request.GET:
return '''Sorry, you have to specify a mother using the case id
in the URL.'''
case_id = request.GET["case_id"]
data = case.get_data_map_for_case(case_id)
mom = Mother(case, case_id, data)
mother_name = request.GET["mother_name"]
if mom.mother_name != mother_name:
return '''<p class="error">Sorry it appears that this id has been used by the CHW for
more than one mother. Unfortunately, this means we can't
yet show you her data here. Please remind your CHW's to
use unique case Ids!</p>
'''
attrs = [name for name in dir(mom) if not name.startswith("_")]
attrs.remove("data_map")
display_attrs = [attr.replace("_", " ") for attr in attrs]
all_attrs = zip(attrs, display_attrs)
mom.hi_risk_reasons = _get_hi_risk_reason(mom)
return render_to_string("custom/grameen/mother_details.html",
{"mother": mom, "attrs": all_attrs,
"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakly have to explicitly pass this
})
def _get_hi_risk_reason(mom):
reasons = []
if (mom.mother_age >= 35): reasons.append("35 or older")
if (mom.mother_age <= 18): reasons.append("18 or younger")
if (mom.mother_height == 'under_150'): reasons.append("mother height under 150cm")
if (mom.previous_csection == 'yes'): reasons.append("previous c-section")
if (mom.previous_newborn_death == 'yes'): reasons.append("previous newborn death")
if (mom.previous_bleeding == 'yes'): reasons.append("previous bleeding")
if (mom.previous_terminations >= 3): reasons.append("%s previous terminations" % mom.previous_terminations)
if (mom.previous_pregnancies >= 5): reasons.append("%s previous pregnancies" % mom.previous_pregnancies)
if (mom.heart_problems == 'yes'): reasons.append("heart problems")
if (mom.diabetes == 'yes'): reasons.append("diabetes")
if (mom.hip_problems == 'yes'): reasons.append("hip problems")
if (mom.card_results_syphilis_result == 'positive'): reasons.append("positive for syphilis")
if (mom.card_results_hepb_result == 'positive'): reasons.append("positive for hepb")
if (mom.over_5_years == 'yes'): reasons.append("over 5 years since last pregnancy")
if (mom.card_results_hb_test == 'below_normal'): reasons.append("low hb test")
if (mom.card_results_blood_group == 'onegative'): reasons.append("o-negative blood group")
if (mom.card_results_blood_group == 'anegative'): reasons.append("a-negative blood group")
if (mom.card_results_blood_group == 'abnegative'): reasons.append("ab-negative blood group")
if (mom.card_results_blood_group == 'bnegative'): reasons.append("b-negative blood group")
return ", ".join(reasons)
def hi_risk_pregnancies(request):
'''Hi-Risk Pregnancy Summary'''
# just pass on to the helper view, but ensure that hi-risk is set to yes
params = request.GET.copy()
params["sampledata_hi_risk"]="yes"
return _chw_submission_summary(request, params)
def chw_submission_details(request):
'''Health Worker Submission Details'''
return _chw_submission_summary(request, request.GET)
def _chw_submission_summary(request, params):
# this was made a private method so that we can call it from multiple reports
# with an extra parameter.
# had to move this form a sql report to get in the custom annotations
# this is a pretty ugly/hacky hybrid approach, and should certainly
# be cleaned up
extuser = request.extuser
# hard coded to our fixture. bad bad!
grameen_submission_details_id = 2
# hard coded to our schema. bad bad!
form_def = ElementDefModel.objects.get(table_name="schema_intel_grameen_safe_motherhood_registration_v0_3").form
report = SqlReport.objects.get(id=grameen_submission_details_id)
cols = ('meta_username', 'sampledata_hi_risk')
where_cols = dict([(key, val) for key, val in params.items() if key in cols])
whereclause = get_whereclause(where_cols)
follow_filter = None
if "follow" in params:
if params["follow"] == "yes":
follow_filter = True
elif params["follow"] == "no":
follow_filter = False
cols, data = report.get_data({"whereclause": whereclause})
new_data = []
for row in data:
new_row_data = dict(zip(cols, row))
row_id = new_row_data["Instance ID"]
meta = Metadata.objects.get(formdefmodel=form_def, raw_data=row_id)
follow = meta.attachment.annotations.count() > 0
if follow_filter is not None:
if follow_filter and not follow:
# filtering on true, but none found, don't include this
continue
elif not follow_filter and follow:
# filtering on false, but found follows, don't include this
continue
new_row_data["Follow up?"] = "yes" if follow else "no"
new_row_data["meta"] = meta
new_row_data["attachment"] = meta.attachment
new_data.append(new_row_data)
cols = cols[:6]
return render_to_string("custom/grameen/chw_submission_details.html",
{"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakly have to explicitly pass this
"columns": cols,
"data": new_data})
|
bsd-3-clause
| -1,309,282,711,300,028,700 | 46.5 | 116 | 0.644381 | false |
klebercode/econordeste
|
econordeste/event/urls.py
|
1
|
1067
|
# coding: utf-8
from django.conf.urls import patterns, url
from econordeste.event.views import (CalendarYearArchiveView,
CalendarMonthArchiveView,
CalendarDayArchiveView,
CalendarListView,
CalendarDateDetailView)
urlpatterns = patterns(
'econordeste.event.views',
url(r'^$', CalendarListView.as_view(), name='home'),
url(r'^(?P<year>\d{4})/$',
CalendarYearArchiveView.as_view(),
name='calendar_archive_year'),
url(r'^(?P<year>\d{4})/(?P<month>\d+)/$',
CalendarMonthArchiveView.as_view(month_format='%m'),
name='calendar_archive_month'),
url(r'^(?P<year>\d{4})/(?P<month>\d+)/(?P<day>\d+)/$',
CalendarDayArchiveView.as_view(month_format='%m'),
name='calendar_archive_day'),
url(r'^(?P<year>\d{4})/(?P<month>\d+)/(?P<day>\d+)/(?P<slug>[-\w]+)/$',
CalendarDateDetailView.as_view(month_format='%m'),
name='calendar_date_detail'),
)
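# Illustrative URL resolution (a sketch, assuming the app is mounted at the
# root): /2014/ hits the year archive, /2014/05/ the month archive,
# /2014/05/20/ the day archive, and /2014/05/20/some-slug/ resolves to
# calendar_date_detail.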
|
mit
| -6,329,672,368,472,672,000 | 40.038462 | 75 | 0.54358 | false |
chfoo/wpull
|
wpull/scraper/util.py
|
1
|
6973
|
'''Misc functions.'''
import functools
import gettext
import itertools
import logging
import mimetypes
import re
import string
import wpull.url
from wpull.backport.logging import BraceMessage as __
from wpull.pipeline.item import LinkType
_ = gettext.gettext
_logger = logging.getLogger(__name__)
def parse_refresh(text):
'''Parses text for HTTP Refresh URL.
Returns:
str, None
'''
match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
if match:
url = match.group(1)
if url.startswith('"'):
url = url.strip('"')
elif url.startswith("'"):
url = url.strip("'")
return clean_link_soup(url)
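# Illustrative use (a sketch): parse_refresh('0; url=http://example.com/')
# returns 'http://example.com/'; parse_refresh("5; url='/next'") strips the
# quotes and returns '/next'.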
def clean_link_soup(link):
'''Strip whitespace from a link in HTML soup.
Args:
link (str): A string containing the link with lots of whitespace.
The link is split into lines. For each line, leading and trailing
whitespace is removed and tabs are removed throughout. The lines are
concatenated and returned.
For example, passing the ``href`` value of::
<a href=" http://example.com/
blog/entry/
how smaug stole all the bitcoins.html
">
will return
``http://example.com/blog/entry/how smaug stole all the bitcoins.html``.
Returns:
str: The cleaned link.
'''
return ''.join(
[line.strip().replace('\t', '') for line in link.splitlines()]
)
def urljoin_safe(base_url, url, allow_fragments=True):
'''urljoin with warning log on error.
Returns:
str, None'''
try:
return wpull.url.urljoin(
base_url, url, allow_fragments=allow_fragments
)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=url, error=error
))
def is_likely_inline(link):
'''Return whether the link is likely to be inline.'''
file_type = mimetypes.guess_type(link, strict=False)[0]
if file_type:
top_level_type, subtype = file_type.split('/', 1)
return top_level_type in ('image', 'video', 'audio') or subtype == 'javascript'
_mimetypes_db = mimetypes.MimeTypes()
MIMETYPES = frozenset(
itertools.chain(
_mimetypes_db.types_map[0].values(),
_mimetypes_db.types_map[1].values(),
['text/javascript']
)
)
ALPHANUMERIC_CHARS = frozenset(string.ascii_letters + string.digits)
NUMERIC_CHARS = frozenset(string.digits)
COMMON_TLD = frozenset(['com', 'org', 'net', 'int', 'edu', 'gov', 'mil'])
HTML_TAGS = frozenset([
"a", "abbr", "acronym", "address",
"applet", "area", "article", "aside", "audio", "b",
"base", "basefont", "bdi", "bdo", "big", "blockquote",
"body", "br", "button", "canvas", "caption", "center",
"cite", "code", "col", "colgroup", "command",
"datalist", "dd", "del", "details", "dfn", "dir",
"div", "dl", "dt", "em", "embed", "fieldset",
"figcaption", "figure", "font", "footer", "form",
"frame", "frameset", "head", "header", "hgroup", "h1",
"h2", "h3", "h4", "h5", "h6", "hr", "html", "i",
"iframe", "img", "input", "ins", "kbd", "keygen",
"label", "legend", "li", "link", "map", "mark", "menu",
"meta", "meter", "nav", "noframes", "noscript",
"object", "ol", "optgroup", "option", "output", "p",
"param", "pre", "progress", "q", "rp", "rt", "ruby",
"s", "samp", "script", "section", "select", "small",
"source", "span", "strike", "strong", "style", "sub",
"summary", "sup", "table", "tbody", "td", "textarea",
"tfoot", "th", "thead", "time", "title", "tr", "track",
"tt", "u", "ul", "var", "video", "wbr"
])
FIRST_PART_TLD_PATTERN = re.compile(r'[^/][a-zA-Z0-9.-]+\.({})/.'.format('|'.join(COMMON_TLD)), re.IGNORECASE)
# These "likely link" functions are based from
# https://github.com/internetarchive/heritrix3/
# blob/339e6ec87a7041f49c710d1d0fb94be0ec972ee7/commons/src/
# main/java/org/archive/util/UriUtils.java
def is_likely_link(text):
'''Return whether the text is likely to be a link.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
'''
text = text.lower()
# Check for absolute or relative URLs
if (
text.startswith('http://')
or text.startswith('https://')
or text.startswith('ftp://')
or text.startswith('/')
or text.startswith('//')
or text.endswith('/')
or text.startswith('../')
):
return True
# Check if it has a alphanumeric file extension and not a decimal number
dummy, dot, file_extension = text.rpartition('.')
if dot and file_extension and len(file_extension) <= 4:
file_extension_set = frozenset(file_extension)
if file_extension_set \
and file_extension_set <= ALPHANUMERIC_CHARS \
and not file_extension_set <= NUMERIC_CHARS:
if file_extension in COMMON_TLD:
return False
file_type = mimetypes.guess_type(text, strict=False)[0]
if file_type:
return True
else:
return False
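# Illustrative behaviour (a sketch): is_likely_link('/static/app.js') is True
# (leading slash), is_likely_link('foo.html') is True (known file extension),
# while is_likely_link('3.14') falls through (purely numeric extension).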
def is_unlikely_link(text):
'''Return whether the text is likely to cause false positives.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
'''
# Check for string concatenation in JavaScript
if text[:1] in ',;+:' or text[-1:] in '.,;+:':
return True
# Check for unusual characters
if re.search(r'''[\\$()'"[\]{}|<>`]''', text):
return True
if text[:1] == '.' \
and not text.startswith('./') \
and not text.startswith('../'):
return True
if text in ('/', '//'):
return True
if '//' in text and '://' not in text and not text.startswith('//'):
return True
# Forbid strings like mimetypes
if text in MIMETYPES:
return True
tag_1, dummy, tag_2 = text.partition('.')
if tag_1 in HTML_TAGS and tag_2 != 'html':
return True
# Forbid things where the first part of the path looks like a domain name
if FIRST_PART_TLD_PATTERN.match(text):
return True
@functools.lru_cache()
def identify_link_type(filename):
'''Return link type guessed by filename extension.
Returns:
str: A value from :class:`.item.LinkType`.
'''
mime_type = mimetypes.guess_type(filename)[0]
if not mime_type:
return
if mime_type == 'text/css':
return LinkType.css
elif mime_type == 'application/javascript':
return LinkType.javascript
elif mime_type == 'text/html' or mime_type.endswith('xml'):
return LinkType.html
elif mime_type.startswith('video') or \
mime_type.startswith('image') or \
mime_type.startswith('audio') or \
mime_type.endswith('shockwave-flash'):
return LinkType.media
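# Illustrative mapping (a sketch): identify_link_type('theme.css') returns
# LinkType.css, identify_link_type('index.html') returns LinkType.html, and
# identify_link_type('clip.mp4') returns LinkType.media.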
|
gpl-3.0
| -2,034,855,246,780,875,500 | 27.797521 | 110 | 0.587028 | false |
leekchan/tornado_test
|
tornado/test/httpserver_test.py
|
1
|
39635
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
"""Reads an HTTP response from `stream` and runs callback with its
headers and body."""
chunks = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
self.headers = headers
def data_received(self, chunk):
chunks.append(chunk)
def finish(self):
callback((self.headers, b''.join(chunks)))
conn = HTTP1Connection(stream, True)
conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
if self.request.protocol != self.expected_protocol:
raise Exception("unexpected protocol")
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
skipIfOldSSL = unittest.skipIf(
getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
"old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
class SSLTestMixin(object):
def get_ssl_options(self):
return dict(ssl_version=self.get_ssl_version(),
**AsyncHTTPSTestCase.get_ssl_options())
def get_ssl_version(self):
raise NotImplementedError()
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A' * 5000)
self.assertEqual(response.body, b"Got 5000 bytes in POST")
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
with ExpectLog(gen_log, 'Uncaught exception', required=False):
self.http_client.fetch(
self.get_url("/").replace('https:', 'http:'),
self.stop,
request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
def get_ssl_options(self):
context = ssl_options_to_context(
AsyncHTTPSTestCase.get_ssl_options(self))
assert isinstance(context, ssl.SSLContext)
return context
class BadSSLOptionsTest(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(KeyError, HTTPServer, application, ssl_options={
"keyfile": "/__missing__.crt",
})
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, 'test.crt')
self.assertRaises(ValueError, HTTPServer, application, ssl_options={
"certfile": "/__mising__.crt",
})
self.assertRaises(ValueError, HTTPServer, application, ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key"
})
# This actually works because both files exist
HTTPServer(application, ssl_options={
"certfile": existing_certificate,
"keyfile": existing_certificate
})
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body):
with closing(IOStream(socket.socket())) as stream:
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
stream.write(
b"\r\n".join(headers +
[utf8("Content-Length: %d\r\n" % len(body))]) +
b"\r\n" + body)
read_stream_body(stream, self.stop)
headers, body = self.wait()
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9",
],
b"\r\n".join([
b"Content-Disposition: form-data; name=argument",
b"",
u("\u00e1").encode("utf-8"),
b"--1234567890",
u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
b"",
u("\u00fa").encode("utf-8"),
b"--1234567890--",
b"",
]))
data = json_decode(response)
self.assertEqual(u("\u00e9"), data["header"])
self.assertEqual(u("\u00e1"), data["argument"])
self.assertEqual(u("\u00f3"), data["filename"])
self.assertEqual(u("\u00fa"), data["filebody"])
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
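        # Illustrative wire exchange for this test:
        #   C: POST /hello HTTP/1.1
        #      Content-Length: 1024
        #      Expect: 100-continue
        #   S: HTTP/1.1 100 (Continue)
        #   C: <1024 bytes of body>
        #   S: HTTP/1.1 200 OK (headers, then "Got 1024 bytes in POST")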
stream = IOStream(socket.socket(), io_loop=self.io_loop)
stream.connect(("localhost", self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
b"Content-Length: 1024",
b"Expect: 100-continue",
b"Connection: close",
b"\r\n"]), callback=self.stop)
self.wait()
stream.read_until(b"\r\n\r\n", self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
stream.write(b"a" * 1024)
stream.read_until(b"\r\n", self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
stream.read_until(b"\r\n\r\n", self.stop)
header_data = self.wait()
headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Got 1024 bytes in POST")
stream.close()
class EchoHandler(RequestHandler):
def get(self):
self.write(recursive_unicode(self.request.arguments))
def post(self):
self.write(recursive_unicode(self.request.arguments))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
fields = [
('method', str),
('uri', str),
('version', str),
('remote_ip', str),
('protocol', str),
('host', str),
('path', str),
('query', str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type('header_key', list(self.request.headers.keys())[0], str)
self.check_type('header_value', list(self.request.headers.values())[0], str)
self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
# secure cookies
self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
def post(self):
self.check_type('body', self.request.body, bytes)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
def get_app(self):
return Application([("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
("//doubleslash", EchoHandler),
])
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u("\u00e9")]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u(""), u("")]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
def test_double_slash(self):
# urlparse.urlsplit (which tornado.httpserver used to use
# incorrectly) would parse paths beginning with "//" as
# protocol-relative urls.
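        # For reference, the stdlib behavior that was being misused (checked
        # against CPython's urllib.parse; illustrative):
        #   >>> urlsplit("//doubleslash")
        #   SplitResult(scheme='', netloc='doubleslash', path='', query='', fragment='')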
response = self.fetch("//doubleslash")
self.assertEqual(200, response.code)
self.assertEqual(json_decode(response.body), {})
def test_malformed_body(self):
# parse_qs is pretty forgiving, but it will fail on python 3
# if the data is not utf8. On python 2 parse_qs will work,
# but then the recursive_unicode call in EchoHandler will
# fail.
if str is bytes:
return
with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
response = self.fetch(
'/echo', method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=b'\xe9')
self.assertEqual(200, response.code)
self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/echo', EchoHandler),
])
def setUp(self):
super(HTTPServerRawTest, self).setUp()
self.stream = IOStream(socket.socket())
self.stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
def tearDown(self):
self.stream.close()
super(HTTPServerRawTest, self).tearDown()
def test_empty_request(self):
self.stream.close()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
def test_malformed_first_line(self):
with ExpectLog(gen_log, '.*Malformed HTTP request line'):
self.stream.write(b'asdf\r\n\r\n')
# TODO: need an async version of ExpectLog so we don't need
# hard-coded timeouts here.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
self.stop)
self.wait()
def test_malformed_headers(self):
with ExpectLog(gen_log, '.*Malformed HTTP headers'):
self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
self.stop)
self.wait()
def test_chunked_request_body(self):
# Chunked requests are not widely supported and we don't have a way
# to generate them in AsyncHTTPClient, but HTTPServer will read them.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
headers, response = self.wait()
self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
class XHeaderTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(remote_ip=self.request.remote_ip,
remote_protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(xheaders=True)
def test_ip_headers(self):
self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
"4.4.4.4")
valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
"4.4.4.4")
valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars)["remote_ip"],
"127.0.0.1")
invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
"127.0.0.1")
invalid_host = {"X-Real-IP": "www.google.com"}
self.assertEqual(
self.fetch_json("/", headers=invalid_host)["remote_ip"],
"127.0.0.1")
def test_scheme_headers(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")
https_scheme = {"X-Scheme": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_scheme)["remote_protocol"],
"https")
https_forwarded = {"X-Forwarded-Proto": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
"https")
bad_forwarded = {"X-Forwarded-Proto": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
"http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
def get_app(self):
return Application([('/', XHeaderTest.Handler)])
def get_httpserver_options(self):
output = super(SSLXHeaderTest, self).get_httpserver_options()
output['xheaders'] = True
return output
def test_request_without_xprotocol(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")
http_scheme = {"X-Scheme": "http"}
self.assertEqual(
self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http")
bad_scheme = {"X-Scheme": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https")
class ManualProtocolTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(protocol='https')
def test_manual_protocol(self):
self.assertEqual(self.fetch_json('/')['protocol'], 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
"unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
"""HTTPServers can listen on Unix sockets too.
Why would you want to do this? Nginx can proxy to backends listening
on unix sockets, for one thing (and managing a namespace for unix
sockets can be easier than managing a bunch of TCP port numbers).
Unfortunately, there's no way to specify a unix socket in a url for
an HTTP client, so we have to test this by hand.
"""
def setUp(self):
super(UnixSocketTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.sockfile = os.path.join(self.tmpdir, "test.sock")
sock = netutil.bind_unix_socket(self.sockfile)
app = Application([("/hello", HelloWorldRequestHandler)])
self.server = HTTPServer(app, io_loop=self.io_loop)
self.server.add_socket(sock)
self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
self.stream.connect(self.sockfile, self.stop)
self.wait()
def tearDown(self):
self.stream.close()
self.server.stop()
shutil.rmtree(self.tmpdir)
super(UnixSocketTest, self).tearDown()
def test_unix_socket(self):
self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
self.stream.read_until(b"\r\n", self.stop)
response = self.wait()
self.assertEqual(response, b"HTTP/1.0 200 OK\r\n")
self.stream.read_until(b"\r\n\r\n", self.stop)
headers = HTTPHeaders.parse(self.wait().decode('latin1'))
self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Hello world")
def test_unix_socket_bad_request(self):
# Unix sockets don't have remote addresses so they just return an
# empty string.
with ExpectLog(gen_log, "Malformed HTTP message from"):
self.stream.write(b"garbage\r\n\r\n")
self.stream.read_until_close(self.stop)
response = self.wait()
self.assertEqual(response, b"")
class KeepAliveTest(AsyncHTTPTestCase):
"""Tests various scenarios for HTTP 1.1 keep-alive support.
These tests don't use AsyncHTTPClient because we want to control
connection reuse and closing.
"""
def get_app(self):
class HelloHandler(RequestHandler):
def get(self):
self.finish('Hello world')
def post(self):
self.finish('Hello world')
class LargeHandler(RequestHandler):
def get(self):
# 512KB should be bigger than the socket buffers so it will
# be written out in chunks.
self.write(''.join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
@asynchronous
def get(self):
self.flush()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to mimic
# some errors seen in the wild.
self.finish('closed')
return Application([('/', HelloHandler),
('/large', LargeHandler),
('/finish_on_close', FinishOnCloseHandler)])
def setUp(self):
super(KeepAliveTest, self).setUp()
self.http_version = b'HTTP/1.1'
def tearDown(self):
# We just closed the client side of the socket; let the IOLoop run
# once to make sure the server side got the message.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
if hasattr(self, 'stream'):
self.stream.close()
super(KeepAliveTest, self).tearDown()
# The next few methods are a crude manual http client
def connect(self):
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
def read_headers(self):
self.stream.read_until(b'\r\n', self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(self.http_version + b' 200'), first_line)
self.stream.read_until(b'\r\n\r\n', self.stop)
header_bytes = self.wait()
headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
return headers
def read_response(self):
self.headers = self.read_headers()
self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
body = self.wait()
self.assertEqual(b'Hello world', body)
def close(self):
self.stream.close()
del self.stream
def test_two_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.close()
def test_request_close(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.close()
# keepalive is supported for http 1.0 too, but it's opt-in
def test_http10(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertTrue('Connection' not in self.headers)
self.close()
def test_http10_keepalive(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_http10_keepalive_extra_crlf(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_pipelined_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
self.read_response()
self.read_response()
self.close()
def test_pipelined_cancel(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
# only read once
self.read_response()
self.close()
def test_cancel_during_download(self):
self.connect()
self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
self.read_headers()
self.stream.read_bytes(1024, self.stop)
self.wait()
self.close()
def test_finish_while_closed(self):
self.connect()
self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
self.read_headers()
self.close()
def test_keepalive_chunked(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n0\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
class GzipBaseTest(object):
def get_app(self):
return Application([('/', EchoHandler)])
def post_gzip(self, body):
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
gzip_file.write(utf8(body))
gzip_file.close()
compressed_body = bytesio.getvalue()
return self.fetch('/', method='POST', body=compressed_body,
headers={'Content-Encoding': 'gzip'})
def test_uncompressed(self):
response = self.fetch('/', method='POST', body='foo=bar')
self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
def get_httpserver_options(self):
return dict(decompress_request=True)
def test_gzip(self):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
def test_gzip_unsupported(self):
# Gzip support is opt-in; without it the server fails to parse
# the body (but parsing form bodies is currently just a log message,
# not a fatal error).
with ExpectLog(gen_log, "Unsupported Content-Encoding"):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
# 50 characters long, and repetitive so it can be compressed.
BODY = b'01234567890123456789012345678901234567890123456789'
CHUNK_SIZE = 16
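    # With the 50-byte BODY and a CHUNK_SIZE of 16, an uncompressed upload is
    # expected to surface as chunks of [16, 16, 16, 2] (see test_regular_body).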
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def get_httpserver_options(self):
return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def headers_received(self, start_line, headers):
self.chunk_lengths = []
def data_received(self, chunk):
self.chunk_lengths.append(len(chunk))
def finish(self):
response_body = utf8(json_encode(self.chunk_lengths))
self.connection.write_headers(
ResponseStartLine('HTTP/1.1', 200, 'OK'),
HTTPHeaders({'Content-Length': str(len(response_body))}))
self.connection.write(response_body)
self.connection.finish()
def get_app(self):
class App(HTTPServerConnectionDelegate):
def start_request(self, connection):
return StreamingChunkSizeTest.MessageDelegate(connection)
return App()
def fetch_chunk_sizes(self, **kwargs):
response = self.fetch('/', method='POST', **kwargs)
response.rethrow()
chunks = json_decode(response.body)
self.assertEqual(len(self.BODY), sum(chunks))
for chunk_size in chunks:
self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
'oversized chunk: ' + str(chunks))
self.assertGreater(chunk_size, 0,
'empty chunk: ' + str(chunks))
return chunks
def compress(self, body):
bytesio = BytesIO()
gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
gzfile.write(body)
gzfile.close()
compressed = bytesio.getvalue()
if len(compressed) >= len(body):
raise Exception("body did not shrink when compressed")
return compressed
def test_regular_body(self):
chunks = self.fetch_chunk_sizes(body=self.BODY)
# Without compression we know exactly what to expect.
self.assertEqual([16, 16, 16, 2], chunks)
def test_compressed_body(self):
self.fetch_chunk_sizes(body=self.compress(self.BODY),
headers={'Content-Encoding': 'gzip'})
# Compression creates irregular boundaries so the assertions
# in fetch_chunk_sizes are as specific as we can get.
def test_chunked_body(self):
def body_producer(write):
write(self.BODY[:20])
write(self.BODY[20:])
chunks = self.fetch_chunk_sizes(body_producer=body_producer)
# HTTP chunk boundaries translate to application-visible breaks
self.assertEqual([16, 4, 16, 14], chunks)
def test_chunked_compressed(self):
compressed = self.compress(self.BODY)
self.assertGreater(len(compressed), 20)
def body_producer(write):
write(compressed[:20])
write(compressed[20:])
self.fetch_chunk_sizes(body_producer=body_producer,
headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(max_header_size=1024)
def test_small_headers(self):
response = self.fetch("/", headers={'X-Filler': 'a' * 100})
response.rethrow()
self.assertEqual(response.body, b"Hello world")
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
self.assertEqual(response.code, 599)
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(idle_connection_timeout=0.1)
def setUp(self):
super(IdleTimeoutTest, self).setUp()
self.streams = []
def tearDown(self):
super(IdleTimeoutTest, self).tearDown()
for stream in self.streams:
stream.close()
def connect(self):
stream = IOStream(socket.socket())
stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
self.streams.append(stream)
return stream
def test_unused_connection(self):
stream = self.connect()
stream.set_close_callback(self.stop)
self.wait()
def test_idle_after_use(self):
stream = self.connect()
stream.set_close_callback(lambda: self.stop("closed"))
# Use the connection twice to make sure keep-alives are working
for i in range(2):
stream.write(b"GET / HTTP/1.1\r\n\r\n")
stream.read_until(b"\r\n\r\n", self.stop)
self.wait()
stream.read_bytes(11, self.stop)
data = self.wait()
self.assertEqual(data, b"Hello world")
# Now let the timeout trigger and close the connection.
data = self.wait()
self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
def get_app(self):
class BufferedHandler(RequestHandler):
def put(self):
self.write(str(len(self.request.body)))
@stream_request_body
class StreamingHandler(RequestHandler):
def initialize(self):
self.bytes_read = 0
def prepare(self):
if 'expected_size' in self.request.arguments:
self.request.connection.set_max_body_size(
int(self.get_argument('expected_size')))
if 'body_timeout' in self.request.arguments:
self.request.connection.set_body_timeout(
float(self.get_argument('body_timeout')))
def data_received(self, data):
self.bytes_read += len(data)
def put(self):
self.write(str(self.bytes_read))
return Application([('/buffered', BufferedHandler),
('/streaming', StreamingHandler)])
def get_httpserver_options(self):
return dict(body_timeout=3600, max_body_size=4096)
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_small_body(self):
response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
def test_large_body_buffered(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_buffered_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/buffered', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_streaming_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/streaming', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body=b'a' * 10240)
self.assertEqual(response.body, b'10240')
def test_large_body_streaming_chunked_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.body, b'10240')
@gen_test
def test_timeout(self):
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream because AsyncHTTPClient won't let us read a
# response without finishing a body.
stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
b'Content-Length: 42\r\n\r\n')
with ExpectLog(gen_log, 'Timeout reading body'):
response = yield stream.read_until_close()
self.assertEqual(response, b'')
finally:
stream.close()
@gen_test
def test_body_size_override_reset(self):
# The max_body_size override is reset between requests.
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream so we can make sure it's all on one connection.
stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
stream.write(b'a' * 10240)
headers, response = yield gen.Task(read_stream_body, stream)
self.assertEqual(response, b'10240')
# Without the ?expected_size parameter, we get the old default value
stream.write(b'PUT /streaming HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
with ExpectLog(gen_log, '.*Content-Length too long'):
data = yield stream.read_until_close()
self.assertEqual(data, b'')
finally:
stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
def get_app(self):
# The old request_callback interface does not implement the
# delegate interface, and writes its response via request.write
# instead of request.connection.write_headers.
def handle_request(request):
message = b"Hello world"
request.write(utf8("HTTP/1.1 200 OK\r\n"
"Content-Length: %d\r\n\r\n" % len(message)))
request.write(message)
request.finish()
return handle_request
def test_legacy_interface(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
|
apache-2.0
| -6,279,619,311,202,094,000 | 36.497635 | 110 | 0.599546 | false |
jeremiahyan/odoo
|
odoo/tools/template_inheritance.py
|
1
|
9597
|
from lxml import etree
from lxml.builder import E
import copy
import itertools
import logging
from odoo.tools.translate import _
from odoo.tools import SKIPPED_ELEMENT_TYPES, html_escape
_logger = logging.getLogger(__name__)
def add_text_before(node, text):
""" Add text before ``node`` in its XML tree. """
if text is None:
return
prev = node.getprevious()
if prev is not None:
prev.tail = (prev.tail or "") + text
else:
parent = node.getparent()
parent.text = (parent.text or "") + text
def add_text_inside(node, text):
""" Add text inside ``node``. """
if text is None:
return
if len(node):
node[-1].tail = (node[-1].tail or "") + text
else:
node.text = (node.text or "") + text
def remove_element(node):
""" Remove ``node`` but not its tail, from its XML tree. """
add_text_before(node, node.tail)
node.tail = None
node.getparent().remove(node)
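# Illustration only (nothing calls this and the helper name is ours, not part
# of Odoo): remove_element keeps the removed node's tail text by merging it
# into whatever precedes the node.
def _example_remove_element():  # pragma: no cover -- documentation only
    root = etree.fromstring('<root>A<a/>tail<b/></root>')
    remove_element(root[0])  # drops <a/> but merges "tail" into the parent text
    # etree.tostring(root) == b'<root>Atail<b/></root>'
    return root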
def locate_node(arch, spec):
""" Locate a node in a source (parent) architecture.
Given a complete source (parent) architecture (i.e. the field
`arch` in a view), and a 'spec' node (a node in an inheriting
view that specifies the location in the source view of what
should be changed), return (if it exists) the node in the
source view matching the specification.
:param arch: a parent architecture to modify
:param spec: a modifying node in an inheriting view
:return: a node in the source matching the spec
"""
if spec.tag == 'xpath':
expr = spec.get('expr')
try:
xPath = etree.ETXPath(expr)
except etree.XPathSyntaxError:
_logger.error("XPathSyntaxError while parsing xpath %r", expr)
raise
nodes = xPath(arch)
return nodes[0] if nodes else None
elif spec.tag == 'field':
# Only compare the field name: a field can be only once in a given view
# at a given level (and for multilevel expressions, we should use xpath
# inheritance spec anyway).
for node in arch.iter('field'):
if node.get('name') == spec.get('name'):
return node
return None
for node in arch.iter(spec.tag):
if isinstance(node, SKIPPED_ELEMENT_TYPES):
continue
if all(node.get(attr) == spec.get(attr) for attr in spec.attrib
if attr not in ('position', 'version')):
# Version spec should match parent's root element's version
if spec.get('version') and spec.get('version') != arch.get('version'):
return None
return node
return None
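# Illustration only (nothing calls this and the helper name is ours, not part
# of Odoo): spec nodes are matched either by xpath expression or by tag and
# attributes, and both routes below resolve to the same element.
def _example_locate_node():  # pragma: no cover -- documentation only
    arch = etree.fromstring('<form><field name="partner_id"/></form>')
    by_field = locate_node(arch, etree.fromstring(
        '<field name="partner_id" position="after"/>'))
    by_xpath = locate_node(arch, etree.fromstring(
        '<xpath expr="//field[@name=\'partner_id\']" position="after"/>'))
    assert by_field is by_xpath  # both resolve to the same element
    return by_field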
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=lambda s: True):
""" Apply an inheriting view (a descendant of the base view)
Apply to a source architecture all the spec nodes (i.e. nodes
describing where and what changes to apply to some parent
architecture) given by an inheriting view.
:param Element source: a parent architecture to modify
:param Element specs_tree: a modifying architecture in an inheriting view
:param bool inherit_branding:
:param pre_locate: function that is executed before locating a node.
This function receives an arch as argument.
This is required by studio to properly handle group_ids.
:return: a modified source where the specs are applied
:rtype: Element
"""
    # Queue of specification nodes (i.e. nodes describing where and what
    # changes to apply to some parent architecture).
specs = specs_tree if isinstance(specs_tree, list) else [specs_tree]
def extract(spec):
"""
        Utility function that locates a node given a specification, removes
        it from the source and returns it.
"""
if len(spec):
raise ValueError(
_("Invalid specification for moved nodes: %r", etree.tostring(spec, encoding='unicode'))
)
pre_locate(spec)
to_extract = locate_node(source, spec)
if to_extract is not None:
remove_element(to_extract)
return to_extract
else:
raise ValueError(
_("Element %r cannot be located in parent view", etree.tostring(spec, encoding='unicode'))
)
while len(specs):
spec = specs.pop(0)
if isinstance(spec, SKIPPED_ELEMENT_TYPES):
continue
if spec.tag == 'data':
specs += [c for c in spec]
continue
pre_locate(spec)
node = locate_node(source, spec)
if node is not None:
pos = spec.get('position', 'inside')
if pos == 'replace':
for loc in spec.xpath(".//*[text()='$0']"):
loc.text = ''
loc.append(copy.deepcopy(node))
if node.getparent() is None:
spec_content = None
comment = None
for content in spec:
if content.tag is not etree.Comment:
spec_content = content
break
else:
comment = content
source = copy.deepcopy(spec_content)
# only keep the t-name of a template root node
t_name = node.get('t-name')
if t_name:
source.set('t-name', t_name)
if comment is not None:
text = source.text
source.text = None
comment.tail = text
source.insert(0, comment)
else:
replaced_node_tag = None
for child in spec:
if child.get('position') == 'move':
child = extract(child)
if inherit_branding and not replaced_node_tag and child.tag is not etree.Comment:
# To make a correct branding, we need to
# - know exactly which node has been replaced
# - store it before anything else has altered the Tree
# Do it exactly here :D
child.set('meta-oe-xpath-replacing', node.tag)
# We just store the replaced node tag on the first
# child of the xpath replacing it
replaced_node_tag = node.tag
node.addprevious(child)
node.getparent().remove(node)
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = child.get('name')
value = child.text or ''
if child.get('add') or child.get('remove'):
assert not child.text
separator = child.get('separator', ',')
if separator == ' ':
separator = None # squash spaces
to_add = (
s for s in (s.strip() for s in child.get('add', '').split(separator))
if s
)
to_remove = {s.strip() for s in child.get('remove', '').split(separator)}
values = (s.strip() for s in node.get(attribute, '').split(separator))
value = (separator or ' ').join(itertools.chain(
(v for v in values if v not in to_remove),
to_add
))
if value:
node.set(attribute, value)
elif attribute in node.attrib:
del node.attrib[attribute]
elif pos == 'inside':
add_text_inside(node, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
node.append(child)
elif pos == 'after':
# add a sentinel element right after node, insert content of
# spec before the sentinel, then remove the sentinel element
sentinel = E.sentinel()
node.addnext(sentinel)
add_text_before(sentinel, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
sentinel.addprevious(child)
remove_element(sentinel)
elif pos == 'before':
add_text_before(node, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
node.addprevious(child)
else:
raise ValueError(
_("Invalid position attribute: '%s'") %
pos
)
else:
attrs = ''.join([
' %s="%s"' % (attr, html_escape(spec.get(attr)))
for attr in spec.attrib
if attr != 'position'
])
tag = "<%s%s>" % (spec.tag, attrs)
raise ValueError(
_("Element '%s' cannot be located in parent view", tag)
)
return source
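# Worked example (illustration only; nothing calls this and the helper name is
# ours, not part of Odoo): applying an "after" spec to a minimal source view.
def _example_apply_after_spec():  # pragma: no cover -- documentation only
    source = etree.fromstring('<form><field name="name"/></form>')
    spec = etree.fromstring(
        '<xpath expr="//field[@name=\'name\']" position="after">'
        '<field name="email"/></xpath>')
    result = apply_inheritance_specs(source, spec)
    # etree.tostring(result) == b'<form><field name="name"/><field name="email"/></form>'
    return result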
|
gpl-3.0
| 2,848,593,791,806,002,000 | 39.323529 | 106 | 0.51068 | false |
UMWRG/PywrApp
|
tests/test_nodes_edges.py
|
1
|
1490
|
"""
The unit tests in this module test the internal behaviour of the Pywr-Hydra application.
"""
from hydra_pywr.importer import PywrHydraImporter
import pytest
import json
@pytest.fixture()
def pywr_nodes_edges():
""" Example node and edge data from Pywr.
This data looks like "nodes" and "edges" section of a Pywr JSON file.
"""
nodes_edges = {
"nodes": [
{
"name": "supply1",
"type": "Input",
"max_flow": 15
},
{
"name": "link1",
"type": "Link"
},
{
"name": "demand1",
"type": "Output",
"max_flow": 10,
"cost": -10
}
],
"edges": [
["supply1", "link1"],
["link1", "demand1"]
]
}
return nodes_edges
@pytest.fixture()
def pywr_nodes_edges_importer(pywr_nodes_edges):
# Note the use of a fake template here because we're not testing nodes/links.
return PywrHydraImporter(pywr_nodes_edges, {'templatetypes': []})
def test_nodes_to_attributes(pywr_nodes_edges_importer):
importer = pywr_nodes_edges_importer
attributes = importer.attributes_from_nodes()
attribute_names = [a['name'] for a in attributes]
for key in ('max_flow', 'cost'):
assert key in attribute_names
for key in ('name', 'comment', 'type'):
assert key not in attribute_names
|
gpl-3.0
| -2,707,433,787,708,751,400 | 24.254237 | 88 | 0.53557 | false |
dingzg/onepanel
|
bin/install_config.py
|
1
|
3571
|
#!/usr/bin/env python2.6
#-*- coding: utf-8 -*-
# Copyright [OnePanel]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
#sys.path.append(os.path.join(os.path.split(os.path.dirname(__file__))[0], r'../lib'))
root_path = os.path.split(os.path.dirname(__file__))[0]
sys.path.insert(0, os.path.join(root_path, 'lib'))
import socket
import hashlib
import hmac
import time
import datetime
import base64
from com.config import Config
from com.utils import randstr, is_valid_ip
if __name__ == "__main__":
if len(sys.argv) != 3:
print '''Usage: %s option value
OPTIONS:
ip: ip address (need restart)
port: port number (need restart)
username: username of admin account
password: password of admin account
loginlock: set the login lock. value: on or off
accesskey: access key for remote access, must be empty
             or a base64-encoded string that decodes to a 32-byte key.
accesskeyenable: set the remote access switch. value: on or off
''' % sys.argv[0]
sys.exit()
data_path = os.path.join(os.path.split(os.path.dirname(__file__))[0], r'data')
config = Config(data_path + '/config.ini')
option, value = sys.argv[1:]
if option == 'ip':
if value != '*' and not is_valid_ip(value):
print 'Error: %s is not a valid IP address' % value
sys.exit(-1)
config.set('server', 'ip', value)
elif option == 'port':
port = int(value)
        if not (0 < port < 65535):
            print 'Error: port number should be between 0 and 65535'
sys.exit(-1)
config.set('server', 'port', value)
elif option == 'username':
config.set('auth', 'username', value)
elif option == 'password':
key = randstr()
md5 = hashlib.md5(value).hexdigest()
pwd = hmac.new(key, md5).hexdigest()
config.set('auth', 'password', '%s:%s' % (pwd, key))
elif option == 'loginlock':
if value not in ('on', 'off'):
print 'Error: loginlock value should be either on or off'
sys.exit(-1)
if value == 'on':
config.set('runtime', 'loginlock', 'on')
config.set('runtime', 'loginfails', 0)
config.set('runtime', 'loginlockexpire',
int(time.mktime(datetime.datetime.max.timetuple())))
elif value == 'off':
config.set('runtime', 'loginlock', 'off')
config.set('runtime', 'loginfails', 0)
config.set('runtime', 'loginlockexpire', 0)
elif option == 'accesskey':
if value != '':
try:
if len(base64.b64decode(value)) != 32: raise Exception()
except:
print 'Error: invalid accesskey format'
sys.exit(-1)
config.set('auth', 'accesskey', value)
elif option == 'accesskeyenable':
if value not in ('on', 'off'):
print 'Error: accesskeyenable value should be either on or off'
sys.exit(-1)
config.set('auth', 'accesskeyenable', value)
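# Example invocations (illustrative):
#   python install_config.py port 8888
#   python install_config.py username admin
#   python install_config.py accesskeyenable off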
|
apache-2.0
| -6,886,681,214,839,713,000 | 36.208333 | 86 | 0.602352 | false |
Eric89GXL/vispy
|
vispy/util/fonts/tests/test_font.py
|
1
|
1301
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
import warnings
from vispy.testing import assert_in, run_tests_if_main
from vispy.util.fonts import list_fonts, _load_glyph, _vispy_fonts
import pytest
known_bad_fonts = set([
'Noto Color Emoji', # https://github.com/vispy/vispy/issues/1771
])
# Try one bundled vispy font plus every system font; sys_fonts excludes the
# bundled vispy fonts so they are not tested twice.
sys_fonts = set(list_fonts()) - set(_vispy_fonts)
def test_font_list():
"""Test font listing"""
f = list_fonts()
assert len(f) > 0
for font in _vispy_fonts:
assert_in(font, f)
@pytest.mark.parametrize('face', ['OpenSans'] + sorted(sys_fonts))
def test_font_glyph(face):
"""Test loading glyphs"""
if face in known_bad_fonts:
pytest.xfail()
font_dict = dict(face=face, size=12, bold=False, italic=False)
glyphs_dict = dict()
chars = 'foobar^C&#'
for char in chars:
# Warning that Arial might not exist
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
_load_glyph(font_dict, char, glyphs_dict)
assert len(glyphs_dict) == np.unique([c for c in chars]).size
run_tests_if_main()
|
bsd-3-clause
| -699,862,398,090,827,600 | 28.568182 | 73 | 0.654881 | false |
yenliangl/bitcoin
|
test/functional/test_runner.py
|
1
|
26026
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
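# Convention used throughout this script: wrap text in a style and reset it
# afterwards, e.g. BOLD[1] + "TEST" + BOLD[0] renders bold on ANSI terminals.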
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_fee_estimation.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'p2p_timeouts.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_disableprivatekeys.py',
'wallet_disableprivatekeys.py --usecli',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'p2p_leak_tx.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'feature_filelock.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_₿_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
# If we fixed the command-line and filename encoding issue on Windows, these two lines could be removed
if config["environment"]["EXEEXT"] == ".exe":
tmpdir = "%s/test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [test + ".py" if ".py" not in test else test for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
runs_ci=args.ci,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
            # On Travis this warning is an error, so that commits which leave scripts out of the test list cannot be merged into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
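        # all_cmds is read from the rpc_interface.txt reference dump; each
        # coverage.* file contributes the commands a test actually invoked,
        # and the uncovered set is simply the difference of the two.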
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
mit
| 4,196,581,227,555,377,700 | 38.596651 | 205 | 0.612339 | false |
ciarams87/PyU4V
|
PyU4V/tests/unit_tests/test_pyu4v_common.py
|
1
|
28205
|
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_pyu4v_common.py."""
import csv
import testtools
import time
from unittest import mock
from PyU4V import common
from PyU4V import rest_requests
from PyU4V.tests.unit_tests import pyu4v_common_data as pcd
from PyU4V.tests.unit_tests import pyu4v_fakes as pf
from PyU4V import univmax_conn
from PyU4V.utils import constants
from PyU4V.utils import exception
# Resource constants
SLOPROVISIONING = constants.SLOPROVISIONING
SYMMETRIX = constants.SYMMETRIX
VOLUME = constants.VOLUME
UNISPHERE_VERSION = constants.UNISPHERE_VERSION
class PyU4VCommonTest(testtools.TestCase):
"""Test common."""
def setUp(self):
"""setUp."""
super(PyU4VCommonTest, self).setUp()
self.data = pcd.CommonData()
self.conf_file, self.conf_dir = (
pf.FakeConfigFile.create_fake_config_file())
univmax_conn.file_path = self.conf_file
with mock.patch.object(
rest_requests.RestRequests, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
self.conn = univmax_conn.U4VConn()
self.common = self.conn.common
self.common.interval = 1
self.common.retries = 1
def tearDown(self):
"""tearDown."""
super(PyU4VCommonTest, self).tearDown()
pf.FakeConfigFile.delete_fake_config_file(
self.conf_file, self.conf_dir)
def test_wait_for_job_complete(self):
"""Test wait_for_job_complete."""
_, _, status, _ = self.common.wait_for_job_complete(
self.data.job_list[0])
self.assertEqual('SUCCEEDED', status)
@mock.patch.object(common.CommonFunctions, '_is_job_finished',
return_value=(True, '', 0, 'SUCCEEDED', ''))
def test_wait_for_job_complete_running(self, mock_job):
"""Test wait_for_job_complete running."""
_, _, status, _ = self.common.wait_for_job_complete(
self.data.job_list[1])
self.assertEqual('SUCCEEDED', status)
@mock.patch.object(common.CommonFunctions, '_is_job_finished',
side_effect=[exception.VolumeBackendAPIException(
'random exception')])
def test_wait_for_job_complete_exception(self, mock_job):
"""Test wait_for_job_complete exception."""
self.assertRaises(exception.VolumeBackendAPIException,
self.common.wait_for_job_complete,
self.data.job_list[1])
@mock.patch.object(common.CommonFunctions, '_is_job_finished',
return_value=(False, '', 0, 'RUNNING', ''))
def test_wait_for_job_complete_timeout(self, mock_job):
"""Test wait_for_job_complete timeout."""
self.common.retries = 0
rc, result, status, _ = self.common.wait_for_job_complete(
self.data.job_list[1])
self.assertEqual('RUNNING', status)
self.assertEqual(-1, rc)
self.assertIsNone(result)
def test_get_job_by_id(self):
"""Test get_job_by_id."""
job = self.common.get_job_by_id(self.data.job_list[0]['jobId'])
self.assertEqual('SUCCEEDED', job['status'])
self.assertEqual('12345', job['jobId'])
@mock.patch.object(common.CommonFunctions, 'get_job_by_id',
return_value=pcd.CommonData.job_list[0])
def test_is_job_finished_success(self, mock_job):
job = self.common._is_job_finished(
self.data.job_list[0]['jobId'])
self.assertEqual((True, None, 0, 'SUCCEEDED', None), job)
@mock.patch.object(common.CommonFunctions, 'get_job_by_id',
return_value=pcd.CommonData.job_list[2])
def test_is_job_finished_failure(self, mock_job):
job = self.common._is_job_finished(
self.data.job_list[2]['jobId'])
self.assertEqual((True, None, -1, 'FAILED', None), job)
@mock.patch.object(common.CommonFunctions, 'get_job_by_id',
return_value=pcd.CommonData.job_list[1])
def test_is_job_finished_incomplete(self, mock_job):
job = self.common._is_job_finished(
self.data.job_list[1]['jobId'])
self.assertEqual((False, None, 0, 'RUNNING', None), job)
def test_check_status_code_success(self):
"""Test check_status_code_success."""
self.common.check_status_code_success(
'test-success', 201, '')
self.assertRaises(exception.ResourceNotFoundException,
self.common.check_status_code_success,
'test-404', 404, '')
self.assertRaises(exception.UnauthorizedRequestException,
self.common.check_status_code_success,
'test-401', 401, '')
self.assertRaises(exception.VolumeBackendAPIException,
self.common.check_status_code_success,
'test-500', 500, '')
@mock.patch.object(common.CommonFunctions, 'wait_for_job_complete',
side_effect=[(0, '', '', ''), (1, '', '', '')])
def test_wait_for_job(self, mock_complete):
"""Test wait_for_job."""
# Not an async job
self.common.wait_for_job('sync-job', 200, dict())
mock_complete.assert_not_called()
# Async, completes successfully
self.common.wait_for_job('sync-job', 202, dict())
mock_complete.assert_called_once()
# Async, job fails
self.assertRaises(exception.VolumeBackendAPIException,
self.common.wait_for_job, 'sync-job', 202, dict())
def test_build_uri_unhidden(self):
"""Test build_target_uri."""
with mock.patch.object(self.common, '_build_uri') as mck_build:
self.common.build_target_uri(
'test_arg', test_kwarg_in='test_kwarg')
mck_build.assert_called_once_with(
'test_arg', test_kwarg_in='test_kwarg')
def test_build_uri_version_control(self):
"""Test _build_uri."""
# No version supplied, use self.U4V_VERSION
built_uri_1 = self.common._build_uri(
category=SLOPROVISIONING, resource_level=SYMMETRIX,
resource_level_id=self.data.array, resource_type=VOLUME)
uri_1 = ('/{ver}/sloprovisioning/symmetrix/{array}/volume'.format(
ver=UNISPHERE_VERSION, array=self.data.array))
self.assertEqual(uri_1, built_uri_1)
# version supplied as keyword argument
resource_name = self.data.device_id
version_2 = self.data.U4P_VERSION
built_uri_2 = self.common._build_uri(
category=SLOPROVISIONING, resource_level=SYMMETRIX,
resource_level_id=self.data.array, resource_type=VOLUME,
resource_type_id=resource_name, version=version_2)
uri_2 = (
'/{ver}/sloprovisioning/symmetrix/{array}/volume/{res}'.format(
ver=version_2, array=self.data.array, res=resource_name))
self.assertEqual(uri_2, built_uri_2)
# version and no_version keywords supplied, no_version overruled
built_uri_3 = self.common._build_uri(
category=SLOPROVISIONING, resource_level=SYMMETRIX,
resource_level_id=self.data.array, resource_type=VOLUME,
resource_type_id=resource_name, version=UNISPHERE_VERSION,
no_version=True)
uri_3 = (
'/{ver}/sloprovisioning/symmetrix/{array}/volume/{res}'.format(
ver=UNISPHERE_VERSION, array=self.data.array,
res=resource_name))
self.assertEqual(uri_3, built_uri_3)
# no_version flag passed, no version required for URI
built_uri_4 = self.common._build_uri(
category=SLOPROVISIONING, resource_level=SYMMETRIX,
resource_level_id=self.data.array, resource_type=VOLUME,
resource_type_id=resource_name, no_version=True)
uri_4 = ('/sloprovisioning/symmetrix/{array}/volume/{res}'.format(
array=self.data.array, res=resource_name))
self.assertEqual(uri_4, built_uri_4)
def test_traditional_build_uri(self):
"""Test _build_uri."""
# Only default args arrayID, category, resource_type passed
built_uri = self.common._build_uri(
self.data.array, 'sloprovisioning', 'volume')
temp_uri = (
'/{}/sloprovisioning/symmetrix/{array}/volume'.format(
self.data.U4P_VERSION, array=self.data.array))
self.assertEqual(temp_uri, built_uri)
# Default args passed along with resource_name and version kwarg
built_uri_2 = self.common._build_uri(
self.data.array, 'sloprovisioning', 'volume',
version=self.data.U4P_VERSION, resource_name=self.data.device_id)
temp_uri_2 = (
'/{}/sloprovisioning/symmetrix/{array}/volume/{res}'.format(
self.data.U4P_VERSION, array=self.data.array,
res=self.data.device_id))
self.assertEqual(temp_uri_2, built_uri_2)
def test_new_build_uri_minimum(self):
"""Test _build_uri."""
# Pass in only minimum required kwargs - version is optional
built_uri_1 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix')
temp_uri_1 = '/{}/sloprovisioning/symmetrix'.format(
self.data.U4P_VERSION)
self.assertEqual(temp_uri_1, built_uri_1)
def test_new_build_uri_resource_level_id(self):
"""Test _build_uri."""
# Pass in minimum kwargs with specified resource_level_id
built_uri_2 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array)
temp_uri_2 = ('/{}/sloprovisioning/symmetrix/{}'.format(
self.data.U4P_VERSION, self.data.array))
self.assertEqual(temp_uri_2, built_uri_2)
def test_new_build_uri_resource_type(self):
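        """Test _build_uri."""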
# Pass in minimum kwargs with specified resource_type
built_uri_3 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup')
temp_uri_3 = ('/{}/sloprovisioning/symmetrix/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup'))
self.assertEqual(temp_uri_3, built_uri_3)
def test_new_build_uri_resource_type_id(self):
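        """Test _build_uri."""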
# Pass in minimum kwargs with specified resource_type_id
built_uri_4 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup',
resource_type_id=self.data.storagegroup_name_1)
temp_uri_4 = ('/{}/sloprovisioning/symmetrix/{}/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup',
self.data.storagegroup_name_1))
self.assertEqual(temp_uri_4, built_uri_4)
def test_new_build_uri_resource(self):
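        """Test _build_uri."""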
# Pass in minimum kwargs with specified resource
built_uri_5 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup',
resource_type_id=self.data.storagegroup_name_1,
resource='snap')
temp_uri_5 = ('/{}/sloprovisioning/symmetrix/{}/{}/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup',
self.data.storagegroup_name_1, 'snap'))
self.assertEqual(temp_uri_5, built_uri_5)
def test_new_build_uri_resource_id(self):
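        """Test _build_uri."""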
# Pass in minimum kwargs with specified resource_id
built_uri_6 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup',
resource_type_id=self.data.storagegroup_name_1,
resource='snap', resource_id=self.data.snapshot_name)
temp_uri_6 = ('/{}/sloprovisioning/symmetrix/{}/{}/{}/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup',
self.data.storagegroup_name_1, 'snap', self.data.snapshot_name))
self.assertEqual(temp_uri_6, built_uri_6)
def test_new_build_uri_object_type(self):
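        """Test _build_uri."""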
# Pass in minimum kwargs with specified object_type
built_uri_7 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup',
resource_type_id=self.data.storagegroup_name_1,
resource='snap', resource_id=self.data.snapshot_name,
object_type='generation')
temp_uri_7 = ('/{}/sloprovisioning/symmetrix/{}/{}/{}/{}/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup',
self.data.storagegroup_name_1, 'snap', self.data.snapshot_name,
'generation'))
self.assertEqual(temp_uri_7, built_uri_7)
def test_new_build_uri_object_type_id(self):
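        """Test _build_uri."""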
# Pass in minimum kwargs with specified object_type_id
built_uri_8 = self.common._build_uri(
version=self.data.U4P_VERSION, category='sloprovisioning',
resource_level='symmetrix', resource_level_id=self.data.array,
resource_type='storagegroup',
resource_type_id=self.data.storagegroup_name_1,
resource='snap', resource_id=self.data.snapshot_name,
object_type='generation', object_type_id='1')
temp_uri_8 = (
'/{}/sloprovisioning/symmetrix/{}/{}/{}/{}/{}/{}/{}'.format(
self.data.U4P_VERSION, self.data.array, 'storagegroup',
self.data.storagegroup_name_1, 'snap',
self.data.snapshot_name, 'generation', '1'))
self.assertEqual(temp_uri_8, built_uri_8)
def test_new_build_uri_performance(self):
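        """Test _build_uri."""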
# Category is performance so no use of version in URI
built_uri_9 = self.common._build_uri(
category='performance', resource_level='Array',
resource_type='keys')
temp_uri_9 = '/performance/Array/keys'
self.assertEqual(temp_uri_9, built_uri_9)
def test_get_request(self):
"""Test get_request."""
message = self.common.get_request('/version', resource_type='version')
self.assertEqual(self.data.server_version, message)
def test_get_resource(self):
"""Test get_resource."""
# Traditional Method
message = self.common.get_resource(
self.data.array, 'sloprovisioning', 'volume',
resource_name=None, params=None)
self.assertEqual(self.data.volume_list[2], message)
# New Method
message_1 = self.common.get_resource(
category='sloprovisioning',
resource_level='symmetrix',
resource_level_id=self.data.array,
resource_type='volume')
self.assertEqual(self.data.volume_list[2], message_1)
def test_create_resource(self):
"""Test create_resource."""
# Traditional Method
message = self.common.create_resource(
self.data.array, 'sloprovisioning', 'storagegroup', dict())
self.assertEqual(self.data.job_list[0], message)
# New Method
message_1 = self.common.create_resource(
category='sloprovisioning',
resource_level='storagegroup',
resource_level_id=self.data.array)
self.assertEqual(self.data.job_list[0], message_1)
def test_modify_resource(self):
"""Test modify_resource."""
# Traditional Method
message = self.common.modify_resource(
self.data.array, 'sloprovisioning', 'storagegroup', dict())
self.assertEqual(self.data.job_list[0], message)
# New Method
message_1 = self.common.modify_resource(
category='sloprovisioning',
resource_level='storagegroup',
resource_level_id=self.data.array)
self.assertEqual(self.data.job_list[0], message_1)
def test_delete_resource(self):
"""Test delete_resource."""
# Traditional Method
self.common.delete_resource(
self.data.array, 'sloprovisioning',
'storagegroup', self.data.storagegroup_name)
# New Method
self.common.delete_resource(
category='sloprovisioning',
resource_level='storagegroup',
resource_level_id=self.data.array,
resource_type_id=self.data.storagegroup_name)
def test_create_list_from_file(self):
"""Test create_list_from_file."""
example_file = """Item1\nItem2\nItem3"""
with mock.patch('builtins.open', mock.mock_open(
read_data=example_file), create=True):
list_from_file = self.common.create_list_from_file(example_file)
self.assertIsInstance(list_from_file, list)
self.assertIn('Item1', list_from_file)
@mock.patch('builtins.open', new_callable=mock.mock_open)
def test_read_csv_values(self, mck_open):
"""Test read_csv_values."""
csv_response = [
{'kpi_a': 'perf_data_1', 'kpi_b': 'perf_data_2'},
{'kpi_a': 'perf_data_3', 'kpi_b': 'perf_data_4'},
{'kpi_a': 'perf_data_5', 'kpi_b': 'perf_data_6'}]
with mock.patch.object(csv, 'DictReader', return_value=csv_response):
csv_data = self.common.read_csv_values(file_name='mock_csv_file')
reference_csv_response = {
'kpi_a': ['perf_data_1', 'perf_data_3', 'perf_data_5'],
'kpi_b': ['perf_data_2', 'perf_data_4', 'perf_data_6']}
self.assertIsInstance(csv_data, dict)
self.assertEqual(reference_csv_response, csv_data)
def test_get_uni_version(self):
"""Test get_uni_version."""
version, major_version = self.common.get_uni_version()
self.assertEqual(self.data.server_version['version'], version)
self.assertEqual(self.data.u4v_version, major_version)
def test_get_array_list(self):
"""Test get_array_list."""
array_list = self.common.get_array_list()
self.assertEqual(self.data.symm_list['symmetrixId'], array_list)
def test_get_v3_or_newer_array_list(self):
"""Test get_v3_or_newer_array_list."""
array_list = self.common.get_v3_or_newer_array_list()
self.assertEqual(self.data.symm_list['symmetrixId'], array_list)
def test_get_array(self):
"""Test get_array."""
array_details = self.common.get_array(self.data.array)
self.assertEqual(self.data.symmetrix[0], array_details)
def test_get_wlp_info_success(self):
"""Test get_wlp_information success."""
with mock.patch.object(
self.common, 'get_resource',
return_value=self.data.wlp_info) as mck_wlp_info:
wlp_info = self.common.get_wlp_information(self.data.array)
self.assertEqual(self.data.wlp_info, wlp_info)
mck_wlp_info.assert_called_once_with(
category='wlp', resource_level='symmetrix',
resource_level_id=self.data.array)
def test_get_wlp_info_fail(self):
"""Test get_wlp_information fail."""
with mock.patch.object(self.common, 'get_resource',
return_value=None):
wlp_info = self.common.get_wlp_information(self.data.array)
self.assertFalse(wlp_info)
self.assertIsInstance(wlp_info, dict)
def test_get_headroom_success(self):
"""Test get_headroom success."""
with mock.patch.object(
self.common, 'get_resource',
return_value=self.data.headroom_array) as mck_head:
headroom = self.common.get_headroom(
self.data.array, self.data.workload, 'SRP_TEST', 'Gold')
self.assertEqual(self.data.headroom_array['gbHeadroom'], headroom)
params = {'srp': 'SRP_TEST', 'slo': 'Gold',
'workloadtype': self.data.workload}
mck_head.assert_called_once_with(
category='wlp', resource_level='symmetrix',
resource_level_id=self.data.array, resource_type='headroom',
params=params)
def test_get_headroom_fail(self):
"""Test get_headroom fail."""
with mock.patch.object(self.common, 'get_resource',
return_value=None):
headroom = self.common.get_headroom(self.data.array,
self.data.workload)
self.assertFalse(headroom)
self.assertIsInstance(headroom, list)
def test_check_ipv4(self):
"""Test check_ipv4."""
self.assertTrue(self.common.check_ipv4(self.data.ip))
def test_check_ipv4_fail(self):
"""Test check_ipv4."""
self.assertFalse(self.common.check_ipv4('invalid'))
def test_check_ipv6(self):
"""Test check_ipv6."""
self.assertTrue(self.common.check_ipv6(self.data.ipv6))
def test_check_ipv6_fail(self):
"""Test check_ipv6."""
self.assertFalse(self.common.check_ipv6('invalid'))
def test_get_iterator_page_list(self):
"""Test get_iterator_page_list."""
iterator_page = self.common.get_iterator_page_list('123', 1, 1000)
self.assertEqual(self.data.iterator_page['result'], iterator_page)
def test_get_iterator_results(self):
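        """Test get_iterator_results."""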
rest_response_in = self.data.vol_with_pages
ref_response = [{'volumeId': '00001'}, {'volumeId': '00002'}]
response = self.common.get_iterator_results(rest_response_in)
self.assertEqual(response, ref_response)
def test_convert_to_snake_case(self):
"""Test convert_to_snake_case variations."""
string_1 = 'CamelCase'
string_2 = 'camelCase'
string_3 = 'Camel_Case'
string_4 = 'snake_case'
self.assertEqual(self.common.convert_to_snake_case(string_1),
'camel_case')
self.assertEqual(self.common.convert_to_snake_case(string_2),
'camel_case')
self.assertEqual(self.common.convert_to_snake_case(string_3),
'camel_case')
self.assertEqual(self.common.convert_to_snake_case(string_4),
'snake_case')
def test_download_file_success(self):
"""Test download_file success scenario."""
with mock.patch.object(
self.conn.rest_client, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
request_body = {'test_req': True}
response = self.common.download_file(
category=constants.SYSTEM, resource_level=constants.SETTINGS,
resource_type=constants.EXPORT_FILE, payload=request_body)
self.assertIsInstance(response, pf.FakeResponse)
def test_download_file_value_no_response_exception_catch(self):
"""Test download_file with no response, exception caught."""
with mock.patch.object(
self.conn.rest_client, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
with mock.patch.object(
self.common, 'check_status_code_success',
side_effect=ValueError):
request_body = {'test_req': True}
response = self.common.download_file(
category=constants.SYSTEM,
resource_level=constants.SETTINGS,
resource_type=constants.EXPORT_FILE, payload=request_body)
self.assertIsInstance(response, pf.FakeResponse)
def test_upload_file_success(self):
"""Test upload_file success scenario."""
with mock.patch.object(
self.conn.rest_client, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
ref_response = {'success': True, 'message': 'OK'}
response = self.common.upload_file(
category=constants.SYSTEM, resource_level=constants.SETTINGS,
resource_type=constants.IMPORT_FILE,
form_data={'test_req': True})
self.assertEqual(ref_response, response)
def test_upload_file_fail_backend_exception(self):
"""Test upload_file fail with volume backend API exception."""
with mock.patch.object(
self.conn.rest_client, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
with mock.patch.object(
self.conn.rest_client, 'file_transfer_request',
return_value=(pf.FakeResponse(
200, return_object=dict(),
text=self.data.response_string_dict_fail), 200)):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common.upload_file,
category=constants.SYSTEM,
resource_level=constants.SETTINGS,
resource_type=constants.IMPORT_FILE,
form_data={'test_req': True})
def test_upload_file_value_error_exception(self):
"""Test upload_file value error, real call may have been successful."""
with mock.patch.object(
self.conn.rest_client, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
with mock.patch.object(
self.common, 'check_status_code_success',
side_effect=ValueError):
ref_response = {'success': True, 'message': 'OK'}
response = self.common.upload_file(
category=constants.SYSTEM,
resource_level=constants.SETTINGS,
resource_type=constants.IMPORT_FILE,
form_data={'test_req': True})
self.assertEqual(ref_response, response)
def test_check_timestamp(self):
"""Test test_check_timestamp."""
self.assertTrue(self.common.check_timestamp('2020-12-01 15:00'))
self.assertFalse(self.common.check_timestamp('2020-12-01'))
self.assertFalse(self.common.check_timestamp('2020-12-1 15:00'))
self.assertFalse(self.common.check_timestamp('2020-12-d1 15:00'))
self.assertFalse(self.common.check_timestamp('2020-12-01 3pm'))
self.assertFalse(self.common.check_timestamp('1606836037'))
def test_check_epoch_timestamp(self):
"""Test check_epoch_timestamp."""
seconds = str(int(round(time.time())))
self.assertTrue(self.common.check_epoch_timestamp(seconds))
self.assertFalse(self.common.check_epoch_timestamp('160683603'))
self.assertFalse(self.common.check_epoch_timestamp('160683603d'))
self.assertFalse(self.common.check_epoch_timestamp('2020-12-01 15:00'))
millis = str(int(round(time.time() * 1000)))
self.assertTrue(self.common.check_epoch_timestamp(millis))
self.assertFalse(self.common.check_epoch_timestamp(
'160683603111111111'))
|
mit
| 1,726,442,905,896,622,800 | 44.936482 | 79 | 0.606595 | false |
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_decimal.py
|
1
|
209553
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
Decimal arithmetic using the test cases provided by Mike Cowlishaw. The
latter tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
import warnings
import inspect
try:
import threading
except ImportError:
threading = None
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
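    # Exactly the expected signals must be set: every signal named in
    # `expected` is truthy in the dict, every other signal is falsy.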
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
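        # A test line is either a directive ("name: value") or an equation
        # ("id operation operands -> result conditions").  Anything after
        # "--" is a comment and is stripped first.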
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
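        # Directives look like "precision: 9" or "rounding: half_even"; the
        # value is mapped through RoundingDict or coerced to int where
        # possible, then applied via the matching ChangeDict setter.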
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
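            # Operands may be quoted, with doubled quotes used as escapes:
            # stash '' and "" behind placeholders, strip the remaining
            # quotes, then restore the placeholders as literal quotes.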
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
        #engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
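# Why two branches above (an explanatory note, grounded in the helpers
# themselves): the C implementation takes the localeconv override as a
# second positional argument to __format__ and expects 'grouping' as a
# string of code points, while the pure-Python version uses the private
# _localeconv keyword and a plain list -- hence make_grouping/get_fmt.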
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
import locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
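# A minimal illustration of these rules (a sketch, not part of the
# test); with InvalidOperation untrapped:
#     Decimal('NaN') == Decimal(1)  ->  False  (no signal at all)
#     Decimal('NaN') != Decimal(1)  ->  True
#     Decimal('NaN') <  Decimal(1)  ->  False, but InvalidOperation is
#                                       signaled; with the trap enabled
#                                       it raises instead.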
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = op is operator.ne
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = op is operator.ne
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, op, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
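# thfunc2 sets cls.synchro only after it has changed *its own*
# thread-local context to prec=18, so the division below must still
# run with this thread's prec=24 -- exactly what the final assertions
# on test1/test2 verify.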
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
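# This modifies only the current thread's context; thfunc1, running
# concurrently, must keep seeing the default prec=24.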
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care when executing this test from IDLE: there is a threading
# issue that hangs IDLE, and its cause has not been tracked down.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
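# Sketch of the mechanism under test: the first call to getcontext()
# in a thread creates that thread's context as a copy of
# DefaultContext, so the prec/Emax/Emin set below are inherited by
# both worker threads, while later per-thread changes stay isolated.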
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for usability aspects of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
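# Use the C implementation's extreme exponent limits when available so
# the comparisons below exercise values near the boundaries; the
# literal fallbacks serve the same purpose for the pure-Python run.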
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
#the same hash as for an int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
# check that the hashes of a Decimal and a float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
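# For example (a sketch, not part of the data below):
#     round(Decimal('123.456'), 2)
# gives the same result as
#     Decimal('123.456').quantize(Decimal('1E-2'))
# under the default ROUND_HALF_EVEN rounding, namely Decimal('123.46').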
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
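# xc is deliberately tiny (prec=1, Emax=1, Emin=-1): almost every
# operation rounds, overflows, or goes subnormal in it, which makes it
# obvious whether xc or the enclosing context c was actually used.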
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
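# The asserts below pin down this hierarchy:
#
#   ArithmeticError
#     DecimalException
#       Clamped, Inexact, Rounded, Subnormal
#       InvalidOperation
#         ConversionSyntax, DivisionImpossible, InvalidContext
#         DivisionUndefined (also a ZeroDivisionError)
#       DivisionByZero    (also a ZeroDivisionError)
#       FloatOperation    (also a TypeError)
#       Overflow  (Inexact and Rounded)
#       Underflow (Inexact, Rounded and Subnormal)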
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
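# Passing None for every argument must be equivalent to passing no
# arguments at all: both contexts get the documented defaults checked
# in the loop below.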
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
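# Context.power also accepts keyword arguments in any order; the call
# below computes pow(2, 8, 5) == 256 % 5 == 1.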
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
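# Flags raised in the inner context must not leak into the outer one.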
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
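# For example, sqrt(2) must produce the same digits and raise the same
# signals (Inexact, Rounded) whether or not Clamped was set beforehand.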
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
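# The C implementation has no _raise_error() helper, so emulate it:
# set the flag by hand and raise only if the signal is trapped.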
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k, v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
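# Semantics under test: implicit float->Decimal conversions and mixed
# comparisons set the FloatOperation flag, while the explicit
# from_float()/create_decimal_from_float() conversions stay silent.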
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
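# Mutate the shared templates inside try/finally so they are always
# restored; any exception is re-raised only after cleanup.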
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
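# Only attributes that were passed explicitly are overridden; flags
# and traps given as lists are first expanded into full signal dicts.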
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX Should NaN/Infinity raise here instead of returning 0?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
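# _power_exact computes x**y when the result is exactly representable,
# e.g. Decimal(2**16) ** Decimal('-0.5') == Decimal('0.00390625').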
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
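# Evaluate d1.<operation>() (optionally with d2) and verify that the
# operands' internal fields (_sign, _int, _exp) were not mutated.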
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
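# IEEEContext(bits) follows the IEEE 754 decimal interchange formats:
# prec = 9*bits/32 - 2 and Emax = 3 * 2**(bits//16 + 3), giving
# (7, 96), (16, 384) and (34, 6144) for 32, 64 and 128 bits.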
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
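# Compare exact integer exponentiation against Decimal exponentiation
# for random 36-digit bases at a precision large enough to be exact.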
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
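# MAX_PREC is 425000000 in the 32-bit build of _decimal; a larger
# value implies the 64-bit configuration with wider context limits.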
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
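# int_max is the largest C ssize_t for this configuration;
# gt_max_emax exceeds the build's MAX_EMAX limit.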
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
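            # helper: assert that the given signal, and only that signal, is set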
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
apache-2.0
| -7,199,043,186,196,745,000 | 36.016958 | 113 | 0.55029 | false |
drJfunk/gbmgeometry
|
gbmgeometry/position_interpolator.py
|
1
|
4903
|
import astropy.io.fits as fits
import astropy.units as u
import numpy as np
import scipy.interpolate as interpolate
from gbmgeometry.utils.gbm_time import GBMTime
class PositionInterpolator(object):
def __init__(self, poshist=None, T0=None, trigdat=None):
        """
        Interpolate Fermi GBM spacecraft attitude and position from FITS data.
        Parameters
        ----------
        poshist : str, optional
            Path to a GBM position-history FITS file ('GLAST POS HIST').
        T0 : float, optional
            Reference MET subtracted from the position-history time axis.
        trigdat : str, optional
            Path to a GBM trigdat FITS file ('EVNTRATE'); its times are
            stored relative to the trigger time.
        """
if poshist is not None:
with fits.open(poshist) as poshist:
self._time = poshist['GLAST POS HIST'].data['SCLK_UTC']
self._quats = np.array([poshist['GLAST POS HIST'].data['QSJ_1'],
poshist['GLAST POS HIST'].data['QSJ_2'],
poshist['GLAST POS HIST'].data['QSJ_3'],
poshist['GLAST POS HIST'].data['QSJ_4']]).T
self._sc_pos = np.array([poshist['GLAST POS HIST'].data['POS_X'],
poshist['GLAST POS HIST'].data['POS_Y'],
poshist['GLAST POS HIST'].data['POS_Z']]).T
# if using posthist then units are in m
self._factor = (u.m).to(u.km)
if T0 is not None:
self._time -= T0
self._trigtime = T0
else:
self._trigtime = None
elif trigdat is not None:
with fits.open(trigdat) as trigdat:
trigtime = trigdat['EVNTRATE'].header['TRIGTIME']
tstart = trigdat['EVNTRATE'].data['TIME'] - trigtime
self._trigtime = trigtime
self._quats = trigdat['EVNTRATE'].data['SCATTITD']
self._sc_pos = trigdat['EVNTRATE'].data['EIC']
sort_mask = np.argsort(tstart)
tstart = tstart[sort_mask]
self._quats = self._quats[sort_mask]
self._sc_pos = self._sc_pos[sort_mask]
self._time = tstart
# the sc is in km so no need to convert
self._factor = 1
else:
print("No file passed. Exiting")
return
        # Interpolate the quaternions and spacecraft positions
self._interpolate_quaternion()
self._interpolate_sc_pos()
    def utc(self, t):
if self._trigtime is not None:
met = self._trigtime + t
else:
met = t
time = GBMTime.from_MET(met)
return time.time.fits
    def met(self, t):
if self._trigtime is not None:
met = self._trigtime + t
else:
met = t
return met
def maxtime(self):
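        # Note: despite the name, this returns the full array of sample
        # times rather than a scalar; callers can take its last element.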
return self._time
def quaternion(self, t):
"""
Gets an interpolated quaternion as a function of time
Parameters
----------
t
Returns
-------
A Fermi GBM quaternion
"""
return self._quaternion_t(t)
def sc_pos(self, t):
"""
Parameters
----------
t
Returns
-------
Fermi GBM spacecraft position
"""
return self._scxyz_t(t) * self._factor
def _interpolate_quaternion(self):
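        # componentwise linear interpolation of the quaternion; note that
        # this does not re-normalize the result (no slerp)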
self._quaternion_t = interpolate.interp1d(self._time, self._quats.T)
def _interpolate_sc_pos(self):
self._scxyz_t = interpolate.interp1d(self._time, self._sc_pos.T)
def sc_matrix(self, t):
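        # Editorial note (hedged): build the 3x3 direction-cosine matrix
        # from the attitude quaternion, assuming the GBM convention that
        # q4 is the scalar component; the matrix rotates J2000/ECI vectors
        # into the spacecraft frame, and geo_matrix() returns its transpose.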
q1, q2, q3, q4 = self.quaternion(t)
sc_matrix = np.zeros((3, 3))
        sc_matrix[0, 0] = q1 ** 2 - q2 ** 2 - q3 ** 2 + q4 ** 2
        sc_matrix[0, 1] = 2.0 * (q1 * q2 + q4 * q3)
        sc_matrix[0, 2] = 2.0 * (q1 * q3 - q4 * q2)
        sc_matrix[1, 0] = 2.0 * (q1 * q2 - q4 * q3)
        sc_matrix[1, 1] = -q1 ** 2 + q2 ** 2 - q3 ** 2 + q4 ** 2
        sc_matrix[1, 2] = 2.0 * (q2 * q3 + q4 * q1)
        sc_matrix[2, 0] = 2.0 * (q1 * q3 + q4 * q2)
        sc_matrix[2, 1] = 2.0 * (q2 * q3 - q4 * q1)
        sc_matrix[2, 2] = -q1 ** 2 - q2 ** 2 + q3 ** 2 + q4 ** 2
return sc_matrix
def geo_matrix(self, t):
return self.sc_matrix(t).T
def altitude(self, t):
"""
:param t:
:return:
"""
earth_radius = 6371.
fermi_radius = np.sqrt((self.sc_pos(t)**2).sum())
return fermi_radius - earth_radius
@staticmethod
def normalize(x):
norm = np.sqrt(np.sum(x ** 2, axis=0))
return x / norm
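# --- Hedged usage sketch (editorial addition, not part of the module) ---
# Assuming a trigdat FITS file at a hypothetical path:
#     interp = PositionInterpolator(trigdat="glg_trigdat_all_bnXXXXXXXXX_vXX.fit")
#     t = 0.0                     # seconds relative to the trigger time
#     interp.quaternion(t)        # interpolated attitude quaternion
#     interp.sc_pos(t)            # spacecraft position in km
#     interp.altitude(t)          # altitude above a 6371 km mean Earth radius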
|
mit
| 1,473,218,808,684,017,200 | 21.490826 | 84 | 0.451152 | false |
rabc/Gitmark
|
flask/testing.py
|
1
|
2318
|
# -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import Client, EnvironBuilder
from flask import _request_ctx_stack
class FlaskClient(Client):
"""Works like a regular Werkzeug test client but has some
knowledge about how Flask works to defer the cleanup of the
request context stack to the end of a with body when used
in a with statement.
"""
preserve_context = context_preserved = False
def open(self, *args, **kwargs):
if self.context_preserved:
_request_ctx_stack.pop()
self.context_preserved = False
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
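        # tell the Flask app, via the WSGI environ, whether the request
        # context should be kept alive after the request finishes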
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
builder = EnvironBuilder(*args, **kwargs)
if self.application.config.get('SERVER_NAME'):
server_name = self.application.config.get('SERVER_NAME')
if ':' not in server_name:
http_host, http_port = server_name, None
else:
http_host, http_port = server_name.split(':', 1)
if builder.base_url == 'http://localhost/':
# Default Generated Base URL
                if http_port is not None:
builder.host = http_host + ':' + http_port
else:
builder.host = http_host
old = _request_ctx_stack.top
try:
return Client.open(self, builder,
as_tuple=as_tuple,
buffered=buffered,
follow_redirects=follow_redirects)
finally:
self.context_preserved = _request_ctx_stack.top is not old
def __enter__(self):
self.preserve_context = True
return self
def __exit__(self, exc_type, exc_value, tb):
self.preserve_context = False
if self.context_preserved:
_request_ctx_stack.pop()
|
mit
| -7,269,470,205,188,622,000 | 33.597015 | 70 | 0.574633 | false |
andreoliw/clitoolkit
|
clit/ui.py
|
1
|
1060
|
"""User interface."""
import sys
import time
from pathlib import Path
from subprocess import PIPE, CalledProcessError
import click
from clit.files import shell
def notify(title, message):
"""If terminal-notifier is installed, use it to display a notification."""
check = "which" if sys.platform == "linux" else "command -v"
try:
terminal_notifier_path = shell("{} terminal-notifier".format(check), check=True, stdout=PIPE).stdout.strip()
except CalledProcessError:
terminal_notifier_path = ""
if terminal_notifier_path:
shell(
'terminal-notifier -title "{}: {} complete" -message "Successfully {} dev environment."'.format(
Path(__file__).name, title, message
)
)
def prompt(message: str, fg: str = "bright_white") -> None:
"""Display a prompt with a message. Wait a little bit before, so stdout is flushed before the input message."""
print()
click.secho(message, fg=fg)
time.sleep(0.2)
input("Press ENTER to continue or Ctrl-C to abort: ")
|
bsd-3-clause
| 1,113,306,305,299,595,500 | 32.125 | 116 | 0.654717 | false |
rr-/docstring_parser
|
docstring_parser/tests/test_numpydoc.py
|
1
|
17338
|
import typing as T
import pytest
from docstring_parser.numpydoc import parse
@pytest.mark.parametrize(
"source, expected",
[
("", None),
("\n", None),
("Short description", "Short description"),
("\nShort description\n", "Short description"),
("\n Short description\n", "Short description"),
],
)
def test_short_description(source: str, expected: str) -> None:
docstring = parse(source)
assert docstring.short_description == expected
assert docstring.long_description is None
assert docstring.meta == []
@pytest.mark.parametrize(
"source, expected_short_desc, expected_long_desc, expected_blank",
[
(
"Short description\n\nLong description",
"Short description",
"Long description",
True,
),
(
"""
Short description
Long description
""",
"Short description",
"Long description",
True,
),
(
"""
Short description
Long description
Second line
""",
"Short description",
"Long description\nSecond line",
True,
),
(
"Short description\nLong description",
"Short description",
"Long description",
False,
),
(
"""
Short description
Long description
""",
"Short description",
"Long description",
False,
),
(
"\nShort description\nLong description\n",
"Short description",
"Long description",
False,
),
(
"""
Short description
Long description
Second line
""",
"Short description",
"Long description\nSecond line",
False,
),
],
)
def test_long_description(
source: str,
expected_short_desc: str,
expected_long_desc: str,
expected_blank: bool,
) -> None:
docstring = parse(source)
assert docstring.short_description == expected_short_desc
assert docstring.long_description == expected_long_desc
assert docstring.blank_after_short_description == expected_blank
assert docstring.meta == []
@pytest.mark.parametrize(
"source, expected_short_desc, expected_long_desc, "
"expected_blank_short_desc, expected_blank_long_desc",
[
(
"""
Short description
Parameters
----------
asd
""",
"Short description",
None,
False,
False,
),
(
"""
Short description
Long description
Parameters
----------
asd
""",
"Short description",
"Long description",
False,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
False,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
True,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
True,
True,
),
(
"""
Parameters
----------
asd
""",
None,
None,
False,
False,
),
],
)
def test_meta_newlines(
source: str,
expected_short_desc: T.Optional[str],
expected_long_desc: T.Optional[str],
expected_blank_short_desc: bool,
expected_blank_long_desc: bool,
) -> None:
docstring = parse(source)
assert docstring.short_description == expected_short_desc
assert docstring.long_description == expected_long_desc
assert docstring.blank_after_short_description == expected_blank_short_desc
assert docstring.blank_after_long_description == expected_blank_long_desc
assert len(docstring.meta) == 1
def test_meta_with_multiline_description() -> None:
docstring = parse(
"""
Short description
Parameters
----------
spam
asd
1
2
3
"""
)
assert docstring.short_description == "Short description"
assert len(docstring.meta) == 1
assert docstring.meta[0].args == ["param", "spam"]
assert docstring.meta[0].arg_name == "spam"
assert docstring.meta[0].description == "asd\n1\n 2\n3"
def test_default_args():
docstring = parse(
"""
A sample function
A function the demonstrates docstrings
Parameters
----------
arg1 : int
The firsty arg
arg2 : str
The second arg
arg3 : float, optional
The third arg. Default is 1.0.
arg4 : Optional[Dict[str, Any]], optional
The fourth arg. Defaults to None
arg5 : str, optional
The fifth arg. Default: DEFAULT_ARGS
Returns
-------
Mapping[str, Any]
The args packed in a mapping
"""
)
assert docstring is not None
assert len(docstring.params) == 5
arg4 = docstring.params[3]
assert arg4.arg_name == "arg4"
assert arg4.is_optional
assert arg4.type_name == "Optional[Dict[str, Any]]"
assert arg4.default == "None"
assert arg4.description == "The fourth arg. Defaults to None"
def test_multiple_meta() -> None:
docstring = parse(
"""
Short description
Parameters
----------
spam
asd
1
2
3
Raises
------
bla
herp
yay
derp
"""
)
assert docstring.short_description == "Short description"
assert len(docstring.meta) == 3
assert docstring.meta[0].args == ["param", "spam"]
assert docstring.meta[0].arg_name == "spam"
assert docstring.meta[0].description == "asd\n1\n 2\n3"
assert docstring.meta[1].args == ["raises", "bla"]
assert docstring.meta[1].type_name == "bla"
assert docstring.meta[1].description == "herp"
assert docstring.meta[2].args == ["raises", "yay"]
assert docstring.meta[2].type_name == "yay"
assert docstring.meta[2].description == "derp"
def test_params() -> None:
docstring = parse("Short description")
assert len(docstring.params) == 0
docstring = parse(
"""
Short description
Parameters
----------
name
description 1
priority : int
description 2
sender : str, optional
description 3
ratio : Optional[float], optional
description 4
"""
)
assert len(docstring.params) == 4
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == "description 1"
assert not docstring.params[0].is_optional
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
assert not docstring.params[1].is_optional
assert docstring.params[2].arg_name == "sender"
assert docstring.params[2].type_name == "str"
assert docstring.params[2].description == "description 3"
assert docstring.params[2].is_optional
assert docstring.params[3].arg_name == "ratio"
assert docstring.params[3].type_name == "Optional[float]"
assert docstring.params[3].description == "description 4"
assert docstring.params[3].is_optional
docstring = parse(
"""
Short description
Parameters
----------
name
description 1
with multi-line text
priority : int
description 2
"""
)
assert len(docstring.params) == 2
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == (
"description 1\n" "with multi-line text"
)
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
def test_attributes() -> None:
docstring = parse("Short description")
assert len(docstring.params) == 0
docstring = parse(
"""
Short description
Attributes
----------
name
description 1
priority : int
description 2
sender : str, optional
description 3
ratio : Optional[float], optional
description 4
"""
)
assert len(docstring.params) == 4
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == "description 1"
assert not docstring.params[0].is_optional
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
assert not docstring.params[1].is_optional
assert docstring.params[2].arg_name == "sender"
assert docstring.params[2].type_name == "str"
assert docstring.params[2].description == "description 3"
assert docstring.params[2].is_optional
assert docstring.params[3].arg_name == "ratio"
assert docstring.params[3].type_name == "Optional[float]"
assert docstring.params[3].description == "description 4"
assert docstring.params[3].is_optional
docstring = parse(
"""
Short description
Attributes
----------
name
description 1
with multi-line text
priority : int
description 2
"""
)
assert len(docstring.params) == 2
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == (
"description 1\n" "with multi-line text"
)
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
def test_other_params() -> None:
docstring = parse(
"""
Short description
Other Parameters
----------------
only_seldom_used_keywords : type, optional
Explanation
common_parameters_listed_above : type, optional
Explanation
"""
)
assert len(docstring.meta) == 2
assert docstring.meta[0].args == [
"other_param",
"only_seldom_used_keywords",
]
assert docstring.meta[0].arg_name == "only_seldom_used_keywords"
assert docstring.meta[0].type_name == "type"
assert docstring.meta[0].is_optional
assert docstring.meta[0].description == "Explanation"
assert docstring.meta[1].args == [
"other_param",
"common_parameters_listed_above",
]
def test_yields() -> None:
docstring = parse(
"""
Short description
Yields
------
int
description
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].args == ["yields"]
assert docstring.meta[0].type_name == "int"
assert docstring.meta[0].description == "description"
assert docstring.meta[0].return_name is None
assert docstring.meta[0].is_generator
def test_returns() -> None:
docstring = parse(
"""
Short description
"""
)
assert docstring.returns is None
docstring = parse(
"""
Short description
Returns
-------
type
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "type"
assert docstring.returns.description is None
docstring = parse(
"""
Short description
Returns
-------
int
description
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "int"
assert docstring.returns.description == "description"
docstring = parse(
"""
Returns
-------
Optional[Mapping[str, List[int]]]
A description: with a colon
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]"
assert docstring.returns.description == "A description: with a colon"
docstring = parse(
"""
Short description
Returns
-------
int
description
with much text
even some spacing
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "int"
assert docstring.returns.description == (
"description\n" "with much text\n\n" "even some spacing"
)
def test_raises() -> None:
docstring = parse(
"""
Short description
"""
)
assert len(docstring.raises) == 0
docstring = parse(
"""
Short description
Raises
------
ValueError
description
"""
)
assert len(docstring.raises) == 1
assert docstring.raises[0].type_name == "ValueError"
assert docstring.raises[0].description == "description"
def test_warns() -> None:
docstring = parse(
"""
Short description
Warns
-----
UserWarning
description
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].type_name == "UserWarning"
assert docstring.meta[0].description == "description"
def test_simple_sections() -> None:
docstring = parse(
"""
Short description
See Also
--------
something : some thing you can also see
actually, anything can go in this section
Warnings
--------
Here be dragons
Notes
-----
None of this is real
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
"""
)
assert len(docstring.meta) == 4
assert docstring.meta[0].args == ["see_also"]
assert docstring.meta[0].description == (
"something : some thing you can also see\n"
"actually, anything can go in this section"
)
assert docstring.meta[1].args == ["warnings"]
assert docstring.meta[1].description == "Here be dragons"
assert docstring.meta[2].args == ["notes"]
assert docstring.meta[2].description == "None of this is real"
assert docstring.meta[3].args == ["references"]
def test_examples() -> None:
docstring = parse(
"""
Short description
Examples
--------
long example
more here
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].description == "long example\n\nmore here"
@pytest.mark.parametrize(
"source, expected_depr_version, expected_depr_desc",
[
(
"Short description\n\n.. deprecated:: 1.6.0\n This is busted!",
"1.6.0",
"This is busted!",
),
(
(
"Short description\n\n"
".. deprecated:: 1.6.0\n"
" This description has\n"
" multiple lines!"
),
"1.6.0",
"This description has\nmultiple lines!",
),
("Short description\n\n.. deprecated:: 1.6.0", "1.6.0", None),
(
"Short description\n\n.. deprecated::\n No version!",
None,
"No version!",
),
],
)
def test_deprecation(
source: str,
expected_depr_version: T.Optional[str],
expected_depr_desc: T.Optional[str],
) -> None:
docstring = parse(source)
assert docstring.deprecation is not None
assert docstring.deprecation.version == expected_depr_version
assert docstring.deprecation.description == expected_depr_desc
|
mit
| 7,615,288,201,455,690,000 | 25.229955 | 79 | 0.537836 | false |
mark-burnett/filament-dynamics
|
actin_dynamics/numerical/correlation.py
|
1
|
2220
|
# Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import math
import numpy
def collection_stats(value_collection):
all_vals = numpy.fromiter(itertools.chain(*value_collection), dtype=float)
mean = numpy.mean(all_vals)
std = numpy.std(all_vals)
return mean, std
def aggregate_autocorrelation(sample_period, value_collection):
big_mean, big_std = collection_stats(value_collection)
correlation_collection = [
autocorrelation(values, mean=big_mean, std=big_std)
for values in value_collection]
maxlen = max(map(len, correlation_collection))
collated_correlations = []
for i in xrange(maxlen):
local_correlations = []
collated_correlations.append(local_correlations)
for correlations in correlation_collection:
if i < len(correlations):
local_correlations.append(correlations[i])
taus = numpy.arange(maxlen) * sample_period
means = [numpy.mean(acs) for acs in collated_correlations]
return taus, means, [0 for t in taus]
def autocorrelation(values, mean=None, std=None):
'''
Perform a proper statistical autocorrelation.
'''
values = numpy.array(values, dtype=float)
if not mean:
mean = numpy.mean(values)
if not std:
std = numpy.std(values)
values = values - mean
length = len(values)
result = [sum(values**2)/length]
for i in xrange(1, len(values)):
result.append((values[i:]*values[:-i]).sum()/(length - i))
return numpy.array(result) / std**2
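# Editorial note (hedged): for lag k >= 1 the loop above computes the
# sample autocovariance c(k) = (1/(N-k)) * sum_t (x_t - mean)*(x_{t+k} - mean),
# while the lag-0 term uses a 1/N denominator; dividing by std**2 converts
# the autocovariance into an autocorrelation, where mean and std may come
# from a whole collection of traces (see aggregate_autocorrelation above).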
|
gpl-3.0
| 3,708,285,325,205,364,000 | 31.647059 | 78 | 0.678378 | false |
izapolsk/integration_tests
|
cfme/tests/configure/test_zones.py
|
1
|
7003
|
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.base.ui import ZoneAddView
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.update import update
pytestmark = [test_requirements.configuration]
NAME_LEN = 5
DESC_LEN = 8
# Finalizer method that clicks Cancel for tests that expect zone
# creation to fail. This avoids logging of UnexpectedAlertPresentException
# for the 'Abandon changes?' alert when the next test tries to navigate
# elsewhere in the UI.
def cancel_zone_add(appliance):
view = appliance.browser.create_view(ZoneAddView)
if view.is_displayed:
view.cancel_button.click()
def create_zone(appliance, name_len=NAME_LEN, desc_len=DESC_LEN):
zc = appliance.collections.zones
region = appliance.server.zone.region
# CREATE
name = fauxfactory.gen_alphanumeric(name_len)
description = fauxfactory.gen_alphanumeric(desc_len)
zc.create(name=name, description=description)
# query to get the newly-created zone's id
zc.filters = {'parent': region}
zones = zc.all()
new_zone = None
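    # for/else: the else branch runs only if the loop completes without a
    # break, i.e. no zone matched the freshly created name and description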
for zone in zones:
if (zone.name == name and zone.description == description):
new_zone = zone
break
else:
pytest.fail(
            f'Zone matching name ({name}) and '
            f'description ({description}) not found in the collection')
return new_zone
@pytest.mark.tier(1)
@pytest.mark.sauce
def test_zone_crud(appliance):
"""
Bugzilla:
1216224
Polarion:
assignee: tpapaioa
caseimportance: low
initialEstimate: 1/15h
casecomponent: WebUI
"""
zone = create_zone(appliance)
assert zone.exists, 'Zone could not be created.'
# UPDATE
with update(zone):
zone.description = f'{zone.description}_updated'
try:
navigate_to(zone, 'Zone')
except ItemNotFound:
pytest.fail(f'Zone {zone.description} could not be updated.')
# DELETE
zone.delete()
assert (not zone.exists), f'Zone {zone.description} could not be deleted.'
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_zone_add_cancel_validation(appliance):
"""
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: low
initialEstimate: 1/20h
"""
appliance.collections.zones.create(
name=fauxfactory.gen_alphanumeric(NAME_LEN),
description=fauxfactory.gen_alphanumeric(DESC_LEN),
cancel=True
)
@pytest.mark.tier(2)
@pytest.mark.sauce
def test_zone_change_appliance_zone(request, appliance):
""" Tests that an appliance can be changed to another Zone
Bugzilla:
1216224
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: low
initialEstimate: 1/15h
"""
zone = create_zone(appliance)
request.addfinalizer(zone.delete)
server_settings = appliance.server.settings
request.addfinalizer(lambda: server_settings.update_basic_information(
{'appliance_zone': 'default'}))
server_settings.update_basic_information({'appliance_zone': zone.name})
assert (zone.description == appliance.server.zone.description)
@pytest.mark.tier(2)
@pytest.mark.sauce
def test_zone_add_dupe(request, appliance):
"""
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: low
initialEstimate: 1/4h
"""
zone = create_zone(appliance)
request.addfinalizer(zone.delete)
request.addfinalizer(lambda: cancel_zone_add(appliance))
with pytest.raises(
Exception,
match=f'Name is not unique within region {appliance.server.zone.region.number}'
):
appliance.collections.zones.create(
name=zone.name,
description=zone.description
)
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_zone_add_maxlength(request, appliance):
"""
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: low
initialEstimate: 1/4h
"""
zone = create_zone(appliance, name_len=50, desc_len=50)
request.addfinalizer(zone.delete)
    assert zone.exists, 'Zone does not exist.'
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_zone_add_blank_name(request, appliance):
"""
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: medium
caseposneg: negative
initialEstimate: 1/8h
"""
request.addfinalizer(lambda: cancel_zone_add(appliance))
with pytest.raises(Exception, match="Name can't be blank"):
appliance.collections.zones.create(
name='',
description=fauxfactory.gen_alphanumeric(DESC_LEN)
)
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_zone_add_blank_description(request, appliance):
"""
Polarion:
assignee: tpapaioa
casecomponent: WebUI
caseimportance: medium
caseposneg: negative
initialEstimate: 1/8h
"""
request.addfinalizer(lambda: cancel_zone_add(appliance))
with pytest.raises(Exception, match=r"(Description can't be blank|Description is required)"):
appliance.collections.zones.create(
name=fauxfactory.gen_alphanumeric(NAME_LEN),
description=''
)
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_add_zone_windows_domain_credentials(request, appliance):
"""
Testing Windows Domain credentials add
Polarion:
assignee: tpapaioa
initialEstimate: 1/4h
casecomponent: WebUI
"""
zone = appliance.collections.zones.all()[0]
values = {'username': 'userid',
'password': 'password',
'verify': 'password'}
zone.update(values)
def _cleanup():
values = {'username': '',
'password': '',
'verify': ''}
zone.update(values)
request.addfinalizer(_cleanup)
view = navigate_to(zone, 'Edit')
username = view.username.read()
assert username == values['username'], f'Current username is {username}'
@pytest.mark.tier(3)
@pytest.mark.sauce
def test_remove_zone_windows_domain_credentials(appliance):
"""
Testing Windows Domain credentials removal
Polarion:
assignee: tpapaioa
initialEstimate: 1/4h
casecomponent: WebUI
"""
zone = appliance.collections.zones.all()[0]
values = {'username': 'userid',
'password': 'password',
'verify': 'password'}
zone.update(values)
view = navigate_to(zone, 'Edit')
username = view.username.read()
assert username == values['username'], "Username wasn't updated"
values = {'username': '',
'password': '',
'verify': ''}
zone.update(values)
view = navigate_to(zone, 'Edit')
username = view.username.read()
assert username == values['username'], "Username wasn't removed"
|
gpl-2.0
| 8,238,792,760,051,305,000 | 26.249027 | 97 | 0.648865 | false |
CamDavidsonPilon/lifetimes
|
docs/conf.py
|
1
|
5841
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# lifetimes documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 7 14:10:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Convert package README.md to intro.rst to include in index.rst for docs
try:
import pypandoc
long_description = pypandoc.convert_file("../README.md", "rst", outputfile="intro.rst")
except ImportError:
print("Install pypandoc to convert README.md to intro.rst")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# for parsing markdown files
source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"}
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "lifetimes"
copyright = "2015, Cameron Davidson-Pilon"
author = "Cameron Davidson-Pilon"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.11.2"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Napoleon settings ----------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
napoleon_use_keyword = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "lifetimesdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "lifetimes.tex", "lifetimes Documentation", "Cameron Davidson-Pilon", "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "lifetimes", "lifetimes Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"lifetimes",
"lifetimes Documentation",
author,
"lifetimes",
"One line description of project.",
"Miscellaneous",
)
]
|
mit
| 8,542,328,529,899,552,000 | 30.744565 | 112 | 0.677795 | false |
8l/beri
|
cheritest/trunk/tests/fpu/test_raw_fpu_denorm.py
|
2
|
1951
|
#-
# Copyright (c) 2013 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
#
# Test floating point operations that give a denormalized result
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_fpu_denorm(BaseBERITestCase):
def test_raw_fpu_denorm_add(self):
'''Test that add.s flushes a denormalized result to zero'''
self.assertRegisterEqual(self.MIPS.a0, 0, "Denormalized result of add.s was not flushed to zero")
def test_raw_fpu_denorm_sub(self):
'''Test that sub.s flushes a denormalized result to zero'''
self.assertRegisterEqual(self.MIPS.a1, 0, "Denormalized result of sub.s was not flushed to zero")
def test_raw_fpu_denorm_mul(self):
'''Test that mul.s flushes a denormalized result to zero'''
self.assertRegisterEqual(self.MIPS.a2, 0, "Denormalized result of mul.s was not flushed to zero")
|
apache-2.0
| -7,927,294,712,560,409,000 | 39.645833 | 98 | 0.75141 | false |
vrthra/pygram
|
tests/test_accesslog.py
|
1
|
16740
|
from urllib.parse import urlparse
import induce
import collections
import random
import accesslog
random.seed(0)
def test_accesslog1():
content_lines = '''
1.1.1.1 - - [21/Feb/2014:06:35:45 +0100] "GET /robots.txt HTTP/1.1" 200 112 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
'''[1:-1]
grammar = '''
$START ::= $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:35:45 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 112 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
$LOGANALYZER.ANALYZE:USERAGENT ::= $LOGANALYZER.SUMMARIZE:COL.USERAGENT
$LOGANALYZER.ANALYZE:REQUEST ::= $LOGANALYZER.SUMMARIZE:COL.REQUEST
$LOGANALYZER.ANALYZE:IP ::= $LOGANALYZER.SUMMARIZE:COL.IP
$LOGANALYZER.SUMMARIZE:COL.USERAGENT ::= Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)
$LOGANALYZER.SUMMARIZE:COL.REQUEST ::= GET /robots.txt HTTP/1.1
$LOGANALYZER.SUMMARIZE:COL.IP ::= 1.1.1.1
'''[1:-1]
result = []
for line in content_lines.split('\n'):
with induce.Tracer(line, result) as t:
summary = accesslog.LogAnalyzer(line, 5)
summary.analyze()
with induce.grammar() as g:
for jframe in result: g.handle_events(jframe)
print(str(g))
assert(grammar == str(g))
def test_accesslog2():
content_lines = '''
1.1.1.1 - - [21/Feb/2014:06:35:45 +0100] "GET /robots.txt HTTP/1.1" 200 112 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
1.1.1.1 - - [21/Feb/2014:06:35:45 +0100] "GET /blog.css HTTP/1.1" 200 3663 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
2.2.2.2 - - [21/Feb/2014:06:52:04 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
2.2.2.2 - - [21/Feb/2014:06:52:04 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
3.3.3.3 - - [21/Feb/2014:06:58:14 +0100] "/" 200 1664 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
4.4.4.4 - - [21/Feb/2014:07:22:03 +0100] "/" 200 1664 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
5.5.5.5 - - [21/Feb/2014:07:32:48 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:07:32:48 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:08:13:01 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:08:13:01 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
7.7.7.7 - - [21/Feb/2014:08:51:25 +0100] "GET /main.php HTTP/1.1" 200 3681 "-" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461)"
7.7.7.7 - - [21/Feb/2014:08:51:34 +0100] "-" 400 0 "-" "-"
7.7.7.7 - - [21/Feb/2014:08:51:48 +0100] "GET /tag/php.php HTTP/1.1" 200 4673 "-" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461)"
8.8.8.8 - - [21/Feb/2014:08:53:43 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
8.8.8.8 - - [21/Feb/2014:08:53:43 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
9.9.9.9 - - [21/Feb/2014:09:18:40 +0100] "-" 400 0 "-" "-"
9.9.9.9 - - [21/Feb/2014:09:18:40 +0100] "GET /main HTTP/1.1" 200 3681 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
9.9.9.9 - - [21/Feb/2014:09:18:41 +0100] "GET /phpMyAdmin/scripts/setup.php HTTP/1.1" 404 27 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
9.9.9.9 - - [21/Feb/2014:09:18:42 +0100] "GET /pma/scripts/setup.php HTTP/1.1" 404 27 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
10.10.10.10 - - [21/Feb/2014:09:21:29 +0100] "-" 400 0 "-" "-"
10.10.10.10 - - [21/Feb/2014:09:21:29 +0100] "GET /main.php HTTP/1.1" 200 3681 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
10.10.10.10 - - [21/Feb/2014:09:21:30 +0100] "GET /about.php HTTP/1.1" 200 2832 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
10.10.10.10 - - [21/Feb/2014:09:21:30 +0100] "GET /tag/nginx.php HTTP/1.1" 200 3295 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
10.10.10.10 - - [21/Feb/2014:09:21:31 +0100] "GET /how-to-setup.php HTTP/1.1" 200 2637 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117"
1.1.1.1 - - [21/Feb/2014:09:27:27 +0100] "GET /robots.txt HTTP/1.1" 200 112 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
1.1.1.1 - - [21/Feb/2014:09:27:27 +0100] "GET /tag/tor.php HTTP/1.1" 200 2041 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
5.5.5.5 - - [21/Feb/2014:10:14:37 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:10:14:37 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
8.8.8.8 - - [21/Feb/2014:10:55:19 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
8.8.8.8 - - [21/Feb/2014:10:55:19 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
1.1.1.1 - - [21/Feb/2014:11:19:05 +0100] "GET /robots.txt HTTP/1.1" 200 112 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
1.1.1.1 - - [21/Feb/2014:11:19:06 +0100] "GET /robots.txt HTTP/1.1" 200 112 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
1.1.1.1 - - [21/Feb/2014:11:19:06 +0100] "GET / HTTP/1.1" 200 3649 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
6.6.6.6 - - [21/Feb/2014:12:16:14 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:12:16:15 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:14:17:52 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:14:17:52 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:14:58:04 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:14:58:04 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:15:38:46 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:15:38:47 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
2.2.2.2 - - [21/Feb/2014:18:20:36 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
2.2.2.2 - - [21/Feb/2014:18:20:37 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:19:42:00 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [21/Feb/2014:19:42:00 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
2.2.2.2 - - [21/Feb/2014:20:22:13 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
2.2.2.2 - - [21/Feb/2014:20:22:13 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:21:02:55 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
6.6.6.6 - - [21/Feb/2014:21:02:55 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
8.8.8.8 - - [22/Feb/2014:01:05:37 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
8.8.8.8 - - [22/Feb/2014:01:05:38 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
8.8.8.8 - - [22/Feb/2014:04:28:10 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
8.8.8.8 - - [22/Feb/2014:04:28:10 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
2.2.2.2 - - [22/Feb/2014:05:49:34 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
2.2.2.2 - - [22/Feb/2014:05:49:34 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
5.5.5.5 - - [22/Feb/2014:06:29:47 +0100] "GET /main/rss HTTP/1.1" 301 178 "-" "Motorola"
5.5.5.5 - - [22/Feb/2014:06:29:47 +0100] "GET /feed/atom.xml HTTP/1.1" 304 0 "-" "Motorola"
'''[1:-1]
grammar = '''
$START ::= $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:35:45 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 112 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:35:45 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3663 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:52:04 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:52:04 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:06:58:14 +0100] "/" 200 1664 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:07:22:03 +0100] "/" 200 1664 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:07:32:48 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:07:32:48 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:13:01 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:13:01 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:51:25 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3681 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:51:48 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 4673 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:53:43 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:53:43 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:18:40 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3681 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:18:41 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 404 27 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:18:42 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 404 27 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:21:29 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3681 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:21:30 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 2832 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:21:30 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3295 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:21:31 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 2637 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:27:27 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 112 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:27:27 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 2041 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:10:14:37 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:10:14:37 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:10:55:19 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:10:55:19 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:11:19:05 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 112 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:11:19:06 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 112 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:11:19:06 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 200 3649 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:12:16:14 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:12:16:15 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:14:17:52 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:14:17:52 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:14:58:04 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:14:58:04 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:15:38:46 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:15:38:47 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:18:20:36 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:18:20:37 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:19:42:00 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:19:42:00 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:20:22:13 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:20:22:13 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:21:02:55 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:21:02:55 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:01:05:37 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:01:05:38 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:04:28:10 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:04:28:10 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:05:49:34 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:05:49:34 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:06:29:47 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 301 178 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.ANALYZE:IP - - [22/Feb/2014:06:29:47 +0100] "$LOGANALYZER.ANALYZE:REQUEST" 304 0 "-" "$LOGANALYZER.ANALYZE:USERAGENT"
| $LOGANALYZER.__INIT__:CONTENT
$LOGANALYZER.ANALYZE:USERAGENT ::= $LOGANALYZER.SUMMARIZE:COL.USERAGENT
$LOGANALYZER.ANALYZE:REQUEST ::= $LOGANALYZER.SUMMARIZE:COL.REQUEST
$LOGANALYZER.ANALYZE:IP ::= $LOGANALYZER.SUMMARIZE:COL.IP
$LOGANALYZER.SUMMARIZE:COL.USERAGENT ::= Motorola
| Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461)
| Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117
| Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)
$LOGANALYZER.SUMMARIZE:COL.REQUEST ::= GET / HTTP/1.1
| GET /about.php HTTP/1.1
| GET /blog.css HTTP/1.1
| GET /feed/atom.xml HTTP/1.1
| GET /how-to-setup.php HTTP/1.1
| GET /main HTTP/1.1
| GET /main.php HTTP/1.1
| GET /main/rss HTTP/1.1
| GET /phpMyAdmin/scripts/setup.php HTTP/1.1
| GET /pma/scripts/setup.php HTTP/1.1
| GET /robots.txt HTTP/1.1
| GET /tag/nginx.php HTTP/1.1
| GET /tag/php.php HTTP/1.1
| GET /tag/tor.php HTTP/1.1
$LOGANALYZER.SUMMARIZE:COL.IP ::= 1.1.1.1
| 10.10.10.10
| 2.2.2.2
| 3.3.3.3
| 4.4.4.4
| 5.5.5.5
| 6.6.6.6
| 7.7.7.7
| 8.8.8.8
| 9.9.9.9
$LOGANALYZER.__INIT__:CONTENT ::= $LOGANALYZER.ANALYZE:LINE
$LOGANALYZER.ANALYZE:LINE ::= $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:08:51:34 +0100] "-" 400 0 "-" "-"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:18:40 +0100] "-" 400 0 "-" "-"
| $LOGANALYZER.ANALYZE:IP - - [21/Feb/2014:09:21:29 +0100] "-" 400 0 "-" "-"
'''[1:-1]
result = []
for line in content_lines.split('\n'):
with induce.Tracer(line, result) as t:
summary = accesslog.LogAnalyzer(line, 5)
summary.analyze()
with induce.grammar() as g:
for jframe in result: g.handle_events(jframe)
print(str(g))
assert(grammar == str(g))
|
gpl-3.0
| 5,739,183,233,330,992,000 | 84.846154 | 190 | 0.665054 | false |
PaddlePaddle/models
|
dygraph/sentiment/nets.py
|
1
|
10899
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, Embedding
from paddle.fluid.dygraph import GRUUnit
from paddle.fluid.dygraph.base import to_variable
import numpy as np
class DynamicGRU(fluid.dygraph.Layer):
def __init__(self,
size,
param_attr=None,
bias_attr=None,
is_reverse=False,
gate_activation='sigmoid',
candidate_activation='tanh',
h_0=None,
origin_mode=False,
init_size = None):
super(DynamicGRU, self).__init__()
self.gru_unit = GRUUnit(
size * 3,
param_attr=param_attr,
bias_attr=bias_attr,
activation=candidate_activation,
gate_activation=gate_activation,
origin_mode=origin_mode)
self.size = size
self.h_0 = h_0
self.is_reverse = is_reverse
def forward(self, inputs):
hidden = self.h_0
res = []
for i in range(inputs.shape[1]):
if self.is_reverse:
i = inputs.shape[1] - 1 - i
input_ = inputs[ :, i:i+1, :]
input_ = fluid.layers.reshape(input_, [-1, input_.shape[2]], inplace=False)
hidden, reset, gate = self.gru_unit(input_, hidden)
hidden_ = fluid.layers.reshape(hidden, [-1, 1, hidden.shape[1]], inplace=False)
res.append(hidden_)
if self.is_reverse:
res = res[::-1]
res = fluid.layers.concat(res, axis=1)
return res
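# Shape sketch (an inference from the loop above, not part of the original
# file): given inputs of [batch, seq_len, size * 3] from an upstream fc layer,
# DynamicGRU steps through the time axis one slice at a time and returns
# [batch, seq_len, size], with the time order reversed when is_reverse=True.
#   gru = DynamicGRU(size=128, h_0=to_variable(np.zeros((32, 128), 'float32')))
#   out = gru(fc_out)  # fc_out: [32, T, 384] -> out: [32, T, 128]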
class SimpleConvPool(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
use_cudnn=False,
batch_size=None):
super(SimpleConvPool, self).__init__()
self.batch_size = batch_size
self._conv2d = Conv2D(num_channels = num_channels,
num_filters=num_filters,
filter_size=filter_size,
padding=[1, 1],
use_cudnn=use_cudnn,
act='tanh')
def forward(self, inputs):
x = self._conv2d(inputs)
x = fluid.layers.reduce_max(x, dim=-1)
x = fluid.layers.reshape(x, shape=[self.batch_size, -1])
return x
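# Shape sketch (inferred from the layer definitions, not in the original
# file): with input [N, 1, seq_len, hid_dim], a [3, hid_dim] kernel and
# padding [1, 1] yield conv output [N, num_filters, seq_len, 3]; reduce_max
# over the last axis leaves [N, num_filters, seq_len], which is flattened to
# [batch_size, num_filters * seq_len] for the fc layer that consumes it.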
class CNN(fluid.dygraph.Layer):
def __init__(self, dict_dim, batch_size, seq_len):
super(CNN, self).__init__()
self.dict_dim = dict_dim
self.emb_dim = 128
self.hid_dim = 128
self.fc_hid_dim = 96
self.class_dim = 2
self.channels = 1
self.win_size = [3, self.hid_dim]
self.batch_size = batch_size
self.seq_len = seq_len
self.embedding = Embedding(
size=[self.dict_dim + 1, self.emb_dim],
dtype='float32',
is_sparse=False)
self._simple_conv_pool_1 = SimpleConvPool(
self.channels,
self.hid_dim,
self.win_size,
batch_size=self.batch_size)
self._fc1 = Linear(input_dim = self.hid_dim*self.seq_len, output_dim=self.fc_hid_dim, act="softmax")
self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
output_dim = self.class_dim,
act="softmax")
def forward(self, inputs, label=None):
emb = self.embedding(inputs)
o_np_mask = (inputs.numpy().reshape(-1,1) != self.dict_dim).astype('float32')
mask_emb = fluid.layers.expand(
to_variable(o_np_mask), [1, self.hid_dim])
emb = emb * mask_emb
emb = fluid.layers.reshape(
emb, shape=[-1, self.channels , self.seq_len, self.hid_dim])
conv_3 = self._simple_conv_pool_1(emb)
fc_1 = self._fc1(conv_3)
prediction = self._fc_prediction(fc_1)
        if label is not None:
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, prediction, acc
else:
return prediction
class BOW(fluid.dygraph.Layer):
def __init__(self, dict_dim, batch_size, seq_len):
super(BOW, self).__init__()
self.dict_dim = dict_dim
self.emb_dim = 128
self.hid_dim = 128
self.fc_hid_dim = 96
self.class_dim = 2
self.batch_size = batch_size
self.seq_len = seq_len
self.embedding = Embedding(
size=[self.dict_dim + 1, self.emb_dim],
dtype='float32',
is_sparse=False)
self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim, act="tanh")
self._fc2 = Linear(input_dim = self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
output_dim = self.class_dim,
act="softmax")
def forward(self, inputs, label=None):
emb = self.embedding(inputs)
o_np_mask = (inputs.numpy().reshape(-1,1) != self.dict_dim).astype('float32')
mask_emb = fluid.layers.expand(
to_variable(o_np_mask), [1, self.hid_dim])
emb = emb * mask_emb
emb = fluid.layers.reshape(
emb, shape=[-1, self.seq_len, self.hid_dim])
bow_1 = fluid.layers.reduce_sum(emb, dim=1)
bow_1 = fluid.layers.tanh(bow_1)
fc_1 = self._fc1(bow_1)
fc_2 = self._fc2(fc_1)
prediction = self._fc_prediction(fc_2)
if label is not None:
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, prediction, acc
else:
return prediction
class GRU(fluid.dygraph.Layer):
def __init__(self, dict_dim, batch_size, seq_len):
super(GRU, self).__init__()
self.dict_dim = dict_dim
self.emb_dim = 128
self.hid_dim = 128
self.fc_hid_dim = 96
self.class_dim = 2
self.batch_size = batch_size
self.seq_len = seq_len
self.embedding = Embedding(
size=[self.dict_dim + 1, self.emb_dim],
dtype='float32',
param_attr=fluid.ParamAttr(learning_rate=30),
is_sparse=False)
h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
h_0 = to_variable(h_0)
self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim*3)
self._fc2 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
output_dim=self.class_dim,
act="softmax")
self._gru = DynamicGRU( size= self.hid_dim, h_0=h_0)
def forward(self, inputs, label=None):
emb = self.embedding(inputs)
        o_np_mask = (inputs.numpy().reshape(-1, 1) != self.dict_dim).astype('float32')
mask_emb = fluid.layers.expand(
to_variable(o_np_mask), [1, self.hid_dim])
emb = emb * mask_emb
emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
fc_1 = self._fc1(emb)
gru_hidden = self._gru(fc_1)
gru_hidden = fluid.layers.reduce_max(gru_hidden, dim=1)
tanh_1 = fluid.layers.tanh(gru_hidden)
fc_2 = self._fc2(tanh_1)
prediction = self._fc_prediction(fc_2)
        if label is not None:
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, prediction, acc
else:
return prediction
class BiGRU(fluid.dygraph.Layer):
def __init__(self, dict_dim, batch_size, seq_len):
super(BiGRU, self).__init__()
self.dict_dim = dict_dim
self.emb_dim = 128
self.hid_dim = 128
self.fc_hid_dim = 96
self.class_dim = 2
self.batch_size = batch_size
self.seq_len = seq_len
self.embedding = Embedding(
size=[self.dict_dim + 1, self.emb_dim],
dtype='float32',
param_attr=fluid.ParamAttr(learning_rate=30),
is_sparse=False)
h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
h_0 = to_variable(h_0)
self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim*3)
self._fc2 = Linear(input_dim = self.hid_dim*2, output_dim=self.fc_hid_dim, act="tanh")
self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
output_dim=self.class_dim,
act="softmax")
self._gru_forward = DynamicGRU( size= self.hid_dim, h_0=h_0,is_reverse=False)
self._gru_backward = DynamicGRU( size= self.hid_dim, h_0=h_0,is_reverse=True)
def forward(self, inputs, label=None):
emb = self.embedding(inputs)
        o_np_mask = (inputs.numpy().reshape(-1, 1) != self.dict_dim).astype('float32')
mask_emb = fluid.layers.expand(
to_variable(o_np_mask), [1, self.hid_dim])
emb = emb * mask_emb
emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
fc_1 = self._fc1(emb)
gru_forward = self._gru_forward(fc_1)
gru_backward = self._gru_backward(fc_1)
gru_forward_tanh = fluid.layers.tanh(gru_forward)
gru_backward_tanh = fluid.layers.tanh(gru_backward)
encoded_vector = fluid.layers.concat(
input=[gru_forward_tanh, gru_backward_tanh], axis=2)
encoded_vector = fluid.layers.reduce_max(encoded_vector, dim=1)
fc_2 = self._fc2(encoded_vector)
prediction = self._fc_prediction(fc_2)
        if label is not None:
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, prediction, acc
else:
return prediction
|
apache-2.0
| -5,716,231,717,612,682,000 | 40.758621 | 108 | 0.565373 | false |
nhmc/LAE
|
cloudy/find_par.py
|
1
|
13374
|
from __future__ import division
from math import log, sqrt, pi
from barak.utilities import adict
from barak.absorb import split_trans_name
from barak.io import parse_config, loadobj
from barak.interp import AkimaSpline, MapCoord_Interpolator
from cloudy.utils import read_observed
import numpy as np
import os
from glob import glob
from barak.plot import get_nrows_ncols, puttext
from matplotlib.ticker import AutoMinorLocator
import astropy.constants as c
import astropy.units as u
from astropy.table import Table
import pylab as plt
import sys
# dex 1 sigma error in UVB (and so nH)
Unorm_sig = 0.3
USE_HEXBIN = True
def make_cmap_red():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,9)
cm = plt.cm.Reds(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('red_nhmc', cdict)
def make_cmap_blue():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,15)
cm = plt.cm.Blues(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
r[1:10] = r[4:13]
g[1:10] = g[4:13]
b[1:10] = b[4:13]
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('blue_nhmc', cdict)
def find_min_interval(x, alpha):
""" Determine the minimum interval containing a given probability.
x is an array of parameter values (such as from an MCMC trace).
alpha (0 -> 1) is the desired probability encompassed by the
interval.
Inspired by the pymc function of the same name.
"""
assert len(x) > 1
x = np.sort(x)
# Initialize interval
min_int = None, None
# Number of elements in trace
n = len(x)
# Start at far left
end0 = int(n*alpha)
start, end = 0, end0
# Initialize minimum width to large value
min_width = np.inf
for i in xrange(n - end0):
hi, lo = x[end+i], x[start+i]
width = hi - lo
if width < min_width:
min_width = width
min_int = lo, hi
return min_int
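# Usage sketch (hypothetical data, not from the original file): for a
# unit-normal trace the 68.27% minimum interval brackets roughly +/- 1 sigma,
# so its width should come out close to 2.
#   trace = np.random.randn(100000)
#   lo, hi = find_min_interval(trace, 0.6827)
#   assert 1.9 < hi - lo < 2.1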
def make_interpolators_uvbtilt(trans, simnames):
""" Make interpolators including different UV slopes, given by the
simulation names.
simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),
uvb k values must be sorted in ascending order!
"""
Models = []
aUV = []
for simname in simnames:
# need to define prefix, SIMNAME
gridname = os.path.join(simname, 'grid.cfg')
#print 'Reading', gridname
cfg = parse_config(gridname)
aUV.append(cfg.uvb_tilt)
name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
#print 'Reading', name
M = loadobj(name)
M = adict(M)
Uconst = (M.U + M.nH)[0]
#print 'Uconst', Uconst, cfg.uvb_tilt
assert np.allclose(Uconst, M.U + M.nH)
Models.append(M)
##########################################################################
# Interpolate cloudy grids onto a finer scale for plotting and
# likelihood calculation
##########################################################################
roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}
Ncloudy = {}
Ncloudy_raw = {}
#print 'Interpolating...'
for tr in trans:
shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
Nvals = np.zeros(shape)
if tr == 'Tgas':
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M['Tgas'][:,:,:,0]
elif tr == 'NH':
for i,M in enumerate(Models):
logNHI = M.N['H'][:,:,:,0]
logNHII = M.N['H'][:,:,:,1]
logNHtot = np.log10(10**logNHI + 10**logNHII)
Nvals[:,:,:,i] = logNHtot
elif tr in ['CII*']:
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
else:
atom, stage = split_trans_name(tr)
ind = roman_map[stage]
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]
# use ndimage.map_coordinates (which is spline interpolation)
coord = M.NHI, M.nH, M.Z, aUV
try:
Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
except:
import pdb; pdb.set_trace()
Ncloudy_raw[tr] = Nvals
#print 'done'
return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
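# Call convention (evidenced by the Ncloudy['Tgas'](parvals[:,-4:].T) call
# further down): each returned interpolator takes a (4, nsamples) array of
# (NHI, nH, Z, aUV) coordinates and returns the interpolated quantity at each
# sample -- a log column density, or log Tgas / log NHtot for the special keys.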
def triplot(names, vals, sigvals, fig, indirect={}, labels=None, fontsize=14):
from barak.plot import hist_yedge, hist_xedge, puttext
npar = len(names)
bins = {}
for n in names:
x0, x1 = vals[n].min(), vals[n].max()
dx = x1 - x0
lo = x0 - 0.1*dx
hi = x1 + 0.1*dx
bins[n] = np.linspace(lo, hi, 20)
axes = {}
for i0,n0 in enumerate(names):
for i1,n1 in enumerate(names):
if i0 == i1:# or i1 < i0: # uncomment to keep just one triangle.
continue
ax = fig.add_subplot(npar,npar, i0 * npar + i1 + 1)
ax.locator_params(tight=True, nbins=8)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
axes[(n0 + ' ' + n1)] = ax
y,x = vals[n0], vals[n1]
if USE_HEXBIN:
ax.hexbin(x,y,cmap=CM, gridsize=40,linewidths=0.1)
else:
ax.plot(x,y,'r.', ms=0.5, mew=0)#, alpha=0.5)
color = 'k' if n0 not in indirect else 'g'
text = labels[n0] if labels is not None else n0
puttext(0.05, 0.95, text, ax, color=color ,fontsize=fontsize, va='top')
color = 'k' if n1 not in indirect else 'g'
text = labels[n1] if labels is not None else n1
puttext(0.95, 0.08, text, ax, color=color ,fontsize=fontsize, ha='right')
# set limits
y0, y1 = np.percentile(vals[n0], [5, 95])
dy = y1 - y0
ax.set_ylim(y0 - dy, y1 + dy)
x0, x1 = np.percentile(vals[n1], [5, 95])
dx = x1 - x0
ax.set_xlim(x0 - dx, x1 + dx)
c = 'k'
if i0 == 0:
ax.xaxis.set_tick_params(labeltop='on')
ax.xaxis.set_tick_params(labelbottom='off')
for t in ax.get_xticklabels():
t.set_rotation(60)
elif i0 == npar-1 or (i0 == npar-2 and i1 == npar-1):
hist_xedge(vals[n1], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n1], loc='bottom')
ax.axvline(sigvals[n1][0], ymax=0.2, color=c, lw=0.5)
ax.axvline(sigvals[n1][1], ymax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n1]) / 2.
ax.axvline(cen, ymax=0.2, color=c, lw=1.5)
for t in ax.get_xticklabels():
t.set_rotation(60)
else:
ax.set_xticklabels('')
if not (i1 == 0 or (i0 == 0 and i1 == 1) or i1 == npar-1):
ax.set_yticklabels('')
if (i0 == 0 and i1 == 1) or i1 == 0:
hist_yedge(vals[n0], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n0], loc='left')
ax.axhline(sigvals[n0][0], xmax=0.2, color=c, lw=0.5)
ax.axhline(sigvals[n0][1], xmax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n0]) / 2.
ax.axhline(cen, xmax=0.2, color=c, lw=1.5)
if i1 == npar - 1:
ax.yaxis.set_tick_params(labelright='on')
ax.yaxis.set_tick_params(labelleft='off')
#ax.minorticks_on()
return axes
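# Usage sketch (hypothetical chains; the interval convention matches the rest
# of this script): every plotted parameter needs a trace in `vals` and a
# (lo, hi) pair in `sigvals`, e.g.
#   vals = dict(U=logU, Z=logZ, N=logNtot)
#   sigvals = {k: find_min_interval(v, 0.6827) for k, v in vals.items()}
#   fig = plt.figure(figsize=(8, 8))
#   axes = triplot(['U', 'Z', 'N'], vals, sigvals, fig)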
if 1:
print_header = False
if len(sys.argv[1:]) > 0 and sys.argv[1] == '--header':
print_header = True
if 1:
##################################################
# Read configuration file, set global variables
##################################################
testing = 0
cfgname = 'model.cfg'
# we only need the cfg file for the prefix of the cloudy runs and
# the name of the file with the observed column densities.
opt = parse_config(cfgname)
simnames = sorted(glob(opt['simname']))
#print opt['simname']
#print simnames
#CM = make_cmap_blue() # plt.cm.binary
#CM = make_cmap_red() # plt.cm.binary
CM = plt.cm.gist_heat_r # plt.cm.binary
#CM = plt.cm.afmhot_r # plt.cm.binary
#CM = plt.cm.bone_r # plt.cm.binary
#CM = plt.cm.terrain_r # plt.cm.binary
#CM = plt.cm.ocean_r # plt.cm.binary
trans = 'Tgas', 'NH'
if 1:
################################################################
# Read the cloudy grids and make the interpolators
################################################################
Ncloudy, Ncloudy_raw, Models, aUV = make_interpolators_uvbtilt(
trans, simnames)
M = Models[0]
#import pdb; pdb.set_trace()
Uconst_vals = []
for model in Models:
Uconst_vals.append((model['U'] + model['nH'])[0])
# note it's a function of aUV!
Uconst = AkimaSpline(aUV, Uconst_vals)
# Now find the parameter chains
samples = loadobj('samples_mcmc.sav.gz')
nwalkers, nsamples, npar = samples['chain'].shape
parvals = samples['chain'].reshape(-1, npar)
PAR = samples['par']
assert PAR['names'][-1] == 'aUV'
assert PAR['names'][-2] == 'Z'
assert PAR['names'][-3] == 'nH'
assert PAR['names'][-4] == 'NHI'
aUV = parvals[:,-1]
logZ = parvals[:,-2]
lognH = parvals[:,-3]
logNHI = parvals[:,-4]
logU = Uconst(aUV) - lognH
#import pdb; pdb.set_trace()
# call the interpolators with these parameter values.
logT = Ncloudy['Tgas'](parvals[:,-4:].T)
logNtot = Ncloudy['NH'](parvals[:,-4:].T)
# note this is log of D in kpc
logD = logNtot - lognH - np.log10(c.kpc.to(u.cm).value)
logP = logT + lognH
#import pdb; pdb.set_trace()
H_massfrac = 0.76 # (1 / mu)
# Joe's mass calculation
mass = 4./3. * pi * (3./4. * 10**logD * u.kpc)**3 * 10**lognH * \
u.cm**-3 * u.M_p / H_massfrac
# D = NH / nH
logM = np.log10(mass.to(u.M_sun).value)
if 1:
# print out the results and uncertainties
vals = dict(U=logU, T=logT, N=logNtot, D=logD, P=logP, M=logM,
nH=lognH, aUV=aUV, NHI=logNHI, Z=logZ)
levels = 0.6827, 0.9545
sigvals = {}
for key in vals:
sigvals[key] = find_min_interval(vals[key], levels[0])
if print_header:
print r'$\log(Z/Z_\odot)$&$\alpha_{UV}$ & $\log \nH$ & $\log U$& $\log \NHI$ & $\log \NH$& $\log T$ & $\log (P/k)$& $\log D$ & $\log M$ \\'
print r' & & (\cmmm) & & (\cmm) & (\cmm) & (K) & (\cmmm K) & (kpc) & (\msun) \\'
s = ''
ALLPAR = 'Z aUV nH U NHI N T P D M'.split()
for key in ALLPAR:
sig = 0.5 * (sigvals[key][1] - sigvals[key][0])
val = 0.5 * (sigvals[key][1] + sigvals[key][0])
if key in {'nH', 'D', 'P'}:
sig1 = np.hypot(sig, Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
elif key == 'M':
sig1 = np.hypot(sig, 2*Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
else:
s += '$%.2f\\pm%.2f$ &' % (val, sig)
print s[:-1] + r'\\'
if 1:
labels = dict(U='$U$', Z='$Z$', NHI='$N_\mathrm{HI}$', aUV=r'$\alpha_\mathrm{UV}$',
T='$T$', P='$P$', N='$N_\mathrm{H}$', D='$D$', M='$Mass$')
if 0:
fig = plt.figure(figsize=(12,12))
fig.subplots_adjust(left=0.05, bottom=0.05, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
names = 'U Z NHI aUV T P N D M'.split()
#direct = 'U Z NHI aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels)
plt.savefig('par.png', dpi=200)
if 1:
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(left=0.095, bottom=0.105, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=9.5)
plt.rc('ytick', labelsize=9.5)
names = 'U Z N aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels, fontsize=16)
axes['U Z'].set_ylabel('$\log_{10}U$')
axes['Z U'].set_ylabel('$\log_{10}[Z/Z_\odot]$')
axes['N U'].set_ylabel('$\log_{10}N_\mathrm{H}$')
axes['aUV U'].set_ylabel(r'$\log_{10}\alpha_\mathrm{UV}$')
axes['aUV U'].set_xlabel('$\log_{10}U$')
axes['aUV Z'].set_xlabel('$\log_{10}[Z/Z_\odot]$')
axes['aUV N'].set_xlabel('$\log_{10}N_\mathrm{H}$')
axes['N aUV'].set_xlabel(r'$\log_{10}\alpha_\mathrm{UV}$')
# special case:
if os.path.abspath('.') == '/Users/ncrighton/Projects/MPIA_QSO_LBG/Cloudy/J0004_NHI_2/comp1/final':
for k in ('N U', 'N Z', 'N aUV'):
axes[k].set_ylim(17.3, 19.2)
for k in ('U N', 'Z N', 'aUV N'):
axes[k].set_xlim(17.3, 19.2)
#plt.savefig('par2.pdf')
plt.savefig('par2.png',dpi=250)
|
mit
| 7,457,585,991,942,735,000 | 30.842857 | 147 | 0.515253 | false |
frederick623/wat
|
option_dl.py
|
1
|
1542
|
import sys
import urllib2
import zipfile
import StringIO
import datetime
output_path = "hkex_options"
def main(argv):
if len(argv) > 1:
dt = datetime.datetime.strptime(argv[1], "%Y%m%d").date()
else:
dt = datetime.date.today() + datetime.timedelta(days=-3)
while dt < datetime.date.today():
if int(dt.strftime("%w")) > 0 and int(dt.strftime("%w")) < 6:
			try:
				for report in ("dqe", "hsio", "hhio", "mhio"):
					opt = ("http://www.hkex.com.hk/eng/stat/dmstat/dayrpt/" + report + "YYMMDD.zip").replace("YYMMDD", dt.strftime("%y%m%d"))
					data = urllib2.urlopen(opt).read()
					z = zipfile.ZipFile(StringIO.StringIO(data))
					z.extractall(output_path)
					print "Downloaded " + opt
			except:
				print "Skipped " + dt.strftime("%y%m%d")
dt = dt + datetime.timedelta(days=1)
return
# main(sys.argv)
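# Usage sketch (assumes Python 2 and that the HKEX daily-report URLs above are
# still reachable; main() is left commented out in this file, so the driver
# call is hypothetical):
#   python option_dl.py 20140221   # backfill starting from a given date
#   python option_dl.py            # default: start three days back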
|
apache-2.0
| -3,635,285,752,176,639,000 | 31.145833 | 112 | 0.666018 | false |
anish/buildbot
|
master/buildbot/test/unit/test_steps_source_base_Source.py
|
1
|
6613
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.trial import unittest
from buildbot.steps.source import Source
from buildbot.test.util import sourcesteps
from buildbot.test.util import steps
from buildbot.test.util.misc import TestReactorMixin
class TestSource(sourcesteps.SourceStepMixin, TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_start_alwaysUseLatest_True(self):
step = self.setupStep(Source(alwaysUseLatest=True),
{
'branch': 'other-branch',
'revision': 'revision',
},
patch='patch'
)
step.branch = 'branch'
step.startVC = mock.Mock()
step.startStep(mock.Mock())
self.assertEqual(step.startVC.call_args, (('branch', None, None), {}))
def test_start_alwaysUseLatest_False(self):
step = self.setupStep(Source(),
{
'branch': 'other-branch',
'revision': 'revision',
},
patch='patch'
)
step.branch = 'branch'
step.startVC = mock.Mock()
step.startStep(mock.Mock())
self.assertEqual(
step.startVC.call_args, (('other-branch', 'revision', 'patch'), {}))
def test_start_alwaysUseLatest_False_no_branch(self):
step = self.setupStep(Source())
step.branch = 'branch'
step.startVC = mock.Mock()
step.startStep(mock.Mock())
self.assertEqual(step.startVC.call_args, (('branch', None, None), {}))
def test_start_no_codebase(self):
step = self.setupStep(Source())
step.branch = 'branch'
step.startVC = mock.Mock()
step.build.getSourceStamp = mock.Mock()
step.build.getSourceStamp.return_value = None
self.assertEqual(step.describe(), ['updating'])
self.assertEqual(step.name, Source.name)
step.startStep(mock.Mock())
self.assertEqual(step.build.getSourceStamp.call_args[0], ('',))
self.assertEqual(step.description, ['updating'])
def test_start_with_codebase(self):
step = self.setupStep(Source(codebase='codebase'))
step.branch = 'branch'
step.startVC = mock.Mock()
step.build.getSourceStamp = mock.Mock()
step.build.getSourceStamp.return_value = None
self.assertEqual(step.describe(), ['updating', 'codebase'])
step.name = self.successResultOf(step.build.render(step.name))
self.assertEqual(step.name, Source.name + "-codebase")
step.startStep(mock.Mock())
self.assertEqual(step.build.getSourceStamp.call_args[0], ('codebase',))
self.assertEqual(step.describe(True), ['update', 'codebase'])
def test_start_with_codebase_and_descriptionSuffix(self):
step = self.setupStep(Source(codebase='my-code',
descriptionSuffix='suffix'))
step.branch = 'branch'
step.startVC = mock.Mock()
step.build.getSourceStamp = mock.Mock()
step.build.getSourceStamp.return_value = None
self.assertEqual(step.describe(), ['updating', 'suffix'])
step.name = self.successResultOf(step.build.render(step.name))
self.assertEqual(step.name, Source.name + "-my-code")
step.startStep(mock.Mock())
self.assertEqual(step.build.getSourceStamp.call_args[0], ('my-code',))
self.assertEqual(step.describe(True), ['update', 'suffix'])
class TestSourceDescription(steps.BuildStepMixin, TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_constructor_args_strings(self):
step = Source(workdir='build',
description='svn update (running)',
descriptionDone='svn update')
self.assertEqual(step.description, ['svn update (running)'])
self.assertEqual(step.descriptionDone, ['svn update'])
def test_constructor_args_lists(self):
step = Source(workdir='build',
description=['svn', 'update', '(running)'],
descriptionDone=['svn', 'update'])
self.assertEqual(step.description, ['svn', 'update', '(running)'])
self.assertEqual(step.descriptionDone, ['svn', 'update'])
class AttrGroup(Source):
def other_method(self):
pass
def mode_full(self):
pass
def mode_incremental(self):
pass
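# Naming convention exercised below (an inference from these tests, not
# buildbot documentation): _getAttrGroupMember('mode', 'full') resolves to the
# method literally named mode_full, so a subclass exposes a new mode simply by
# defining another mode_<name> method.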
class TestSourceAttrGroup(sourcesteps.SourceStepMixin, TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_attrgroup_hasattr(self):
step = AttrGroup()
self.assertTrue(step._hasAttrGroupMember('mode', 'full'))
self.assertTrue(step._hasAttrGroupMember('mode', 'incremental'))
self.assertFalse(step._hasAttrGroupMember('mode', 'nothing'))
def test_attrgroup_getattr(self):
step = AttrGroup()
self.assertEqual(step._getAttrGroupMember('mode', 'full'),
step.mode_full)
self.assertEqual(step._getAttrGroupMember('mode', 'incremental'),
step.mode_incremental)
with self.assertRaises(AttributeError):
step._getAttrGroupMember('mode', 'nothing')
def test_attrgroup_listattr(self):
step = AttrGroup()
self.assertEqual(sorted(step._listAttrGroupMembers('mode')),
['full', 'incremental'])
|
gpl-2.0
| 8,742,843,434,144,445,000 | 33.805263 | 80 | 0.617118 | false |
rschiang/ntusc-statute
|
scripts/utils.py
|
1
|
3691
|
# utils.py - additional functions
import re
# Common constants
RE_CJK_NUMERICS = r'〇ㄧ一二三四五六七八九十百零'
RE_CJK_NUMERICS_MIXED = r'〇ㄧ一二三四五六七八九十零壹貳參肆伍陸柒捌玖拾'
RE_CJK_NUMERICS_SINGLE = r'一二三四五六七八九十'
RE_CJK_PATTERN = '[\u3400-\u4DB5\u4E00-\u9FD5]'
RE_CJK_BOUNDARY_PRE = re.compile(r'(?<=' + RE_CJK_PATTERN + r')\s*([\d\-A-Za-z\(]+)')
RE_CJK_BOUNDARY_POST = re.compile(r'([\d\-A-Za-z\)]+)\s*(?=' + RE_CJK_PATTERN + r')')
RE_CJK_BRACKETED_NUMBER = re.compile(r'[（\(]([' + RE_CJK_NUMERICS + r']+)[\)）]')
RE_BRACKETED_NUMBER = re.compile(r'[（\(](\d+)[\)）]')
RE_FULLWIDTH_BRACKET = re.compile(r'（([A-Za-z\u00c0-\u04ff\s]+)）')
RE_HALFWIDTH_BRACKET = re.compile(r'\(([^A-Za-z\)）]+)\)')
UNICODE_THIN_SPACE = '\u2009'
RE_REPL_PRE = UNICODE_THIN_SPACE + r'\1'
RE_REPL_POST = r'\1' + UNICODE_THIN_SPACE
RE_REPL_FULLWIDTH_BRACKET = r'（\1）'
RE_REPL_HALFWIDTH_BRACKET = r'(\1)'
CJK_NUMERIC_INDEX = '零一二三四五六七八九'
CJK_BRACKETED_NUMBERS = '㈠㈡㈢㈣㈤㈥㈦㈧㈨㈩'
def normalize_spaces(text):
text = text.replace('\u3000', '')
text = RE_CJK_BOUNDARY_PRE.sub(RE_REPL_PRE, text)
text = RE_CJK_BOUNDARY_POST.sub(RE_REPL_POST, text)
return text
def normalize_brackets(text):
text = RE_HALFWIDTH_BRACKET.sub(RE_REPL_FULLWIDTH_BRACKET, text)
text = RE_FULLWIDTH_BRACKET.sub(RE_REPL_HALFWIDTH_BRACKET, text)
return text
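# Example (illustrative strings, not from the original file): Latin text in
# fullwidth brackets is narrowed while CJK text in halfwidth brackets is
# widened, so
#   normalize_brackets(u'（abc）(中文)') -> u'(abc)（中文）'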
def normalize_bracketed_numbers(text):
text = RE_CJK_BRACKETED_NUMBER.sub(repl_cjk_bracketed_numbers, text)
text = RE_BRACKETED_NUMBER.sub(r'<span class="bracketed number">\1</span>', text)
return text
def convert_cjk_number(text):
# Sniff alphanumerics
if text.isdecimal():
return text.lstrip('0')
# Normalize numeric representation
text = text.replace('〇', '零').replace('ㄧ', '一')
result = 0
# Sniff numeric type, handle formats like 一零三, 五四
if len(text) > 1 and '十' not in text and '百' not in text:
while len(text):
result *= 10
result += CJK_NUMERIC_INDEX.index(text[0])
text = text[1:]
return result
# Process regular format
digit = 0
for char in text:
value = CJK_NUMERIC_INDEX.find(char)
if value >= 0:
digit = value
else:
# Guess unit
if char == '百':
unit = 100
elif char == '十':
unit = 10
# 一 is probably omitted
if digit == 0:
result += unit
else:
result += digit * unit
# Reset digit
digit = 0
# Add the last digit
if digit > 0:
result += digit
return result
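# Worked examples (illustrative inputs, not from the original file):
#   convert_cjk_number(u'一百二十三') -> 123    # regular 百/十 format
#   convert_cjk_number(u'五四')      -> 54     # digit-by-digit format
#   convert_cjk_number(u'十五')      -> 15     # leading 一 omitted before 十
#   convert_cjk_number(u'123')       -> u'123' # decimals pass through as text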
def repl_cjk_bracketed_numbers(matchobj):
text = matchobj.group(1)
index = RE_CJK_NUMERICS_SINGLE.find(text)
if index >= 0:
return CJK_BRACKETED_NUMBERS[index]
else:
return '({})'.format(convert_cjk_number(text))
def repl_cjk_date(matchobj):
return '民國{}年{}月{}日'.format(
convert_cjk_number(matchobj.group('year')),
convert_cjk_number(matchobj.group('month')),
convert_cjk_number(matchobj.group('day')))
def repl_cjk_semester(matchobj):
return '{}學年度第{}學期'.format(
convert_cjk_number(matchobj.group('year')),
convert_cjk_number(matchobj.group('semester')))
def repl_numeric_date(matchobj):
return '民國{}年{}月{}日'.format(*(d.lstrip('0') for d in matchobj.groups()))
def repl_numeric_inline_date(matchobj):
return '{}年{}月{}日'.format(*(d.lstrip('0') for d in matchobj.groups()))
|
gpl-3.0
| -1,518,688,606,506,065,200 | 31.764151 | 85 | 0.603225 | false |
er432/TASSELpy
|
TASSELpy/test/net/maizegenetics/dna/snp/GenotypeTableTest.py
|
1
|
17490
|
import unittest
import javabridge
from javabridge import JavaException, is_instance_of
import numpy as np
from TASSELpy.TASSELbridge import TASSELbridge
try:
try:
javabridge.get_env()
except AttributeError:
print("AttributeError: start bridge")
TASSELbridge.start()
except AssertionError:
print("AssertionError: start bridge")
TASSELbridge.start()
except:
raise RuntimeError("Could not start JVM")
from TASSELpy.net.maizegenetics.dna.WHICH_ALLELE import WHICH_ALLELE
from TASSELpy.net.maizegenetics.dna.snp.GenotypeTable import GenotypeTable
from TASSELpy.net.maizegenetics.dna.map.PositionList import PositionList
from TASSELpy.net.maizegenetics.taxa.TaxaList import TaxaList
from TASSELpy.net.maizegenetics.util.BitSet import BitSet
from TASSELpy.net.maizegenetics.dna.snp.ImportUtils import ImportUtils
from TASSELpy.utils.primativeArray import meta_byte_array, meta_long_array, meta_int_array, javaPrimativeArray
from TASSELpy.javaObj import javaArray
from TASSELpy.net.maizegenetics.dna.map.Chromosome import Chromosome
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.java.lang.Long import metaLong
from TASSELpy.java.lang.String import String,metaString
from TASSELpy.java.lang.Byte import metaByte
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Double import metaDouble
from TASSELpy.net.maizegenetics.dna.snp.depth.AlleleDepth import AlleleDepth
from TASSELpy.data import data_constants
debug = False
java_imports = {'IllegalStateException': 'java/lang/IllegalStateException',
'NullPointerException': 'java/lang/NullPointerException',
'UnsupportedOperationException': 'java/lang/UnsupportedOperationException'}
class GenotypeTableTest(unittest.TestCase):
""" Tests for GenotypeTable.py """
@classmethod
def setUpClass(cls):
# Load data
try:
cls.data = ImportUtils.readGuessFormat(data_constants.SHORT_HMP_FILE)
except:
raise ValueError("Could not load test data")
def test_genotypeArray(self):
if debug: print "Testing genotypeArray"
arr = self.data.genotypeArray(0,0)
self.assertIsInstance(arr,meta_byte_array)
def test_genotype(self):
if debug: print "Testing genotype"
first_site_chrom = self.data.chromosome(0)
first_site_pos = self.data.chromosomalPosition(0)
geno1 = self.data.genotype(0,0)
self.assertIsInstance(geno1, metaByte)
self.assertEqual(geno1, self.data.genotype(0,first_site_chrom,first_site_pos))
def test_genotypeRange(self):
if debug: print "Testing genotypeRange"
arr = self.data.genotypeRange(0,0,1)
self.assertIsInstance(arr,meta_byte_array)
def test_genotypeAllSites(self):
if debug: print "Testing genotypeAllSites"
arr = self.data.genotypeAllSites(0)
self.assertIsInstance(arr,meta_byte_array)
def test_genotypeAllTaxa(self):
if debug: print "Testing genotypeAllTaxa"
arr = self.data.genotypeAllTaxa(0)
self.assertIsInstance(arr,meta_byte_array)
def test_allelePresenceForAllSites(self):
if debug: print "Testing allelePresenceForAllSites"
bitset_major = self.data.allelePresenceForAllSites(0,WHICH_ALLELE.Major)
self.assertIsInstance(bitset_major,BitSet)
def test_allelePresenceForSitesBlock(self):
if debug: print "Testing allelePresenceForSitesBlock"
arr = self.data.allelePresenceForSitesBlock(0,WHICH_ALLELE.Major,0,1)
self.assertIsInstance(arr,meta_long_array)
def test_haplotypeAllelePresenceForAllSites(self):
if debug: print "Testing haplotypeAllelePresenceForAllSites"
try:
bitset_major = self.data.haplotypeAllelePresenceForAllSites(0,True,WHICH_ALLELE.Major)
self.assertIsInstance(bitset_major,BitSet)
except JavaException as e:
self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
def test_haplotypeAllelePresenceForAllTaxa(self):
if debug: print "Testing haplotypeAllelePresenceForAllTaxa"
try:
bitset_major = self.data.haplotypeAllelePresenceForAllTaxa(0,True,WHICH_ALLELE.Major)
self.assertIsInstance(bitset_major,BitSet)
except JavaException as e:
self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
def test_haplotypeAllelePresenceForSitesBlock(self):
if debug: print "Testing haplotypeAllelePresenceForSitesBlock"
try:
arr = self.data.haplotypeAllelePresenceForSitesBlock(0,True,WHICH_ALLELE.Major,
0,1)
self.assertIsInstance(arr,meta_long_array)
except JavaException as e:
self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
def test_genotypeAsString(self):
if debug: print "Testing genotypeAsString"
geno1 = self.data.genotypeAsString(0,0)
geno2 = self.data.genotypeAsString(0,np.int8(0))
self.assertIsInstance(geno1,metaString)
self.assertIsInstance(geno2,metaString)
def test_genotypeAsStringRange(self):
if debug: print "Testing genotypeAsStringRange"
genos = self.data.genotypeAsStringRange(0,0,1)
self.assertIsInstance(genos,metaString)
def test_genotypeAsStringRow(self):
if debug: print "Testing genotypeAsStringRow"
genos = self.data.genotypeAsStringRow(0)
self.assertIsInstance(genos,metaString)
def test_genotypeAsStringArray(self):
if debug: print "Testing genotypeAsStringArray"
arr = self.data.genotypeAsStringArray(0,0)
self.assertIsInstance(arr[0],String)
def test_referenceAllele(self):
if debug: print "Testing referenceAllele"
ref = self.data.referenceAllele(0)
self.assertIsInstance(ref,metaByte)
def test_referenceAlleles(self):
if debug: print "Testing referenceAlleles"
arr = self.data.referenceAlleles(0,1)
self.assertIsInstance(arr,meta_byte_array)
def test_referenceAlleleForAllSites(self):
if debug: print "Testing referenceAlleleForAllSites"
arr = self.data.referenceAlleleForAllSites()
self.assertIsInstance(arr,meta_byte_array)
def test_hasReference(self):
if debug: print "Testing hasReference"
self.assertFalse(self.data.hasReference())
def test_isHeterozygous(self):
if debug: print "Testing isHeterozygous"
self.assertIsInstance(self.data.isHeterozygous(0,0),metaBoolean)
def test_heterozygousCount(self):
if debug: print "Testing heterozygousCount"
self.assertIsInstance(self.data.heterozygousCount(0),metaInteger)
def test_siteName(self):
if debug: print "Testing siteName"
self.assertIsInstance(self.data.siteName(0),metaString)
def test_chromosomeSiteCount(self):
if debug: print "Testing chromosomeSitecount"
first_site_chrom = self.data.chromosome(0)
count = self.data.chromosomeSiteCount(first_site_chrom)
self.assertIsInstance(count,metaInteger)
def test_firstLastSiteOfChromosome(self):
if debug: print "Testing firstLastSiteOfChromosome"
first_site_chrom = self.data.chromosome(0)
endpoints = self.data.firstLastSiteOfChromosome(first_site_chrom)
self.assertIsInstance(endpoints, meta_int_array)
def test_numberOfTaxa(self):
if debug: print "Testing numberOfTaxa"
self.assertIsInstance(self.data.numberOfTaxa(), metaInteger)
def test_positions(self):
if debug: print "Testing positions"
poslist = self.data.positions()
self.assertIsInstance(poslist, PositionList)
def test_chromosomalPosition(self):
if debug: print "Testing chromosomalPosition"
self.assertIsInstance(self.data.chromosomalPosition(0),metaInteger)
def test_siteOfPhysicalPosition(self):
if debug: print "Testing siteOfPhysicalPosition"
site1 = self.data.siteOfPhysicalPosition(data_constants.SHORT_HMP_FILE_FIRST_POS,
Chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM))
site2 = self.data.siteOfPhysicalPosition(data_constants.SHORT_HMP_FILE_FIRST_POS,
Chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM),
data_constants.SHORT_HMP_FILE_FIRST_SITENAME)
self.assertEquals(site1,0)
self.assertEqual(site1,site2)
def test_physicalPosition(self):
if debug: print "Testing physicalPositions"
positions = self.data.physicalPositions()
self.assertIsInstance(positions, meta_int_array)
def test_chromosomeName(self):
if debug: print "Testing chromosomeName"
self.assertEquals(self.data.chromosomeName(0), data_constants.SHORT_HMP_FILE_FIRST_CHROM)
def test_chromosome(self):
if debug: print "Testing chromosome"
chrom1 = self.data.chromosome(0)
chrom2 = self.data.chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM)
self.assertEquals(chrom1.getName(), data_constants.SHORT_HMP_FILE_FIRST_CHROM)
self.assertEqual(chrom1,chrom2)
def test_chromosomes(self):
if debug: print "Testing chromosomes"
chroms = self.data.chromosomes()
self.assertIsInstance(chroms,javaArray)
self.assertIsInstance(chroms[0], Chromosome)
def test_numChromosomes(self):
if debug: print "Testing numChromosomes"
self.assertIsInstance(self.data.numChromosomes(),metaInteger)
def test_chromosomesOffsets(self):
if debug: print "Testing chromosomesOffsets"
arr = self.data.chromosomesOffsets()
self.assertIsInstance(arr,meta_int_array)
def test_hasDepth(self):
if debug: print "Testing hasDepth"
self.assertIsInstance(self.data.hasDepth(),metaBoolean)
def test_hasAlleleProbabilities(self):
if debug: print("Testing hasAlleleProbabilities")
self.assertFalse(self.data.hasAlleleProbabilities())
def test_indelSize(self):
if debug: print "Testing indelSize"
self.assertIsInstance(self.data.indelSize(0),metaInteger)
def test_isIndel(self):
if debug: print "Testing isIndel"
self.assertIsInstance(self.data.isIndel(0),metaBoolean)
def test_isAllPolymorphic(self):
if debug: print "Testing isAllPolymorphic"
self.assertIsInstance(self.data.isAllPolymorphic(),metaBoolean)
def test_isPolymorphic(self):
if debug: print "Testing isPolymorphic"
self.assertIsInstance(self.data.isPolymorphic(0),metaBoolean)
def test_majorAllele(self):
if debug: print "Testing majorAllele"
self.assertIsInstance(self.data.majorAllele(0),metaByte)
def test_majorAlleleAsString(self):
if debug: print "Testing majorAlleleAsString"
self.assertIsInstance(self.data.majorAlleleAsString(0),metaString)
def test_minorAllele(self):
if debug: print "Testing minorAllele"
self.assertIsInstance(self.data.minorAllele(0),metaByte)
def test_minorAlleleAsString(self):
if debug: print "Testing minorAlleleAsString"
self.assertIsInstance(self.data.minorAlleleAsString(0),metaString)
def test_minorAlleles(self):
if debug: print "Testing minorAlleles"
self.assertIsInstance(self.data.minorAlleles(0),meta_byte_array)
def test_alleles(self):
if debug: print "Testing alleles"
self.assertIsInstance(self.data.alleles(0), meta_byte_array)
def test_minorAlleleFrequency(self):
if debug: print "Testing minorAlleleFrequency"
self.assertIsInstance(self.data.minorAlleleFrequency(0),metaDouble)
def test_majorAlleleFrequency(self):
if debug: print "Testing majorAlleleFrequency"
self.assertIsInstance(self.data.majorAlleleFrequency(0),metaDouble)
def test_taxa(self):
if debug: print "Testing taxa"
taxa = self.data.taxa()
self.assertIsInstance(taxa, TaxaList)
def test_taxaName(self):
if debug: print "Testing taxaName"
self.assertIsInstance(self.data.taxaName(0), metaString)
def test_genomeVersion(self):
if debug: print "Testing genomeVersion"
try:
version = self.data.genomeVersion()
if version is not None:
self.assertIsInstance(version, metaString)
except JavaException as e:
self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
def test_isPositiveStrand(self):
if debug: print "Testing isPositiveStrand"
self.assertIsInstance(self.data.isPositiveStrand(0),metaBoolean)
def test_compositeAlignments(self):
if debug: print "Testing compositeAlignments"
alns = self.data.compositeAlignments()
exp_arr_type = javaArray.get_array_type(GenotypeTable)
self.assertIsInstance(alns, exp_arr_type)
def test_allelesSortedByFrequency(self):
if debug: print "Testing allelesSortedByFrequency"
arr = self.data.allelesSortedByFrequency(0)
exp_arr_type = javaArray.get_array_type(javaPrimativeArray.get_array_type('int'))
self.assertIsInstance(arr,exp_arr_type)
def test_genosSortedByFrequency(self):
if debug: print "Testing genosSortedByFrequency"
arr = self.data.genosSortedByFrequency(0)
self.assertIsInstance(arr[0][0],metaString)
self.assertIsInstance(arr[1][0],metaInteger)
def test_isPhased(self):
if debug: print "Testing isPhased"
self.assertIsInstance(self.data.isPhased(),metaBoolean)
def test_retainsRareAlleles(self):
if debug: print "Testing retainsRareAlleles"
self.assertIsInstance(self.data.retainsRareAlleles(),metaBoolean)
def test_alleleDefinitions(self):
if debug: print "Testing alleleDefinitions"
arr1 = self.data.alleleDefinitions()
arr2 = self.data.alleleDefinitions(0)
self.assertIsInstance(arr1[0][0], metaString)
self.assertEqual(arr1[0][0], arr2[0])
def test_diploidAsString(self):
if debug: print "Testing diploidAsString"
val = self.data.diploidAsString(0,np.int8(0))
self.assertIsInstance(val,metaString)
def test_maxNumAlleles(self):
if debug: print "Testing maxNumAlleles"
self.assertIsInstance(self.data.maxNumAlleles(), metaInteger)
def test_totalGametesNonMissingForSites(self):
if debug: print "Testing totalGametesNonMissingForSite"
self.assertIsInstance(self.data.totalGametesNonMissingForSite(0), metaInteger)
def test_totalNonMissingForSite(self):
if debug: print "Testing totalNonMissingForSite"
self.assertIsInstance(self.data.totalNonMissingForSite(0), metaInteger)
def test_minorAlleleCount(self):
if debug: print "Testing minorAlleleCount"
self.assertIsInstance(self.data.minorAlleleCount(0), metaInteger)
def test_majorAlleleCount(self):
if debug: print "Testing majorAlleleCount"
self.assertIsInstance(self.data.majorAlleleCount(0), metaInteger)
def test_genoCount(self):
if debug: print "Testing genoCount"
arr = self.data.genoCounts()
self.assertIsInstance(arr[0][0], metaString)
self.assertIsInstance(arr[1][0], metaLong)
def test_majorMinorCounts(self):
if debug: print "Testing majorMinorCounts"
arr = self.data.majorMinorCounts()
self.assertIsInstance(arr[0][0], metaString)
self.assertIsInstance(arr[1][0], metaLong)
def test_totalGametesNonMissingForTaxon(self):
if debug: print "Testing totalGametesNonMissingForTaxon"
val = self.data.totalGametesNonMissingForTaxon(0)
self.assertIsInstance(val, metaInteger)
def test_heterozygousCountForTaxon(self):
if debug: print "Testing heterozygousCountForTaxon"
val = self.data.heterozygousCountForTaxon(0)
self.assertIsInstance(val, metaInteger)
def test_totalNonMissingForTaxon(self):
if debug: print "Testing totalNonMissingForTaxon"
val = self.data.totalNonMissingForTaxon(0)
self.assertIsInstance(val, metaInteger)
def test_depth(self):
if debug: print "Testing depth"
depth = self.data.depth()
self.assertTrue(depth is None or isinstance(depth, AlleleDepth))
def test_depthForAlleles(self):
if debug: print "Testing depthForAlleles"
try:
arr = self.data.depthForAlleles(0,0)
self.assertIsInstance(arr[0],metaInteger)
except JavaException as e:
self.assertTrue(is_instance_of(e.throwable, java_imports['NullPointerException']))
def test_allelesBySortType(self):
if debug: print "Testing allelesBySortType"
arr = self.data.allelesBySortType(self.data.ALLELE_SORT_TYPE.Reference,0)
self.assertTrue(arr is None or isinstance(arr, meta_byte_array))
def test_allelePresenceForAllTaxa(self):
if debug: print "Testing allelePresenceForAllTaxa"
bitset = self.data.allelePresenceForAllTaxa(0, WHICH_ALLELE.Major)
self.assertIsInstance(bitset, BitSet)
if __name__ == "__main__":
debug = True
unittest.main()
TASSELbridge.stop()
|
bsd-3-clause
| 1,118,779,781,050,724,100 | 48.971429 | 110 | 0.704803 | false |
ebursztein/SiteFab
|
SiteFab/linter/frontmatter.py
|
1
|
10590
|
# encoding: utf-8
from collections import Counter
import re
import os.path
# from https://mathiasbynens.be/demo/url-regex (Diego Perini's variant)
VALID_URL= re.compile(r"^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\\x{00a1}\-\\x{ffff}0-9]+-?)*[a-z\\x{00a1}\-\\x{ffff}0-9]+)(?:\.(?:[a-z\\x{00a1}\-\\x{ffff}0-9]+-?)*[a-z\\x{00a1}\-\\x{ffff}0-9]+)*(?:\.(?:[a-z\\x{00a1}\-\\x{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$")
VALID_LOCAL_URL = re.compile(r"^/?[a-z0-9\/_\.\-=\?]+$")
VALID_FILENAME = re.compile(r'^[a-z\/][a-z0-9_\-/\.]+\.[a-z]{1,5}$')
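# Quick sanity examples (illustrative ASCII inputs, not used by the linter):
#   VALID_URL.match("https://example.com/page")    -> matches
#   VALID_URL.match("/local/path")                 -> None (no scheme)
#   VALID_LOCAL_URL.match("/files/paper.pdf")      -> matches
#   VALID_FILENAME.match("images/banner-01.jpg")   -> matches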
def lint(post, test_info, config):
"Check the frontmatter of a given post for potential errors"
results = []
    # Bail out early if the post has no frontmatter meta at all
    if post.meta is None:
results.append(['E100', test_info['E100']])
return results
    # Run the meta tests
tests = [
e101_mandatory_fields,
e102_mandatory_fields_for_specific_templates,
e103_field_value,
e104_duplicate_value,
e105_category_in_tags,
e106_duplicate_spaces,
e107_e108_e109_authors_formating,
e110_lowercase_fields,
e111_e112_local_files_exists,
e113_e114_e115_banner_properly_formated,
e116_value_not_null,
e117_e118_e119_permanent_url_is_properly_formated,
e120_valid_permanent_url_prefix,
e121_file_properly_named,
]
for test in tests:
results += test(post, test_info, config)
return results
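# Illustrative usage (hypothetical objects; SiteFab normally drives this):
#   results = lint(post, test_info, config)
#   for code, message in results:
#       print "%s: %s" % (code, message)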
def e101_mandatory_fields(post, test_info, config):
"Check for the presence of mandatory fields in the meta"
results = []
for field in config.frontmatter_mandatory_fields:
if field not in post.meta:
results.append(['E101', test_info['E101'] % field])
return results
def e102_mandatory_fields_for_specific_templates(post, test_info, config):
"Check for the presense of mandatory field for specific template"
results = []
if "template" not in post.meta:
return results
if post.meta.template in config.frontmatter_mandatory_fields_by_templates:
for field in config.frontmatter_mandatory_fields_by_templates[post.meta.template]:
if field not in post.meta:
info = test_info['E102'] % (field, post.meta.template)
results.append(['E102', info])
return results
def e103_field_value(post, test_info, config):
"Check if the value for specific fields match the list"
results = []
for field in config.frontmatter_fields_value:
if field in post.meta:
if post.meta[field] not in config.frontmatter_fields_value[field]:
info = test_info['E103'] % (field, post.meta[field], config.frontmatter_fields_value[field])
results.append(['E103', info])
return results
def e104_duplicate_value(post, test_info, config):
"Check if a value appears twice in a field list"
results = []
for field in post.meta:
value_field = post.meta[field]
if isinstance(value_field, list):
count = Counter()
for elt in value_field:
                try:
                    count[elt] += 1
                except TypeError:
                    # unhashable values (e.g. nested lists) can't be counted
                    continue
duplicates = []
for elt in count.most_common():
if elt[1] > 1:
duplicates.append(elt[0])
if len(duplicates):
info = test_info['E104'] % (field, " ,".join(duplicates))
results.append(['E104', info])
return results
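# Worked example (illustrative): a post whose meta has
#   tags: [security, ml, security]
# produces Counter({'security': 2, 'ml': 1}), so 'security' is reported
# as a duplicate via E104 (exact wording depends on test_info).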
def e105_category_in_tags(post, test_info, config):
"Check if the category appears in the tag list"
results = []
if "category" in post.meta and "tags" in post.meta:
if post.meta.tags and post.meta.category in post.meta.tags:
info = test_info['E105'] % (post.meta.category, " ,".join(post.meta.tags))
results.append(['E105', info])
return results
def e106_duplicate_spaces(post, test_info, config):
"Check if there are extra spaces"
results = []
for field in post.meta:
value = post.meta[field]
if not isinstance(value, list):
value = [value]
for elt in value:
if isinstance(elt, str):
extra_space = re.search(" {2,}", elt)
if extra_space:
info = test_info['E106'] % (field, elt)
results.append(['E106', info])
return results
def e107_e108_e109_authors_formating(post, test_info, config):
"Check if the authors list is properly formatted"
results = []
if not "authors" in post.meta:
return results
authors = post.meta.authors
if not isinstance(authors, list):
        info = test_info['E107'] % authors
        results.append(['E107', info])
return results
for author in authors:
if ',' not in author:
info = test_info['E108'] % authors
results.append(['E108', info])
else:
            # split on the first comma only so extra commas don't raise
            firstname, lastname = author.replace(' ', '').split(',', 1)
if not firstname[0].isupper() or not lastname[0].isupper():
info = test_info['E109'] % firstname
results.append(['E109', info])
return results
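# Accepted author format, as implied by the variable names above:
#   "Elie, Bursztein"  -> firstname "Elie", lastname "Bursztein", both
#   capitalized, so no error; "elie, bursztein" triggers E109 and
#   "Elie Bursztein" (no comma) triggers E108.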
def e110_lowercase_fields(post, test_info, config):
"Check that field values are indeed lowercase"
results = []
for field in config.frontmatter_field_values_must_be_lowercase:
if field in post.meta:
value = post.meta[field]
if not isinstance(value, list):
value = [value]
for elt in value:
if isinstance(elt, str):
if not elt.islower():
info = test_info['E110'] % (field, elt)
results.append(['E110', info])
return results
def e111_e112_local_files_exists(post, test_info, config):
"check if local files exists"
results = []
site_dir = config.site_output_dir
if "files" in post.meta:
if not isinstance(post.meta.files, dict):
info = test_info['E112'] % (type(post.meta.files))
results.append(['E112', info])
return results
for fname, fpath in post.meta.files.items():
if fpath[0] == '/':
full_path = os.path.join(site_dir, fpath[1:])
if not os.path.isfile(full_path):
info = test_info['E111'] % (fname, full_path)
results.append(['E111', info])
return results
def e113_e114_e115_banner_properly_formated(post, test_info, config):
"Ensure the banner is properly formated"
results = []
if not "banner" in post.meta:
return results
banner = post.meta.banner
    if not isinstance(banner, (str, unicode)):
info = test_info['E113'] % (type(banner))
results.append(['E113', info])
return results
if "http" in banner[:6]:
if not VALID_URL.match(banner):
info = test_info['E114'] % (banner)
results.append(['E114', info])
else:
if not VALID_FILENAME.match(banner):
info = test_info['E115'] % (banner)
results.append(['E115', info])
return results
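# Illustrative banner values:
#   "https://example.com/img/banner.jpg"  -> validated with VALID_URL
#   "images/banner.jpg"                   -> validated with VALID_FILENAME
#   42                                    -> E113 (not a string)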
def e116_value_not_null(post, test_info, config):
"Ensure the field value are not null"
results = []
for field in post.meta:
        if post.meta[field] is None:
info = test_info['E116'] % (field)
results.append(['E116', info])
return results
def e117_e118_e119_permanent_url_is_properly_formated(post, test_info, config):
    "Ensure the permanent url is properly formatted"
    results = []
    if "permanent_url" not in post.meta:
return results
url = post.meta.permanent_url
    if not isinstance(url, (str, unicode)):
info = test_info['E117'] % (type(url))
results.append(['E117', info])
return results
if url != "" and not VALID_URL.match(url) and not VALID_LOCAL_URL.match(url):
info = test_info['E118'] % (url)
results.append(['E118', info])
if len(url) and url[0] != '/':
info = test_info['E119'] % (url)
results.append(['E119', info])
return results
def e120_valid_permanent_url_prefix(post, test_info, config):
"Check if the permanent url has a valid template based of its prefix"
results = []
if "template" not in post.meta or not "permanent_url" in post.meta:
return results
tlp = post.meta.template
if tlp not in config.permanent_url_valid_prefixes_by_template:
return results
prefix = config.permanent_url_valid_prefixes_by_template[tlp]
    permanent_url = str(post.meta.permanent_url)
if not permanent_url.startswith(prefix):
info = test_info['E120'] % (permanent_url, prefix)
results.append(['E120', info])
return results
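# Example (hypothetical config): with
#   permanent_url_valid_prefixes_by_template = {"publication": "/publications/"}
# a post using the "publication" template whose permanent_url is
# "/blog/my-paper" fails the startswith check and is reported as E120.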
def e121_file_properly_named(post, test_info, config):
"Check if the files are properly named"
results = []
    # tests that files use expected suffixes (e.g. -slides.pdf or -paper.pdf)
    # and contain the name of the short url (see rename tools)
    if "files" not in post.meta or not isinstance(post.meta.files, dict):
        return results
    for t, f in post.meta.files.items():  # t: file type, f: path or url
# valid type
if t not in config.files.valid_types:
info = test_info['E121'] % (t)
results.append(['E121', info])
        # valid characters
if not VALID_URL.match(f) and not VALID_LOCAL_URL.match(f):
info = test_info['E122'] % (f)
results.append(['E122', info])
        # valid prefix
valid = False
for prefix in config.files.valid_prefixes:
if f.startswith(prefix):
valid = True
if not valid:
info = test_info['E123'] % (f, " ,".join(config.files.valid_prefixes))
results.append(['E123', info])
        # valid suffix
valid = False
for suffix in config.files.valid_suffixes:
if f.endswith(suffix):
valid = True
if not valid and not f.startswith("http"):
info = test_info['E124'] % (f, " ,".join(config.files.valid_suffixes))
results.append(['E124', info])
return results
|
gpl-3.0
| -3,541,465,040,697,513,500 | 34.777027 | 541 | 0.570349 | false |